diff --git "a/2290.jsonl" "b/2290.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2290.jsonl"
@@ -0,0 +1,644 @@
+{"seq_id":"36199414918","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.1\n# kernelspec:\n# display_name: fcul-als-python\n# language: python\n# name: fcul-als-python\n# ---\n\n# # eICU Data Exploration\n# ---\n#\n# Exploring the eICU dataset from MIT with the data from over 139k patients collected in the US.\n#\n# The eICU Collaborative Research Database is a multi-center intensive care unit (ICU) database with high granularity data for over 200,000 admissions to ICUs monitored by eICU Programs across the United States. The database is deidentified, and includes vital sign measurements, care plan documentation, severity of illness measures, diagnosis information, treatment information, and more.\n\n# + {\"colab_type\": \"text\", \"id\": \"KOdmFzXqF7nq\", \"cell_type\": \"markdown\"}\n# ## Importing the necessary packages\n\n# + {\"colab\": {}, \"colab_type\": \"code\", \"id\": \"G5RrWE9R_Nkl\"}\nimport pandas as pd # Pandas to handle the data in dataframes\nimport re # re to do regex searches in string data\nimport plotly # Plotly for interactive and pretty plots\nimport plotly.graph_objs as go\nfrom datetime import datetime # datetime to use proper date and time formats\nimport os # os handles directory/workspace changes\nimport numpy as np # NumPy to handle numeric and NaN operations\nfrom tqdm import tqdm_notebook # tqdm allows to track code execution progress\nimport numbers # numbers allows to check if data is numeric\nimport torch # PyTorch to create and apply deep learning models\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport utils # Contains auxiliary functions\n\n# +\n# Change to parent directory (presumably \"Documents\")\nos.chdir(\"../..\")\n\n# Path to the CSV dataset files\ndata_path = 'Datasets/Thesis/FCUL_ALS/'\n\n# + {\"colab_type\": \"text\", \"id\": \"bEqFkmlYCGOz\", \"cell_type\": \"markdown\"}\n# **Important:** Use the following two lines to be able to do plotly plots offline:\n\n# + {\"colab\": {}, \"colab_type\": \"code\", \"id\": \"fZCUmUOzCPeI\"}\nimport plotly.offline as py\nplotly.offline.init_notebook_mode(connected=True)\n# -\n\n# ## Exploring the preprocessed dataset\n\n# ### Basic stats\n\nALS_proc_df = pd.read_csv(f'{data_path}dataWithoutDunnoNIV.csv')\nALS_proc_df.head()\n\nALS_proc_df.dtypes\n\nALS_proc_df.nunique()\n\nutils.dataframe_missing_values(ALS_proc_df)\n\n# **Comment:** Many relevant features (timestamps, NIV, age, ALSFRS, etc) have zero or low missing values percentage (below 10%), much better than in the PRO-ACT dataset. 
However, there are other interesting ones with more than half missing values (FVC, VC, etc).\n\nALS_proc_df.describe().transpose()\n\nALS_proc_df['El Escorial reviewed criteria'].value_counts()\n\nALS_proc_df['Onset form'].value_counts()\n\nALS_proc_df['UMN vs LMN'].value_counts()\n\nALS_proc_df['C9orf72'].value_counts()\n\nALS_proc_df['SNIP'].value_counts()\n\nALS_proc_df['1R'].value_counts()\n\n# ### Plots\n\n# +\nconfigure_plotly_browser_state()\n\nALS_proc_gender_count = ALS_proc_df.Gender.value_counts().to_frame()\ndata = [go.Pie(labels=ALS_proc_gender_count.index, values=ALS_proc_gender_count.Gender)]\nlayout = go.Layout(title='Patients Gender Demographics')\nfig = go.Figure(data, layout)\npy.iplot(fig)\n\n# +\nconfigure_plotly_browser_state()\n\nALS_proc_niv_count = ALS_proc_df.NIV.value_counts().to_frame()\ndata = [go.Pie(labels=ALS_proc_niv_count.index, values=ALS_proc_niv_count.NIV)]\nlayout = go.Layout(title='Visits where the patient is using NIV')\nfig = go.Figure(data, layout)\npy.iplot(fig)\n\n# +\nconfigure_plotly_browser_state()\n\ndata = [go.Histogram(x = ALS_proc_df.NIV)]\nlayout = go.Layout(title='Number of visits where the patient is using NIV.')\nfig = go.Figure(data, layout)\npy.iplot(fig)\n\n# +\nconfigure_plotly_browser_state()\n\ndata = [go.Scatter(\n                    x = ALS_proc_df.FVC,\n                    y = ALS_proc_df.NIV,\n                    mode = 'markers'\n                   )]\nlayout = go.Layout(\n                    title='Relation between NIV use and FVC values',\n                    xaxis=dict(title='FVC'),\n                    yaxis=dict(title='NIV')\n                   )\nfig = go.Figure(data, layout)\npy.iplot(fig)\n# -\n\n# Average FVC value when NIV is used:\nALS_proc_df[ALS_proc_df.NIV == 1].FVC.mean()\n\n# **Comments:** The average FVC when NIV is 1 is lower than average, but the scatter plot doesn't show a very clear dependence between the variables.\n\n# +\nconfigure_plotly_browser_state()\n\ndata = [go.Scatter(\n                    x = ALS_proc_df['Disease duration'],\n                    y = ALS_proc_df.NIV,\n                    mode = 'markers'\n                   )]\nlayout = go.Layout(\n                    title='Relation between NIV use and disease duration',\n                    xaxis=dict(title='Disease duration'),\n                    yaxis=dict(title='NIV')\n                   )\nfig = go.Figure(data, layout)\npy.iplot(fig)\n# -\n\n# Average disease duration when NIV is used:\nALS_proc_df[ALS_proc_df.NIV == 1]['Disease duration'].mean()\n\n# +\nconfigure_plotly_browser_state()\n\ndata = [go.Scatter(\n                    x = ALS_proc_df['Age at onset'],\n                    y = ALS_proc_df.NIV,\n                    mode = 'markers'\n                   )]\nlayout = go.Layout(\n                    title='Relation between NIV use and age',\n                    xaxis=dict(title='Age at onset'),\n                    yaxis=dict(title='NIV')\n                   )\nfig = go.Figure(data, layout)\npy.iplot(fig)\n# -\n\n# Average age at onset when NIV is used:\nALS_proc_df[ALS_proc_df.NIV == 1]['Age at onset'].mean()\n\n# +\nconfigure_plotly_browser_state()\n\nALS_proc_NIV_3R = ALS_proc_df.groupby(['3R', 'NIV']).REF.count().to_frame().reset_index()\ndata = [go.Bar(\n                x=ALS_proc_NIV_3R[ALS_proc_NIV_3R.NIV == 0]['3R'],\n                y=ALS_proc_NIV_3R[ALS_proc_NIV_3R.NIV == 0]['REF'],\n                name='Not used'\n               ),\n        go.Bar(\n                x=ALS_proc_NIV_3R[ALS_proc_NIV_3R.NIV == 1]['3R'],\n                y=ALS_proc_NIV_3R[ALS_proc_NIV_3R.NIV == 1]['REF'],\n                name='Using NIV'\n               )]\nlayout = go.Layout(barmode='group')\nfig = go.Figure(data=data, layout=layout)\npy.iplot(fig, filename='grouped-bar')\n# -\n\n# Average 3R value when NIV is used:\nALS_proc_df[ALS_proc_df.NIV == 1]['3R'].mean()\n\n# **Comments:** Clearly, there's a big dependence of the use of NIV on the respiratory symptoms indicated by 3R, as expected.\n\n# ## Exploring the raw dataset\n\nALS_raw_df = 
pd.read_excel(f'{data_path}TabelaGeralnew_21012019_sem.xlsx')\nALS_raw_df.head()\n","repo_name":"AndreCNF/eICU-mortality-prediction","sub_path":"notebooks/eICU-data-exploration.py","file_name":"eICU-data-exploration.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23543607951","text":"t = int(input())\nfor j in range(t):\n\tall=[]\n\ts,n=input().strip().split()\n\tk=int(n)\n\tn=len(s)\n\tfor l in range(n):\n\t\tif(s[l]=='+'):\n\t\t\tall.append(1)\n\t\telse:\n\t\t\tall.append(0)\n\t\n\tans=0\n\tfor i in range(n-k+1):\n\t\tif(all[i]==0):\n\t\t\tfor l in range(i,i+k):\n\t\t\t\tall[l] = 1 - all[l]\n\t\t\tans+=1\n\timposs=0\n\tfor i in range(n-k,n):\n\t\tif(all[i]==0):\n\t\t\timposs=1\n\t\t\tbreak\n\tif(imposs==1):\n\t\tprint(\"Case #\"+str(j+1)+\":\",\"IMPOSSIBLE\")\n\telse:\n\t\tprint(\"Case #\"+str(j+1)+\":\",ans)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2494.py","file_name":"2494.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73793816514","text":"def crown(length, height):\r\n for i in range(0, height):\r\n for j in range(0, length):\r\n if i == 0:\r\n print(\" \", end = \"\") \r\n elif i == height - 1:\r\n print(\"-\", end = \"\")\r\n elif ((j < i or j > height - i) and\r\n (j < height + i or\r\n j >= length - i)) :\r\n print (\"#\", end = \"\")\r\n else :\r\n print (\" \", end = \"\") \r\n print()\r\nlength = 51\r\nheight = int((length - 1) / 2)\r\ncrown(length, height)\r\nprint(\"\\nRIP Queen Elizabeth II. Long Live King Charles III!\\n\")\r\n","repo_name":"CodeMaster7000/Queen-Elizabeth-II-Crown-Memorial","sub_path":"HM Crown Memorial.py","file_name":"HM Crown Memorial.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32164861460","text":"from collections import deque\n# reverse = O(n) -> for loop 안에 갇히면 시간 복잡도가 10만*10만이 되어버린다\ndef lingual(func, numbers, n):\n number = deque(numbers)\n\n if n == 0:\n print(\"error\")\n\n else:\n # reverse flag가 홀수이면 loop가 끝나고 뒤집는다\n # flag ---\n\n reverse_flag = 0\n\n for char in func:\n if char == \"R\":\n reverse_flag += 1\n elif char == \"D\":\n if len(number) < 1:\n print(\"error\")\n break\n else:\n if (reverse_flag % 2) == 1:\n number.pop()\n elif (reverse_flag % 2) == 0:\n number.popleft()\n\n if (reverse_flag % 2) == 1:\n number.reverse()\n\n if len(number) != 0:\n temp = '[' + ','.join(map(str, number)) + ']'\n print(temp)\n\n\nt = int(input())\n\nfor i in range(t):\n p = input()\n n = int(input())\n numbers = list(input()[1:-1].split(','))\n lingual(p, numbers, n)\n\n\n","repo_name":"HaJunYoo/Algorithm_Study","sub_path":"자료구조 활용/BOJ/AC(BOJ_5430).py","file_name":"AC(BOJ_5430).py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23546448591","text":"import copy\r\ndef change(k:list,l:int,rd:int):\r\n\r\n if rd > 500:\r\n return str(\"IMPOSSIBLE\")\r\n \r\n if \"+\"*len(k[0]) in k:\r\n return rd\r\n\r\n rlist = []\r\n \r\n for j in range(len(k)):\r\n kitem = k[j]\r\n for i in range(len(kitem)-l+1):\r\n nslice = copy.copy(kitem[i:i+l])\r\n nslice = nslice.replace(\"-\",\"0\").replace(\"+\",\"-\").replace(\"0\",\"+\")\r\n new = kitem[0:i] + nslice + kitem[i+l:]\r\n 
rlist.append(new)\r\n\r\n    rlist = list(set(rlist))\r\n\r\n    rd += 1\r\n    return change(rlist,l,rd)\r\n\r\ndef main():\r\n    alist = []\r\n    s = int(input(\"\"))\r\n    for i in range(s):\r\n        o = input(\"\").split()\r\n        g,c = o[0],int(o[1])\r\n        alist.append(change([g],c,0))\r\n\r\n    for j in range(len(alist)):\r\n        print(\"Case #{}: {}\".format(j+1,alist[j]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n \r\n \r\n \r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3444.py","file_name":"3444.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36820909954","text":"# pylint: disable=W0613\n# allow unused variables so all movement functions can have same parameter definition\nimport operator as _operator\nimport copy as _copy\n\nfrom chess.helpers import add_unit_direction as _add_unit_direction\nfrom chess.move.pathfinder import PathFinder as _pathfinder\n\n\ndef get_all_potential_end_locations(start, directions, board):\n    ends = []\n    for direction in directions:\n        new_start = start\n        location = _add_unit_direction(new_start, direction)\n        while location in board:\n            ends.append(location)\n            new_start = location\n            location = _add_unit_direction(new_start, direction)\n    return ends\n\n\ndef distance_of_two(args):\n    return [x for x in _get_two_moves_away(args.start, args.directions) if x in args.ends]\n\n\ndef distance_of_one(args):\n    return [x for x in _get_one_move_away(args.start, args.directions) if x in args.ends]\n\n\ndef cant_move_onto_threatened_square(args):\n    if args.board[args.start] is None:\n        return []\n\n    threat_color = args.board[args.start].opposite_color\n\n    board = _copy.deepcopy(args.board)\n    board[args.start] = None\n\n    return [end for end in args.ends if not args.calculator.is_threatened(board, [end], threat_color)]\n\n\ndef _get_two_moves_away(start, directions):\n    double_unit = [_add_unit_direction(move, move) for move in directions]\n    return [_add_unit_direction(start, move) for move in double_unit]\n\n\ndef _get_one_move_away(start, directions):\n    ret_val = [_add_unit_direction(move, start) for move in directions]\n    return ret_val\n\n\ndef alternates_landing_on_enemy_and_empty_space(args):\n    ends = []\n    board = args.board\n    for direction in args.directions:\n        new_start = args.start\n        location = _add_unit_direction(new_start, direction)\n        while location in board:\n            initial_move = _add_unit_direction(direction, new_start)\n            new_start = _add_unit_direction(direction, initial_move)\n            if initial_move in args.ends and new_start in args.ends:\n                # enemy piece at first move and no pieces at second move\n                if board[initial_move] and not board[new_start] and board[initial_move].color != board[args.start].color:\n                    ends.append(new_start)\n                else:\n                    break\n            else:\n                break\n    return ends\n\n\ndef cant_jump_pieces(args):\n    end_locations = args.ends\n\n    directions = [_pathfinder.get_unit_direction(args.start, end) for end in end_locations]\n\n    for direction in directions:\n        location_to_remove = args.start\n        found_piece = False\n        while True:\n            location_to_remove = tuple(map(_operator.add, location_to_remove, direction))\n            if location_to_remove not in args.board:\n                break\n\n            if not found_piece and args.board[location_to_remove]:\n                found_piece = True\n            elif found_piece and location_to_remove in end_locations:\n                end_locations.remove(location_to_remove)\n            else:\n                # print(\"floating somewhere {}\".format(location_to_remove))\n                # print(\"had found piece: 
{}\".format(found_piece))\n # print(\"in potential_end_locations: {}\".format(location_to_remove in potential_end_locations))\n # print(\"in else: {}\".format(potential_end_locations))\n pass\n return end_locations\n\n\ndef doesnt_land_on_own_piece(args):\n ends = []\n board = args.board\n for end in args.ends:\n if board[end] and board[args.start]:\n if board[args.start].color != board[end].color:\n ends.append(end)\n else:\n ends.append(end)\n return ends\n\n\ndef doesnt_land_on_piece(args):\n return [end for end in args.ends if not args.board[end]]\n\n\ndef ends_on_enemy(args):\n ends = []\n board = args.board\n for end in args.ends:\n if board[end] and board[args.start] and board[end].color != board[args.start].color:\n ends.append(end)\n return ends\n\n\ndef directional(args):\n return [end for end in args.ends if _is_directional(args.start, end, args.player_direction)]\n\n\ndef _is_directional(start, end, direction):\n direct = True\n direct = direct and _directional_helper(start[0], end[0], direction[0])\n direct = direct and _directional_helper(start[1], end[1], direction[1])\n return direct\n\n\ndef _directional_helper(start, end, direct):\n if direct > 0:\n if end < start:\n return False\n elif direct < 0:\n if end > start:\n return False\n return True\n","repo_name":"theovoss/Chess","sub_path":"chess/move_pipeline/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"37586389804","text":"def readIn(text, boarding):\r\n with open(text, 'r') as f:\r\n boarding[:] = [[x] for x in f.read().split('\\n')]\r\n\r\ndef seatID(boarding):\r\n for item in boarding:\r\n start, end = 0, 127\r\n for i in range(len(item[0])-3):\r\n if item[0][i] == 'F':\r\n end -= (end-start)//2 + 1\r\n elif item[0][i] == 'B':\r\n start += (end-start)//2 + 1\r\n row = start\r\n start, end = 0, 7\r\n for i in range(len(item[0])-3, len(item[0])):\r\n if item[0][i] == 'R':\r\n start += (end-start)//2 + 1\r\n elif item[0][i] == 'L':\r\n end -= (end-start)//2 + 1\r\n item.append(row)\r\n item.append(start)\r\n item.append(row*8+start)\r\n \r\ndef findOurSeat(boarding):\r\n for i in range(len(boarding)-1):\r\n if boarding[i][1] == 0:\r\n continue\r\n if boarding[i+1][3] - boarding[i][3] == 2:\r\n return boarding[i][3]+1\r\n\r\nboarding = []\r\nreadIn(r'2020\\day5\\input\\input.txt', boarding)\r\nseatID(boarding)\r\nboarding.sort(key=lambda x:x[3])\r\nprint(boarding[-1][3])\r\nprint(findOurSeat(boarding))","repo_name":"Gamper98/AdventOfCode","sub_path":"2020/day5/code/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25048154545","text":"class Solution(object):\r\n def coinChange(self, coins, amount):\r\n\r\n coins.sort(reverse = True)\r\n coinNum = [0] * len(coins)\r\n\r\n for i, coin in enumerate(coins):\r\n while amount >= coin:\r\n amount -= coin\r\n coinNum[i] += 1\r\n\r\n return coinNum\r\n\r\nsln = Solution()\r\n\r\n# input\r\ncoins = [25,10,5,1]\r\namount = 83\r\n\r\nrst = sln.coinChange(coins, amount)\r\n\r\nprint(rst)","repo_name":"GoldenAge2010/CodingExcercise","sub_path":"CoinChanges.py","file_name":"CoinChanges.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19729828108","text":"from tkinter import *\r\nfrom tkinter import ttk, 
messagebox\r\nimport db_cursor\r\n\r\n\r\nclass DeleteDelivery(Toplevel):\r\n\r\n def __init__(self):\r\n Toplevel.__init__(self)\r\n self.geometry(\"500x300+550+200\")\r\n self.title(\"Delete Delivery\")\r\n self.resizable(False, False)\r\n\r\n self.my_title = Label(self, text='Delete Delivery', font='Ariel 20 bold')\r\n self.my_title.place(x=150, y=10)\r\n\r\n # product id label & entry\r\n self.product_id_lbl = Label(self, text='Product ID :', font='Ariel 12 bold')\r\n self.product_id_lbl.place(x=20, y=60)\r\n\r\n self.product_id_entry = ttk.Entry(self, width=40)\r\n self.product_id_entry.place(x=145, y=60)\r\n\r\n # Delete , Clear , Cancel buttons\r\n self.delete_btn = Button(self, text='Delete', width=40, height=1, bd=3, font='Ariel 10 bold',\r\n command=self.delete)\r\n self.delete_btn.place(x=90, y=120)\r\n\r\n self.clear_btn = Button(self, text='Clear', width=40, height=1, bd=3, font='Ariel 10 bold', command=self.clear)\r\n self.clear_btn.place(x=90, y=160)\r\n\r\n self.cancel_btn = Button(self, text='Cancel', width=40, height=1, bd=3, font='Ariel 10 bold',\r\n command=self.cancel)\r\n self.cancel_btn.place(x=90, y=200)\r\n\r\n def delete(self):\r\n # check if text fields is not empty\r\n product_id = self.product_id_entry.get()\r\n if product_id == '':\r\n messagebox.showerror(\"ERROR\", \"Product ID field is empty\")\r\n return\r\n\r\n # search the product id in db\r\n sql = \"SELECT * FROM delivery WHERE product_id = %s\"\r\n adr = (product_id, )\r\n\r\n # execute the query\r\n db_cursor.cursor.execute(sql, adr)\r\n\r\n # result is a list of column from db\r\n result = db_cursor.cursor.fetchall()\r\n\r\n # if the list is empty -> there is no column with 'product_id' provided by the user\r\n if len(result) == 0:\r\n messagebox.showerror('ERROR', 'No deliveries with product ID provided')\r\n return\r\n\r\n sql = \"DELETE FROM delivery WHERE product_id = %s\"\r\n adr = (product_id, )\r\n\r\n db_cursor.cursor.execute(sql, adr)\r\n db_cursor.conn.commit()\r\n\r\n messagebox.showinfo('INFO', 'Delivery was deleted successfully')\r\n\r\n\r\n def clear(self):\r\n self.product_id_entry.delete(0, END)\r\n self.product_id_entry.insert(0, '')\r\n\r\n def cancel(self):\r\n self.destroy()\r\n\r\n","repo_name":"vadimbel/GuiDB","sub_path":"delete_delivery.py","file_name":"delete_delivery.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71890740673","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\ndf = pd.read_csv(\"HousingData.csv\")\ndf = df.fillna(0)\nX, y = df.values[1:, :-1], df.values[1:, -1]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\nn_features = X_train.shape[1]\nmodel = Sequential()\nmodel.add(Dense(10, activation='relu', kernel_initializer='he_normal', input_shape=(n_features,)))\nmodel.add(Dense(8, activation='relu', kernel_initializer='he_normal'))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')\nmodel.fit(X_train, y_train, epochs=150, batch_size=32, verbose=1)\nerror = model.evaluate(X_test, y_test, verbose=1)\nprint('MSE %.3f RMSE %.3f' % (error, np.sqrt(error)))\nrow = np.array([[0.00632, 18.00, 2.310, 0, 0.5380, 6.5750, 65.20, 4.0900, 1, 296.0, 15.30, 396.90, 4.98]])\nyhat = model.predict(row)\nprint('Predicted %.3f' % 
yhat)\n","repo_name":"shilpavakkayil/machinelearning1","sub_path":"boston_housing_regression.py","file_name":"boston_housing_regression.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5493051527","text":"from tokenize import String\nimport numpy as np\nfrom typing import Tuple, List\nimport constants\nfrom utils import pizza_calculations\n#our team code\nclass Player:\n def __init__(self, num_toppings, rng: np.random.Generator) -> None:\n \"\"\"Initialise the player\"\"\"\n self.rng = rng\n self.num_toppings = num_toppings\n self.multiplier=40\n self.x = 12*self.multiplier\t# Center Point x of pizza\n self.y = 10*self.multiplier\t# Center Point y of pizza\n self.calculator = pizza_calculations()\n self.counter = 0\n self.anglecounter = 0\n\n def customer_gen(self, num_cust, rng = None):\n \n \"\"\"Function in which we create a distribution of customer preferences\n\n Args:\n num_cust(int) : the total number of customer preferences you need to create\n rng(int) : A random seed that you can use to generate your customers. You can choose to not pass this, in that case the seed taken will be self.rng\n\n Returns:\n preferences_total(list) : List of size [num_cust, 2, num_toppings], having all generated customer preferences\n \"\"\"\n \n alpha = 6.0 \n beta = 2.0 \n\n preferences_total = []\n if rng == None:\n np.random.seed(self.rng)\n print(\"beta distribution\")\n for i in range(num_cust):\n preferences_1 = np.random.beta(alpha, beta, self.num_toppings)\n print(preferences_1)\n preferences_1 = np.clip(preferences_1, 0, None)\n preferences_1 /= preferences_1.sum() \n preferences_total.append(\n [preferences_1.tolist(), preferences_1.tolist()]) \n else:\n for i in range(num_cust):\n preferences_1 = rng.random((self.num_toppings,))\n preferences_1 = 12 * preferences_1 / np.sum(preferences_1)\n preferences_2 = rng.random((self.num_toppings,))\n preferences_2 = 12 * preferences_2 / np.sum(preferences_2)\n preferences = [preferences_1, preferences_2]\n equal_prob = rng.random()\n if equal_prob <= 0.0:\n preferences = (np.ones((2, self.num_toppings))\n * 12 / self.num_toppings).tolist()\n preferences_total.append(preferences)\n\n return preferences_total\n\n #def choose_discard(self, cards: list[str], constraints: list[str]):\n def choose_toppings(self, preferences):\n \"\"\"Function in which we choose position of toppings\n\n Args:\n num_toppings(int) : the total number of different topics chosen among 2, 3 and 4\n preferences(list) : List of size 100*2*num_toppings for 100 generated preference pairs(actual amounts) of customers.\n\n Returns:\n pizzas(list) : List of size [10,24,3], where 10 is the pizza id, 24 is the topping id, innermost list of size 3 is [x coordinate of topping center, y coordinate of topping center, topping number of topping(1/2/3/4) (Note that it starts from 1, not 0)]\n \"\"\"\n x_coords = [np.sin(np.pi/2)]\n pizzas = np.zeros((10, 24, 3))\n for j in range(10): # Assuming we want to make 10 pizzas\n pizza_indiv = np.zeros((24, 3))\n # Define the radius of the circle where the toppings will be placed\n inner_circle_radius = 3 # Radius for toppings 1 and 2\n outer_circle_radius = 4.5 # Radius for toppings 3 and 4\n\n for i in range(24):\n if self.num_toppings == 2:\n angle = 2 * np.pi * i / 24\n x = inner_circle_radius * np.cos(angle)\n y = inner_circle_radius * np.sin(angle)\n topping_type = 1 if angle < np.pi else 2\n\n elif self.num_toppings == 3:\n if i < 16: # 
Toppings 1 and 2\n                        angle = 2 * np.pi * i / 16\n                        x = 2 * np.cos(angle)\n                        y = 2 * np.sin(angle)\n                        topping_type = 1 if i < 8 else 2\n                    else: # Topping 3\n                        angle = 2 * np.pi * (i - 8) / 28 + np.pi/6\n                        x = outer_circle_radius * np.cos(angle)\n                        y = outer_circle_radius * np.sin(angle)\n                        topping_type = 3\n\n                elif self.num_toppings == 4:\n                    if i < 12: # Toppings 1 and 2\n                        angle = 2 * np.pi * i / 12\n                        x = 2 * np.cos(angle)\n                        y = 2 * np.sin(angle)\n                        topping_type = 1 if y > 0 else 2\n                    else: # Toppings 3 and 4\n                        angle = 2 * np.pi * (i - 6) / 24\n                        if i < 18: # Topping 3\n                            angle += np.pi/4 + np.pi/24 \n                            x = outer_circle_radius * np.cos(angle)\n                            y = outer_circle_radius * np.sin(angle)\n                            topping_type = 3\n                        else: # Topping 4\n                            angle += np.pi/2 + np.pi/4 \n                            x = outer_circle_radius * np.cos(angle) \n                            y = outer_circle_radius * np.sin(angle)\n                            topping_type = 4\n\n                pizza_indiv[i] = [x, y, topping_type]\n\n            pizzas[j] = pizza_indiv\n        return list(pizzas)\n        \"\"\"\n        pizzas = np.zeros((10, 24, 3))\n        for j in range(constants.number_of_initial_pizzas):\n            pizza_indiv = np.zeros((24,3))\n            i = 0\n            while i<24:\n                angle = self.rng.random()*2*np.pi\n                dist = self.rng.random()*6\n                x = dist*np.cos(angle)\n                y = dist*np.sin(angle)\n                clash_exists = pizza_calculations.clash_exists(x, y, pizza_indiv, i)\n                if not clash_exists:\n                    pizza_indiv[i] = [x, y, i%self.num_toppings + 1]\n                    i = i+1\n            pizza_indiv = np.array(pizza_indiv)\n            pizzas[j] = pizza_indiv\n        \"\"\"\n        return list(pizzas)\n    \n    #def play(self, cards: list[str], constraints: list[str], state: list[str], territory: list[int]) -> Tuple[int, str]:\n    def choose_and_cut(self, pizzas, remaining_pizza_ids, customer_amounts):\n        \"\"\"Function which, based on the current game state, returns the distance and angle the shot must be played\n\n        Args:\n            pizzas (list): List of size [10,24,3], where 10 is the pizza id, 24 is the topping id, innermost list of size 3 is [x coordinate of topping, y coordinate of topping, topping number of topping(1/2/3/4)]\n            remaining_pizza_ids (list): A list of remaining pizza's ids\n            customer_amounts (list): The amounts in which the customer wants their pizza\n\n        Returns:\n            Tuple[int, center, first cut angle]: Return the pizza id you choose, the center of the cut in format [x_coord, y_coord] where both are in inches relative to pizza center of radius 6, the angle of the first cut in radians. 
\n \"\"\"\n final_id = remaining_pizza_ids[0]\n final_center = [0,0]\n final_angle = 0\n max_score = self.get_score([pizzas[final_id]], [0], [customer_amounts], [[self.x + final_center[0]*self.multiplier, self.y - final_center[1]*self.multiplier, final_angle]])\n\n test_angle = 0\n while test_angle <= 3.14:\n cut = [self.x + final_center[0]*self.multiplier, self.y - final_center[1]*self.multiplier, test_angle]\n score = self.get_score([pizzas[final_id]], [0], [customer_amounts], [cut])\n\n if score > max_score:\n final_center = final_center\n final_angle = test_angle\n test_angle += .01\n \n radius = 5.5\n for i in range(24):\n angle = 2 * np.pi * i / 24\n x = radius * np.cos(angle)\n y = radius * np.sin(angle)\n test_center = [x,y]\n test_angle = 0\n while test_angle <= 3.14:\n cut = [self.x + test_center[0]*self.multiplier, self.y - test_center[1]*self.multiplier, test_angle]\n score = self.get_score([pizzas[final_id]], [0], [customer_amounts], [cut])\n\n if score > max_score:\n final_center = test_center\n final_angle = test_angle\n test_angle += .04\n \n return final_id, final_center, final_angle\n\n def get_score(self, pizzas, ids, preferences, cuts):\n B, C, U, obtained_preferences, center_offsets, slice_amount_metric = self.calculator.final_score(pizzas, ids, preferences, cuts, self.num_toppings, self.multiplier, self.x, self.y)\n usum = self.sum(U[0])\n bsum = self.sum(B[0])\n csum = self.sum(C[0])\n return bsum - csum\n\n def sum(self, array):\n sum = 0\n for a in array:\n for b in a:\n sum += b\n return sum\n","repo_name":"akshay-022/Pizza-game","sub_path":"players/team_3.py","file_name":"team_3.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23540837251","text":"from __future__ import print_function\nimport sys\nimport numpy as np\n\ndef getflips(status, flipsize):\n numflips = 0\n for i in range(0,len(status)-(flipsize-1)):\n if status[i] == -1:\n status[i:i+flipsize] = status[i:i+flipsize] * -1\n numflips += 1\n for i in status[-(flipsize-1):]:\n if i == -1:\n return 'IMPOSSIBLE'\n return str(numflips)\n\n#Read data\nprobset = []\nwith open(sys.argv[1], \"r\") as f:\n T = int(f.readline())\n for x in range(T):\n flipstatus, flippersize = f.readline().split()\n flippersize = int(flippersize)\n flipstatus = np.array([1 if x == '+' else -1 for x in list(flipstatus)])\n roads = {}\n probset.append([flipstatus, flippersize])\nx = 1\nfor prob in probset:\n print(\"Case #%d: %s\" % (x, getflips(prob[0], prob[1])))\n x += 1\n \n\n\n# In[ ]:\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/1567.py","file_name":"1567.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18123214786","text":"import re\nimport os\n\ndef main():\n \n '''\n #for small file we can copy it line by line\n infile = open('lines.txt','r') #default option 'r'for read,'w'for write,'a'for append,'r+'read or write,'rt'text file mode,'rb'binary file mode \n outfile = open('new.txt', 'w')\n \n for line in infile:\n #print(line, file=outfile, end='') #write to a next file name outfile\n print(line, end='') #write to a next file name outfile\n print() \n '''\n '''\n #wifiDevices\n wifiDevices=list() #This is a list of wifi Devices\n infile = open('wifidevices.txt','r') \n for line in infile:\n #print(line, end='') #write to a next file name outfile\n match = 
re.search('http://',line) #have results in match\n #step2: replace the matched pattern with another text\n if match:\n s=line.split() #create a list object\n for string in s:\n match = re.search('http://',string) #have results in match\n if match:\n print(string) \n wifiDevices.append(string)\n #print(line.replace(match.group(),'###'), end='') #call replace the match object\n print(wifiDevices) \n print()\n '''\n \n #zigbeeDevices\n zigbeeDevices=dict(Coordinator=2,Router=3,user=4)\n print(zigbeeDevices)\n print(os.getcwd())\n '''\n zigbeeDevices=list() #This is a list of zigbee Devices\n infile = open('zigbeedevices.txt','r') \n for line in infile:\n match = re.search('Device',line) #have results in match\n #step2: replace the matched pattern with another text\n if match:\n s=line.split() #create a list object\n print(s)\n# for string in s:\n# match = re.search('http://',string) #have results in match\n# if match:\n# print(string) \n# wifiDevices.append(string)\n# print(zigbeeDevices) \n print()\n '''\n \n '''\n #for big file -> we don't need to do it line by line instead we just use a buffer mode\n #use buffer to deal with a big chunk of file\n buffersize = 50000 #give buffersize = 50000 bytes \n inbigfile = open('bigfile.txt', 'r')\n outbigfile = open('newbigfile.txt', 'w')\n buffer = inbigfile.read(buffersize)\n while len(buffer):\n outbigfile.write(buffer)\n print('.', end ='')\n buffer=inbigfile.read(buffersize)\n print()\n print('Done!') \n '''\n \n \nif __name__ == \"__main__\": main()\n","repo_name":"kwarodom/sidtechtalent","sub_path":"ML-Week1/lecture1 basic python/9-Files/files-working.py","file_name":"files-working.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"28384951532","text":"import colorsys\nimport os\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom PIL import ImageDraw, ImageFont\n\nfrom nets.yolo import YoloBody\nfrom utils.utils import (cvtColor, get_anchors, get_classes, preprocess_input, resize_image)\nfrom utils.utils_bbox import DecodeBox\n\n\n'''\n[注] YOLOV5利用先验框与真实目标框的宽高比来定义正例, 宽高比小于阈值即为正例, 同时以允许多个先验框匹配一个真实目标框的方式增大正例的比例,\n 并设置偏移量, 用临近网格同时预测同一个目标, 进一步增加正例数量, 使得正例比例大大增加.\n'''\n\nclass YOLO(nn.Module):\n\n def __init__(self) :\n super().__init__()\n self.model_path = 'model_data/yolov5_s.pth'\n self.classes_path = 'model_data/coco_classes.txt'\n self.anchors_path = 'model_data/yolo_anchors.txt'\n self.anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n self.input_shape = [640, 640]\n self.backbone = 'cspdarknet'\n self.phi = 's'\n self.confidence = 0.5\n self.nms_iou = 0.3\n self.letterbox_image = True\n self.cuda = True\n\n # 获得种类和先验框的数量\n self.class_names, self.num_classes = get_classes(self.classes_path)\n self.anchors, self.num_anchors = get_anchors(self.anchors_path)\n self.bbox_util = DecodeBox(self.anchors, self.num_classes, (self.input_shape[0], self.input_shape[1]), self.anchors_mask)\n\n # 画框设置不同的颜色\n hsv_tuples = [(x / self.num_classes, 1., 1.) 
for x in range(self.num_classes)]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))\n\n self.net = YoloBody(self.anchors_mask, self.num_classes, self.phi, input_shape=self.input_shape)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.net.load_state_dict(torch.load(self.model_path, map_location=device))\n self.net = self.net.eval()\n\n # 这里注意要把权重也放入gpu, 否则会出现input和weights不在同一区域计算的错误\n if self.cuda:\n self.net = nn.DataParallel(self.net)\n self.net = self.net.cuda()\n\n def forward(self, image, crop, count):\n\n image_shape = np.array(np.shape(image)[0:2])\n\n image = cvtColor(image)\n\n image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)\n\n # 加上batch_size维度\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n\n outputs = self.net(images)\n outputs = self.bbox_util.decode_box(outputs)\n\n # 将预测框进行堆叠,然后进行非极大抑制\n results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape, \n image_shape, self.letterbox_image, conf_thres = self.confidence, nms_thres = self.nms_iou)\n \n if results[0] is None: \n return image\n\n # result 第6个参数值为 class_pred\n top_label = np.array(results[0][:, 6], dtype = 'int32')\n top_conf = results[0][:, 4] * results[0][:, 5]\n top_boxes = results[0][:, :4]\n\n font = ImageFont.truetype(font='model_data/simhei.ttf', size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = int(max((image.size[0] + image.size[1]) // np.mean(self.input_shape), 1))\n\n if count:\n print(\"top_label:\", top_label)\n classes_nums = np.zeros([self.num_classes])\n for i in range(self.num_classes):\n num = np.sum(top_label == i)\n if num > 0:\n print(self.class_names[i], \" : \", num)\n classes_nums[i] = num\n print(\"classes_nums:\", classes_nums)\n\n if crop:\n for i, c in list(enumerate(top_boxes)):\n top, left, bottom, right = top_boxes[i]\n top = max(0, np.floor(top).astype('int32'))\n left = max(0, np.floor(left).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom).astype('int32'))\n right = min(image.size[0], np.floor(right).astype('int32'))\n \n dir_save_path = \"img_crop\"\n if not os.path.exists(dir_save_path):\n os.makedirs(dir_save_path)\n crop_image = image.crop([left, top, right, bottom])\n crop_image.save(os.path.join(dir_save_path, \"crop_\" + str(i) + \".png\"), quality=95, subsampling=0)\n print(\"save crop_\" + str(i) + \".png to \" + dir_save_path)\n\n # 图像绘制\n for i, c in list(enumerate(top_label)):\n predicted_class = self.class_names[int(c)]\n box = top_boxes[i]\n score = top_conf[i]\n\n top, left, bottom, right = box\n\n top = max(0, np.floor(top).astype('int32'))\n left = max(0, np.floor(left).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom).astype('int32'))\n right = min(image.size[0], np.floor(right).astype('int32'))\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n print(label, top, left, bottom, right)\n \n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n # fill为画框内填充的颜色, outline为边框颜色\n 
draw.rectangle([left + i, top + i, right - i, bottom - i], outline=self.colors[c])\n\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=self.colors[c])\n draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n\n print('{} model, and classes loaded.'.format(self.model_path))\n return image\n\n\nif __name__ == '__main__':\n classes_path = 'model_data/coco_classes.txt'\n class_names, num_classes = get_classes(classes_path)\n hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]\n\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n print(colors[0])","repo_name":"biuusegithub/Yolov5","sub_path":"yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22351499420","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"a simple sensor data generator that sends to an MQTT broker via paho\"\"\"\n\n\nimport json\nimport time\nimport random\nimport paho.mqtt.client as mqtt\n\n\n\"\"\"generate data and send it to an MQTT broker\"\"\"\nmqttc = mqtt.Client()\n\nmqttc.connect(\"localhost\", 1883)\n\ninterval_secs = 500 / 1000.0\n\ndata = {\n \"sensor_id\": random.randint(1,100),\n \"sensor_ts\": int(time.time()*1000000)\n}\n\ninject_error = False\nif random.randint(1,100) >= 85:\n inject_error = True\n\nmachines = {\"sensor_0\": [1, 5], \"sensor_1\": [1, 20], \"sensor_2\": [0, 5], \"sensor_3\": [20, 60], \"sensor_4\": [1, 60],\n \"sensor_5\": [50, 100], \"sensor_6\": [10, 100], \"sensor_7\": [80, 150], \"sensor_8\": [1, 50],\n \"sensor_9\": [1, 10], \"sensor_10\": [1, 10], \"sensor_11\": [1, 10]}\n\nfor key in range(0, 12):\n min_val, max_val = machines.get(\"sensor_\" + str(key))\n data[\"sensor_\" + str(key)] = random.randint(min_val, max_val)\n if inject_error and key < 2 and random.randint(1, 100) < 80:\n data[\"sensor_\" + str(key)] += random.randint(500,1000)\n\npayload = json.dumps(data)\nprint(\"%s\" % payload)\n\nmqttc.publish(\"iot\", payload)\n","repo_name":"asdaraujo/edge2ai-workshop","sub_path":"setup/terraform/resources/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"61"} +{"seq_id":"44030271230","text":"import requests,json\nimport facebook\nimport pytz\nfrom datetime import datetime\n#-----------------------------------------------------------------\nf = open('/home/mdtuhinhasnat/FB_AUTO_POST/credential.json')\ndata = json.load(f)\n#-----------------------------------------------------------------\ndef post(contest_data,time):\n name = contest_data['name']\n id = contest_data['id']\n dt = datetime.utcnow().replace(tzinfo=pytz.UTC)\n timezone = pytz.timezone('Asia/Dhaka')\n contest_time = dt.astimezone(timezone)\n encrypted_token = data['access_token']\n group_id = data['group_id']\n user_id = data['user_id']\n message = '🅲🅾🅽🆃🅴🆂🆃 🅰🅻🅴🆁🆃'+'\\nName: '+name+'\\nTime: '+str(contest_time.strftime(\"%I:%M %P\"))\n Link = 'https://codeforces.com/contests/'+str(id)\n graph = facebook.GraphAPI(encrypted_token)\n graph.put_object(group_id, 'feed', message=message, link=Link, from_user=user_id)\n print('Post published successfully!')\n\ndef codeforces():\n respons_api = requests.get('https://codeforces.com/api/contest.list?\"phase\"=\"BEFORE\" ')\n data = 
json.loads(respons_api.text)\n p = data['result']\n # print(p)\n for i in p:\n if i['phase']=='BEFORE' :\n timestamp = i['startTimeSeconds']\n time = datetime.fromtimestamp(timestamp)\n today = datetime.today()\n # post(i,time)\n if time.day == today.day and time.month == today.month and time.year == today.year :\n post(i,time)\ncodeforces()\n","repo_name":"md-tuhin-hasnat/Facebook_Post_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13484065056","text":"'''Script para dejar 1 muestra por cluster, pero sin revisar el origen de las muestras. Este script se usará con los pop. based datasets ya que todas las muestras son del mismo país'''\n\nimport sys\nimport random\n\nmultifasta_list = sys.argv[1]\nclusters_file_samples = sys.argv[2]\n\n\n# Creamos una lista donde añadimos todas las muestras del multifasta\nmultifasta_samples = []\n\nwith open(multifasta_list, \"r+\") as multifasta:\n\n lineas = multifasta.readlines()\n\n for linea in lineas:\n\n multifasta_samples.append(linea.rstrip())\n\n\n# Ahora, creamos una nueva lista que incluya TODAS las muestras en cluster, y una 2a lista que contenga una muestra aleatoria por cada cluster\n\nmuestras_en_cluster = []\nmuestra_unica_por_cluster = []\n\nwith open(clusters_file_samples, \"r+\") as clusters_file:\n\n lineas = clusters_file.readlines()\n\n lineas = lineas[1:]\n\n for linea in lineas:\n\n cluster, samples = linea.split(\"\\t\")\n\n samples_list = samples.split(\",\")\n\n muestra_unica_por_cluster.append(random.choice(samples_list).rstrip())\n\n for i in samples_list:\n\n muestras_en_cluster.append(i.rstrip())\n\nprint(\"MUESTRAS EN CLUSTER:\", muestras_en_cluster)\nprint(\"UNA POR CLUSTER\", muestra_unica_por_cluster)\n\nwith open(\"list_1_per_cluster.txt\", \"w+\") as output:\n\n for i in multifasta_samples:\n\n if i not in muestras_en_cluster:\n \n output.write(\"{0}\\n\".format(i))\n\n \n for i in muestra_unica_por_cluster:\n\n output.write(\"{0}\\n\".format(i))\n","repo_name":"fmartinez-tgu/general_scripts","sub_path":"1_por_cluster_not_country_aware.py","file_name":"1_por_cluster_not_country_aware.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23580097738","text":"import re\n\n'''\nmetakarakter \\w akan cocok/match dengan alphanumeric [a-zA_Z0-9_]\nunderscore juga termasuk\n\\w\\w\\w\\w' mencari alpanumerik secara berurutan (ada 4)\nbisa juga diganti dengan \\w{4}\n'''\nkata = '@93 CbA_ ikkkp'\npola = re.search(r'\\w\\w\\w\\w', kata)\nprint(pola) # ","repo_name":"OceanScrape/Regular-Expression","sub_path":"latihan11_metacharacter_w.py","file_name":"latihan11_metacharacter_w.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73253511233","text":"import json\nimport logging\nfrom processor import DataProcessor\nimport pandas as pd\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n\nclass FiQADataProcessor(DataProcessor):\n '''Only use train split for FiQA as test split doesn't have answer groundtruth'''\n def process_documents(self):\n\n documents = pd.read_csv(f'{self.root}/FiQA_train_doc_final.tsv', sep='\\t', index_col=0)\n with 
open(f'{self.root}/documents.jsonl', 'w') as outfile:\n for r, row in documents.iterrows():\n json.dump({'doc_id': row['docid'], 'title': '', 'text': str(row['doc']), 'metadata': {'timestamp': row['timestamp']}}, outfile)\n outfile.write('\\n')\n return\n \n def load_questions(self):\n qid_to_question = {}\n\n questions = pd.read_csv(f'{self.root}/FiQA_train_question_final.tsv', sep='\\t')\n for _, question in questions.iterrows():\n qid = question['qid']\n qid_to_question[qid] = question['question']\n return qid_to_question\n\n def load_documents(self):\n docid_to_document = {}\n documents = pd.read_csv(f'{self.root}/FiQA_train_doc_final.tsv', sep='\\t')\n for _, doc in documents.iterrows():\n docid = str(doc['docid'])\n docid_to_document[docid] = doc['doc']\n return docid_to_document\n \n","repo_name":"awslabs/robustqa-acl23","sub_path":"code/fiqa.py","file_name":"fiqa.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16943883814","text":"from typing import List\n\n\nclass Solution:\n def efficient_cash(self, n: int, m:int, cashes:List[int], d: List[int]) -> int:\n cashes.sort()\n for i in range(cashes[0], m + 1):\n flag=0\n for cash in cashes:\n if i-cash==0:\n d[i]+=1\n if d[i-cash]!=0:\n if flag==0:\n d[i]=d[i-cash] + 1\n flag=1\n else:\n d[i]=min(d[i],d[i - cash] + 1)\n if d[m]==0:\n return -1\n else:\n return d[m]\n\n\n\nsol = Solution()\nn, m = map(int, input().split())\nnums=[0]*n\nfor i in range(n):\n nums[i]=int(input())\nprint(sol.efficient_cash(n, m, nums, [0] * (m + 1)))\n","repo_name":"Y-Joo/Baekjoon-Algorithm","sub_path":"pythonProject/DynamicProgramming/EfficientCash.py","file_name":"EfficientCash.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72459211073","text":"# 功能:用于CLTR模型中,将.h5文件中的image和kpoint数据转换成原图片和其密度图的融合图\n# 具体操作:\n# 1.读取CLTR/npydata/文件下,要转换数据集的npy文件(里面放的是该数据集的所有图片路径)\n# 2.读取数据集图片路径,并获取其相对应的.h5文件(该.h5文件是用CLTR模型的prepare_nwpu.py或prepare_jhu.py生成的,里面存放的是该图片的image和kpoint,image是reshape后的图片数据,kpoint是该图片人头标签点)\n# 3.将.h5文件中的image和kpoint读取出来,并用kpoint去做密度图\n# 4.将密度图和原图image融合成一张图,并且保存密度图和融合图\n\nimport os.path\n\nimport PIL.Image\nimport cv2\nimport numpy\nimport numpy as np\nimport h5py\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm as CM\n\nimport readnpy as rn\nimport generateDensityMap as gdm\nimport readh as rh\n\n\ndef combine_source_pic(npy_file_path, density_img_to):\n # 密度图和原图融合图的文件夹\n combine_den_img_to = f'{density_img_to}/combine'\n\n # 文件夹创建\n if not os.path.exists(density_img_to):\n os.mkdir(density_img_to)\n if not os.path.exists(combine_den_img_to):\n os.mkdir(combine_den_img_to)\n\n # 读取要处理的h5文件\n print('img_list')\n img_list = rn.read_npy(npy_file_path)\n print('h5_list')\n h5_list = [file.replace('images_2048', 'gt_detr_map_2048').replace('jpg', 'h5') for file in img_list]\n print(f'{len(h5_list)}\\n{h5_list}')\n\n # 开始处理文件\n for file_path in h5_list:\n # 保存路径处理\n _, file_name = os.path.split(file_path)\n file_name = file_name.replace('h5', 'jpg')\n density_img_to_path = f'{density_img_to}/{file_name}'\n combine_den_img_to_path = f'{combine_den_img_to}/{file_name}'\n # HDF5的读取:\n h5files = rh.main(file_path)\n print(h5files)\n img = h5files['/image']\n kpoint = h5files['/kpoint']\n img = np.array(img)\n kpoint_data = np.array(kpoint, dtype=numpy.float64)\n # non_zero = [np.nonzero(data)]\n # print(non_zero)\n # 获取密度图\n den_map 
= gdm.gaussian_filter_density(kpoint_data)\n # non_zero2 = [np.nonzero(den_map)]\n # print(non_zero2)\n\n # 密度图展示和保存\n plt.figure(2)\n plt.imshow(den_map, cmap=CM.jet)\n plt.axis('off')\n plt.savefig(density_img_to_path, bbox_inches='tight', pad_inches=0)\n # plt.show()\n\n # 将密度图和原图融合保存\n heatmap = cv2.imread(density_img_to_path)\n combine = cv2.addWeighted(cv2.resize(heatmap, (img.shape[1], img.shape[0])), 0.5, img, 0.5, 0)\n cv2.imwrite(combine_den_img_to_path, combine)\n\n\nif __name__ == '__main__':\n # 基本路径\n npy_file = 'jhu_test_try' # 要做处理的npy文件名\n npy_file_path = f'../CLTR/npydata/{npy_file}.npy' # npy路径\n density_img_to = '../jhu_crowd_v2.0-001/jhu_crowd_v2.0/test/test_try_density' # 得到的密度图存放位置\n\n combine_source_pic(npy_file_path, density_img_to)\n","repo_name":"cxmmaycxm/yolov5CommonTools","sub_path":"makeCombinePicFromh5_CLTR.py","file_name":"makeCombinePicFromh5_CLTR.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24034191257","text":"\"\"\"CSV target sink class, which handles writing streams.\"\"\"\n\nimport datetime\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\nimport pytz\nfrom singer_sdk import PluginBase\nfrom singer_sdk.sinks import BatchSink\n\nfrom target_csv.serialization import write_csv\n\n\nclass CSVSink(BatchSink):\n \"\"\"CSV target sink class.\"\"\"\n\n max_size = sys.maxsize # We want all records in one batch\n\n def __init__( # noqa: D107\n self,\n target: PluginBase,\n stream_name: str,\n schema: Dict,\n key_properties: Optional[List[str]],\n ) -> None:\n self._timestamp_time: Optional[datetime.datetime] = None\n super().__init__(target, stream_name, schema, key_properties)\n\n @property\n def timestamp_time(self) -> datetime.datetime: # noqa: D102\n if not self._timestamp_time:\n self._timestamp_time = datetime.datetime.now(\n tz=pytz.timezone(self.config[\"timestamp_timezone\"])\n )\n\n return self._timestamp_time\n\n @property\n def filepath_replacement_map(self) -> Dict[str, str]: # noqa: D102\n return {\n \"stream_name\": self.stream_name,\n \"datestamp\": self.timestamp_time.strftime(self.config[\"datestamp_format\"]),\n \"timestamp\": self.timestamp_time.strftime(self.config[\"timestamp_format\"]),\n }\n\n @property\n def destination_path(self) -> Path: # noqa: D102\n result = self.config[\"file_naming_scheme\"]\n for key, val in self.filepath_replacement_map.items():\n replacement_pattern = \"{\" f\"{key}\" \"}\"\n if replacement_pattern in result:\n result = result.replace(replacement_pattern, val)\n\n if self.config.get(\"output_path_prefix\", None) is not None:\n result = f\"{self.config['output_path_prefix']}{result}\"\n\n return Path(result)\n\n def process_batch(self, context: dict) -> None:\n \"\"\"Write out any prepped records and return once fully written.\"\"\"\n output_file: Path = self.destination_path\n self.logger.info(f\"Writing to destination file '{output_file.resolve()}'...\")\n new_contents: dict # noqa: F842\n create_new = (\n self.config[\"overwrite_behavior\"] == \"replace_file\"\n or not output_file.exists()\n )\n if not create_new:\n raise NotImplementedError(\"Append mode is not yet supported.\")\n\n if not isinstance(context[\"records\"], list):\n self.logger.warning(f\"No values in {self.stream_name} records collection.\")\n context[\"records\"] = []\n\n records: List[Dict[str, Any]] = context[\"records\"]\n if \"record_sort_property_name\" in self.config:\n sort_property_name 
= self.config[\"record_sort_property_name\"]\n records = sorted(records, key=lambda x: x[sort_property_name])\n\n self.logger.info(f\"Writing {len(context['records'])} records to file...\")\n\n write_csv(output_file, context[\"records\"], self.schema)\n","repo_name":"MeltanoLabs/target-csv","sub_path":"target_csv/sinks.py","file_name":"sinks.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4988326893","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom patient_app.forms import PatientForm\nfrom .models import *\n\n# Create your views here.\n\ndef patient_details(request):\n patient = Patients.objects.all()\n form = PatientForm()\n\n if request.method == 'POST':\n form = PatientForm(request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, \"Patient appointment added successfully\")\n\n context = {\n 'form' : form,\n 'patient' : patient\n }\n \n return render(request, 'patient.html', context)\n\ndef patient_delete_item(request, id):\n patient = Patients.objects.get(id=id)\n patient.delete()\n messages.info(request, \"Item deleted success\")\n return redirect('patient_details')\n\ndef patient_edit_item(request, id):\n patients = Patients.objects.all()\n patient = Patients.objects.get(id=id)\n\n if request.method == 'POST':\n form = PatientForm(request.POST, instance=patient)\n if form.is_valid():\n form.save()\n messages.info(request, \"Patient updated successfully\")\n return redirect('patient_details')\n\n else:\n form = PatientForm(instance=patient)\n\n context = {\n 'form': form,\n 'patients' : patients\n }\n return render(request, 'patient_edit.html', context)","repo_name":"Daina40/Hospital-Management-System","sub_path":"hospital_prj/patient_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72353802435","text":"import os\r\nimport boto3\r\nimport json\r\nimport re\r\nimport logging\r\n\r\n\r\ndef lambda_handler(event, context):\r\n\r\n\trekognition = boto3.client('rekognition')\r\n\tsns = boto3.client('sns')\r\n\ts3 = boto3.resource('s3')\r\n\trekognition_collection_id = \"mylab_collection\"\r\n\tface_match_threshold = 60\r\n\r\n\tprint (\"=================Lambda Start=================\")\r\n\tprint (event)\r\n\tbucket_name = event['Records'][0]#['s3']['bucket']['name']\r\n\tobject_name = event['Records'][0]#['s3']['Object']['key']\r\n\tdict_event = event['Records'][0]\r\n\t\r\n\ts_object_name = dict_event['s3']['object']['key']\r\n\tprint (s_object_name)\r\n\r\n\tprint(bucket_name)\r\n\tprint(object_name)\r\n\tprint(s_object_name)\r\n\t\r\n\tresource_bucket_name = ''\r\n\taim_bucket_name = ''\r\n\r\n\tprint (\"Bucket: \" + resource_bucket_name)\r\n\tprint (\"Object: \" + s_object_name)\r\n \r\n\tpattern = re.compile('.jpg$|.png$|.jpeg$|.JPG$|.PNG$|.JPEG$')\r\n\tif pattern.findall(s_object_name):\r\n\t print (\"It's vaild input: \" + pattern.findall(s_object_name)[0])\r\n\telse:\r\n\t print (\"Sorry, not supported format\")\r\n\t return 0\r\n \r\n\trekognition_response = rekognition.search_faces_by_image(\r\n\t\tCollectionId = rekognition_collection_id,\r\n\t\tImage = {\r\n\t\t\t'S3Object': {\r\n\t\t\t\t'Bucket': resource_bucket_name,\r\n\t\t\t\t'Name': s_object_name\r\n\t\t\t}\r\n\t\t},\r\n\t\tMaxFaces=123,\r\n\t\tFaceMatchThreshold = face_match_threshold\r\n\t)\r\n\t\r\n\tprint 
(rekognition_response)\r\n\tprint (\"=================Rekognition Response=================\")\r\n\tprint (json.dumps(rekognition_response, indent = 4))\r\n\t\r\n\tprint (\"=================Face Match Result====================\")\r\n\tface_matches = rekognition_response['FaceMatches']\r\n\tprint (face_matches)\r\n\t\r\n\tface_matches_result_id = rekognition_response\r\n\tprint (face_matches_result_id)\r\n\t\r\n\tif len(face_matches) > 0:\r\n\t print (\"match!\")\r\n\t print (\"=================Item=================\")\r\n\t face_matches_result_id = rekognition_response['FaceMatches'][0]['Face']['ExternalImageId']\r\n\t print (face_matches_result_id)\r\n\t face_matches_result_id = []\r\n\t for item in rekognition_response['FaceMatches']:\r\n\t print (item)\r\n\t face_matches_result_id.append(item['Face']['ExternalImageId'])\r\n\t \r\n\t print (face_matches_result_id)\r\n\t print (set(face_matches_result_id))\r\n\t print (\"=================Moving Picture=================\")\r\n\t for face_id in set(face_matches_result_id):\r\n\t print (face_id)\r\n\t aim_object_name = ('rekognition-result' + '/' + face_id + '/' + s_object_name)\r\n\t s3.Object(aim_bucket_name, aim_object_name).copy_from(CopySource={'Bucket': resource_bucket_name, 'Key': s_object_name})\r\n\t print (\"Moving to %s\" % (aim_bucket_name + aim_object_name))\r\n\t \r\n\t s3.Object(resource_bucket_name, s_object_name).delete()\r\n\t sns.publish(TopicArn ='', Subject = 'Recog_result: ', Message = 'match'+ face_id)\r\n\r\n\t \r\n\telse:\r\n\t print (\"looks like someone else.\")\r\n\t aim_object_name = ('rekognition-result' + '/not-found/' + s_object_name)\r\n\t s3.Object(aim_bucket_name, aim_object_name).copy_from(CopySource={'Bucket': resource_bucket_name, 'Key': s_object_name})\r\n\t s3.Object(resource_bucket_name, s_object_name).delete()\r\n\t sns.publish(TopicArn = '', Subject = 'Recog_result: ', Message = 'Someone else publish a different photo')\r\n","repo_name":"JellalYu/Customize-facial-recognition-system-with-AWS-Rekognition","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30345106178","text":"#!/usr/bin/python3\nimport clip\nimport torch\nimport sys\nfrom pyparsing import *\nimport numpy as np\n\nppc = pyparsing_common\n\nParserElement.enablePackrat()\nsys.setrecursionlimit(3000)\n\nword = Word(alphas)\nphrase = QuotedString(\"'\", escChar='\\\\')\ninteger = ppc.integer\n\noperand = word | phrase | integer\nplusop = oneOf(\"+ -\")\nsignop = oneOf(\"+ -\")\nmultop = oneOf(\"* /\")\n\nexpr = infixNotation(\n operand,\n [\n (multop, 2, opAssoc.LEFT),\n (plusop, 2, opAssoc.LEFT),\n ],\n)\n\n\ndef text_concepts_to_vector(model, concepts):\n operator = ''\n features = []\n if len(concepts) != 3:\n raise 'unbalanced expression - expected '\n for concept in concepts:\n if type(concept) == str:\n if concept in ['+', '-', '/', '*']:\n operator = concept\n else:\n with torch.no_grad():\n features.append(np.array(model.encode_text(clip.tokenize(concept))[0].tolist()))\n elif type(concept) == int:\n features.append(concept)\n else:\n features.append(np.array((text_concepts_to_vector(model, concept))))\n if operator == '+':\n return features[0] + features[1]\n elif operator == '-':\n return features[0] - features[1]\n elif operator == '/':\n return features[0] / features[1]\n elif operator == '*':\n return features[0] * features[1]\n else:\n raise f'unknown operator 
{operator}')\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nmodel, preprocess = clip.load(\"ViT-L/14\", device=device)\n\nif __name__ == '__main__':\n    for text in sys.stdin:\n        concepts = expr.parseString(text.strip())\n        print(text_concepts_to_vector(model, concepts[0]).tolist())\n        sys.stdout.flush()\n","repo_name":"ClickHouse/laion","sub_path":"udfs/embed_concept.py","file_name":"embed_concept.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"22015986541","text":"import coverage\nimport unittest\n\nfrom flask_script import Manager\nfrom users import create_app, db\nfrom users.api.models import User\n\nCOV = coverage.coverage(\n    branch=True,\n    include='users/*',\n    omit=[\n        'users/tests/*'\n    ]\n)\nCOV.start()\n\napp = create_app()\nmanager = Manager(app)\n\n@manager.command\ndef seed_db():\n    \"\"\"Seeds the database.\"\"\"\n    db.session.add(User(username='elmer', email=\"elmer.thomas@gmail.com\"))\n    db.session.add(User(username='thinkingserious', email=\"elmer@thinkingserious.com\"))\n    db.session.commit()\n\n@manager.command\ndef recreate_db():\n    \"\"\"Recreates a database.\"\"\"\n    db.drop_all()\n    db.create_all()\n    db.session.commit()\n\n@manager.command\ndef cov():\n    \"\"\"Runs the unit tests with coverage.\"\"\"\n    tests = unittest.TestLoader().discover('users/tests')\n    result = unittest.TextTestRunner(verbosity=2).run(tests)\n    if result.wasSuccessful():\n        COV.stop()\n        COV.save()\n        print('Coverage Summary:')\n        COV.report()\n        COV.html_report()\n        COV.erase()\n        return 0\n    return 1\n\n@manager.command\ndef test():\n    \"\"\"Runs the tests without code coverage.\"\"\"\n    tests = unittest.TestLoader().discover('users/tests', pattern='test*.py')\n    result = unittest.TextTestRunner(verbosity=2).run(tests)\n    if result.wasSuccessful():\n        return 0\n    return 1\n\nif __name__ == '__main__':\n    manager.run()\n","repo_name":"thinkingserious/flask-microservices-users","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"27245647758","text":"#!/usr/bin/env python3.5\n\n\"\"\"\nvtugeo aims to convert 2D (x-y) vtu unstructured grids to geodataframe \nstructures and vice versa. Furthermore, output as polygon shapefile is provided\nAll node and element fields from vtu are transferred to the geodataframe \nand can be written out to shapefile as attributes. Node based fields\nstart with an \"N_{i}\" where i is the number of the corresponding node\nLIMITATION: UNTIL NOW ONLY PURE TRIANGULAR VTU GRIDS ARE ALLOWED\n\nV02: exceptions when field data is vector data are caught\n    improved readability\n\"\"\"\n\n__author__ = \"Erik Nixdorf\"\n__propertyof__ = \"Helmholtz-Zentrum fuer Umweltforschung GmbH - UFZ. 
\"\n__email__ = \"erik.nixdorf@ufz.de\"\n__version__ = \"0.2\"\n\n# Import functions\n\nimport pandas as pd\nimport geopandas as gpd\nimport sys\nimport meshio\nimport numpy as np\nfrom operator import itemgetter\nimport shapely.geometry\n\n\ndef unique_nd(a):\n \"\"\"\n A function which allows to find unique sets in n dimension\n inspired by https://stackoverflow.com/questions/8560440/\n removing-duplicate-columns-and-rows-from-a-numpy-2d-array\n \"\"\"\n order = np.lexsort(a.T)\n a = a[order]\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = (diff != 0).any(axis=1)\n return a[ui]\n\n\ndef vtu_to_geo(fctn_inpt='testmesh.vtu', inpt_flfrmt='vtu-ascii',\n srs_crs='EPSG:31468', elmt_type='triangle', Output=None):\n \"\"\"\n vtu unstructured grid to geodataframe\n \"\"\"\n # check whether the input is string or instance of vtk objects\n if isinstance(fctn_inpt, str):\n filename = fctn_inpt\n # read the mesh\n mesh = meshio.read(filename, file_format=inpt_flfrmt)\n else:\n sys.exit('Only explicite VTK and VTU files allowed as input!')\n\n # get triangles nodes, quads could exist as well, not covered yet\n if len(mesh.cells.keys()) == 1:\n try:\n mesh_nds = mesh.cells[elmt_type]\n except:\n sys.exit('Only triangular 2D Meshes supported ')\n else:\n sys.exit('More than one type of elements not supported')\n\n # Get the Nodes corresponding to the elements\n # https://www.geeksforgeeks.org/python-accessing-all-elements-at-given-list-of-indexes/\n nds_cord = list(itemgetter(*mesh_nds)(mesh.points))\n tri_geom = [shapely.geometry.Polygon(zip(nds_cord[i][:, 0],\n nds_cord[i][:, 1],\n nds_cord[i][:, 2]))\n for i in range(0, len(nds_cord))]\n # next we create the dataframe for the element fields\n df_elmnt_flds = pd.DataFrame()\n for key in mesh.cell_data[elmt_type]:\n # check whether scalar or vector input\n if len(mesh.cell_data[elmt_type][key].shape) == 1:\n df_elmnt_flds[key] = mesh.cell_data[elmt_type][key]\n else:\n print(key, 'could not used, no support of vector data input')\n\n # finally for the node_based informationn get the NodalValues as Element\n # based attributes with particular column for each node\n meshpnt_fldnms = list(mesh.point_data.keys())\n # create the dataframe\n df_pnt_flds = pd.DataFrame()\n # we have to define the order of the nodes in a polygon , in vtk it is\n # counter clockwise, however in shapefile it is clockwise\n if isinstance(Output, str):\n NodeOrder = [0, 2, 1]\n else:\n NodeOrder = [0, 1, 2]\n # for each of the fields we convert and append\n for meshpnt_fldnm in meshpnt_fldnms:\n fld_data = np.stack(itemgetter(*mesh_nds)\n (mesh.point_data[meshpnt_fldnm]))\n try:\n df_pnt_flds[['N_'+str(i)+'_'+meshpnt_fldnm\n for i in NodeOrder]] = pd.DataFrame(fld_data)\n except:\n print(meshpnt_fldnm, ' is vector data input, so support yet')\n print('... 
done writing fields')\n # Finally we merge all the dataframes to one geodataframe \n # create a geopandas dataframe\n gdf_mesh = gpd.GeoDataFrame(pd.concat([df_elmnt_flds, df_pnt_flds], axis=1),\n geometry=tri_geom, crs=srs_crs)\n if isinstance(Output, str):\n gdf_mesh.to_file(Output)\n return gdf_mesh\n\n\ndef geo_to_vtu(fctn_inpt='testmesh.shp',\n elmt_fldnms_long=['MaterialIDs', 'bulk_element_ids', 'bulk_face_ids'],\n pnt_fldnms_long=['bulk_node_ids'],\n Output='outputmsh.vtu',\n Output_flfrmt='vtu-binary'):\n \"\"\"\n This Function takes a geodataframe/shapefile and converts to a VTU mesh\n conventions: only triangles supported\n All fields starting with a number from 0 (to 2) belong to nodal values\n BULK NODE IDS ARE PROBABLY WRONGLY ASSIGNED\n elmt_fldnms_long and pnt_fldnms_long define some (more than 10 digit)\n OGS field names which we need later to replace\n CHECK BY INDEX\n @author: Erik\n \"\"\"\n if isinstance(fctn_inpt, str):\n gdf_inpt = gpd.read_file(fctn_inpt)\n else:\n gdf_inpt = fctn_inpt\n\n # extract cell_data arrays, which neither start with \"N_\" or geometry\n cell_fld_nms = [col_nm for col_nm in gdf_inpt.columns\n if col_nm.startswith('N_') is False\n and col_nm != 'geometry']\n print('...cell fieldnames ', cell_fld_nms, ' have been identified')\n # extract the nodal_array_nms and reshape datasets\n # We check for all point_datafields and append them to existing array\n pnt_fld_uniqs = set()\n for col_nm in gdf_inpt.columns:\n if col_nm.startswith('N_'):\n node_array_nms = col_nm\n pnt_fld_uniqs.add(col_nm[4:])\n print('...point fieldnames ', list(pnt_fld_uniqs), ' have been identified')\n # we check whether we have 2D or 3D Data\n if gdf_inpt['geometry'][0].has_z:\n dimension = 3\n dim_clm_name = ['X', 'Y', 'Z']\n clm_nms = ['Element_ID', 'X', 'Y', 'Z']\n else:\n dimension = 2\n dim_clm_name = ['X', 'Y']\n clm_nms = ['Element_ID', 'X', 'Y']\n # %% Extract all information from the shape which are relevant for VTU Mesh\n # first coordiantes for all nodes forming a polygon (mesh element)\n # https://stackoverflow.com/questions/20474549/\n # extract-points-coordinates-from-a-polygon-in-shapely\n pnts_per_elmnt = len(gdf_inpt['geometry'][0].exterior.coords[:-1])\n df_pnt_cell_flds_lst = [np.hstack((np.ones((pnts_per_elmnt, 1))*i,\n np.array(gdf_inpt['geometry'][i].exterior.coords)[:-1]))\n for i in range(0, gdf_inpt['geometry'].shape[0])]\n # create pandas array which contains all elements and nodes\n df_pnt_cell_flds = pd.DataFrame(np.vstack(df_pnt_cell_flds_lst),\n columns=clm_nms)\n # create a new df with all fields belonging to elements are repeated two t\n df_cell_flds = pd.concat([gdf_inpt[cell_fld_nms]] *\n pnts_per_elmnt).sort_index()\n # append the cell fld to the pnt_cell_flds\n df_pnt_cell_flds = pd.concat([df_pnt_cell_flds,\n df_cell_flds.reset_index(drop=True)], axis=1)\n # We check for all point_datafields and append to existing numpy array\n for pnt_fld_uniq in pnt_fld_uniqs:\n node_array_nms = ['N_'+str(i)+'_'+pnt_fld_uniq\n for i in range(0, dimension)]\n df_pnt_cell_flds[pnt_fld_uniq] = gdf_inpt[node_array_nms].to_numpy().flatten().reshape(-1, 1)\n print('... 
done processing point fields')\n # set the last column to new node_id\n # finding the unique points using our new unique2d function\n df_uniq_pts = pd.DataFrame(unique_nd(df_pnt_cell_flds[dim_clm_name].to_numpy()),\n columns=dim_clm_name)\n df_uniq_pts['Node_ID'] = np.linspace(0, len(df_uniq_pts)-1, len(df_uniq_pts))\n # merge the uniquepoints with the element based dataframe\n # in order to get the NodeID for each point in each element\n df_pnt_cell_flds = pd.merge(df_uniq_pts, df_pnt_cell_flds,\n how=\"inner\", on=dim_clm_name)\n print('... done merging point and cell flds')\n # %%Create the dataframe with the points\n df_msh_pnts = df_pnt_cell_flds.drop_duplicates(subset=['Node_ID']).sort_values(by=['Node_ID'])\n # delete element relevant fields\n df_msh_pnts.drop(columns=cell_fld_nms, inplace=True)\n # delete the element ID\n df_msh_pnts.drop(columns=['Element_ID', 'Node_ID'], inplace=True)\n # create the numpy array for point coordinates\n msh_pnt_arr = df_msh_pnts[dim_clm_name].to_numpy()\n # create the dictionary with pnt fields\n msh_pnt_flds = {}\n for pnt_fld_uniq in pnt_fld_uniqs:\n msh_pnt_fld = df_msh_pnts[pnt_fld_uniq].to_numpy()\n # check if the cell_array name to be replaced (10 digit bug in shp)\n try:\n pnt_fld_uniq = [s for s in pnt_fldnms_long if pnt_fld_uniq in s][0]\n except:\n pass\n # add to dictionary\n msh_pnt_flds[pnt_fld_uniq] = msh_pnt_fld\n # Next we create the msh cell dictionary and\n # the array which points element to node\n\n # sort by element id\n df_msh_cells = df_pnt_cell_flds.sort_values(by=['Element_ID'])\n # get node index per cell\n msh_cell_pntids_arr = df_msh_cells['Node_ID'].to_numpy().reshape(\n int(np.max(df_pnt_cell_flds['Element_ID']))+1, 3).astype(int)\n msh_cell_pntids = {\"triangle\": msh_cell_pntids_arr}\n # reduce the number of columns\n df_msh_cell_flds = df_msh_cells.drop_duplicates(subset=['Element_ID'])\n df_msh_cell_flds = df_msh_cell_flds[cell_fld_nms]\n # write the dictionary\n msh_cell_fld_dict = {}\n for cell_fld_nm in cell_fld_nms:\n msh_cell_fld = df_msh_cell_flds[cell_fld_nm].to_numpy()\n # check if the cell_array name to be replaced (10 digit bug in shp)\n try:\n cell_fld_nm = [s for s in elmt_fldnms_long if cell_fld_nm in s][0]\n except:\n pass\n # add to dictionary\n msh_cell_fld_dict[cell_fld_nm] = msh_cell_fld\n # %% create object and write\n mesh = meshio.Mesh(msh_pnt_arr, msh_cell_pntids)\n # add cellfield data\n mesh.cell_data['triangle'] = msh_cell_fld_dict\n # add point field data\n mesh.point_data = msh_pnt_flds\n # write out\n if isinstance(Output, str):\n meshio.write(Output, mesh, file_format=Output_flfrmt)\n","repo_name":"ErikNixdorf/vtugeo","sub_path":"vtugeo.py","file_name":"vtugeo.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23622322731","text":"INPUT_FILE = r'C:\\Downloads\\FromFirefox\\D-large.in'\r\nOUTPUT_FILE = r'C:\\Users\\Assaf\\Fun\\codeJam\\D-large.out'\r\n\r\nfrom math import factorial as fact\r\nfrom random import shuffle\r\n\r\ninputFile = file(INPUT_FILE, 'rb')\r\nnumQuestions = int(inputFile.readline())\r\noutputFile = file(OUTPUT_FILE, 'wb')\r\n\r\ndef findCycles(l):\r\n undone = set(l)\r\n result = []\r\n while undone:\r\n start = undone.pop()\r\n pos = start\r\n cycle = [pos]\r\n while l[pos] != start:\r\n pos = l[pos]\r\n cycle.append(pos)\r\n undone.remove(pos)\r\n result.append(cycle)\r\n return result\r\n\r\n# Just for testing\r\ndef goroSort(n):\r\n steps = 0\r\n l = 
range(n)\r\n shuffle(l)\r\n cycles = findCycles(l)\r\n for cycle in cycles:\r\n if len(cycle) > 1:\r\n steps += 1\r\n steps += goroSort(len(cycle))\r\n return steps\r\n\r\ndef solveQuestion(numbers):\r\n cycles = findCycles(numbers)\r\n total = 0\r\n for cycle in cycles:\r\n if len(cycle) > 1:\r\n total += len(cycle)\r\n \r\n return '%d.000000' % total\r\n\r\nfor q in xrange(numQuestions):\r\n outputFile.write(\"Case #%d: \" % (q+1))\r\n l = int(inputFile.readline())\r\n numbers = map(int, inputFile.readline().split(' '))\r\n numbers = map(lambda x:x-1, numbers)\r\n if len(numbers) != l:\r\n raise Exception(\"Input error N\")\r\n result = solveQuestion(numbers)\r\n outputFile.write(result)\r\n outputFile.write(\"\\n\")\r\n\r\noutputFile.close()\r\ninputFile.close()\r\n# print file(OUTPUT_FILE, 'rb').read()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_77/162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5438482131","text":"import numpy as np\nimport plotroutines as plot\n\n#------------------------------------------------------------------------------#\n\n# Insert here your data for the plotting, see the file 'color_regions.vtu'\n# for the coloring code of each region.\n\ntitles = ['$\\\\sim 4k$ cells - permeability 1e4', '$\\\\sim 4k$ cells - permeability 1e-4']\nconds = [0, 1]\n\nplaces_and_methods = {\n \"UiB\": [\"TPFA\", \"MPFA\", \"MVEM\", \"RT0\"],\n \"USTUTT\": [\"MPFA\", \"TPFA\\_Circ\"],\n \"LANL\": [\"MFD\"],\n# \"NCU\\_TW\": [\"Hybrid\\_FEM\"],\n \"UNICE\\_UNIGE\": [\"VAG\\_Cont\", \"HFV\\_Cont\", \"VAG\\_Disc\", \"HFV\\_Disc\"],\n \"ETHZ\\_USI\": [\"FEM\\_LM\"],\n \"UNICAMP\": [\"Hybrid\\_Hdiv\"],\n \"UNIL\\_USI\": [\"FE\\_AMR\\_AFC\"],\n# \"INM\": [\"EDFM\"],\n# \"DTU\": [\"FEM\\_COMSOL\"]\n};\n\nregions = np.array([1, 10, 11])\nregions_fig = {1: \"case2_region10pic.png\", 10: \"case2_region11pic.png\", 11: \"case2_region1pic.png\"}\n\n#------------------------------------------------------------------------------#\n\nfor cond, title in zip(conds, titles):\n\n fig = plot.plt.figure(cond+11, figsize=(16, 6))\n fig.subplots_adjust(hspace=0, wspace=0)\n if cond == 0:\n ylim = (0, 0.475)\n else:\n ylim = (0, 0.4)\n\n for region_pos, region in enumerate(regions):\n ax = fig.add_subplot(1, regions.size, region_pos + 1, ylim=ylim)\n\n for place in places_and_methods:\n for method in places_and_methods[place]:\n folder = \"../results/\" + place + \"/\" + method + \"/\"\n data = folder.replace(\"\\\\\", \"\") + \"/dot_cond_\" + str(cond) + \".csv\"\n label = place + \"\\_\" + method\n\n plot.plot_over_time(data, label, title, cond, region, region_pos, regions.size, ax,\n lineStyle=plot.linestyle[place][method],\n clr=plot.color[place][method],\n has_legend=False, fmt=\"%1.2f\")\n\n # save figures\n plot.save(cond, \"case2_cot_cond_\"+str(cond), starting_from=3*cond)\n\nncol = 4\nfor cond in conds:\n for place in places_and_methods:\n for method in places_and_methods[place]:\n label = \"\\\\texttt{\" + place + \"-\" + method + \"}\"\n plot.plot_legend(label, cond, plot.linestyle[place][method],\n plot.color[place][method], ncol)\n\n plot.save(cond, \"case2_cot_cond_\"+str(cond)+\"_legend\")\n 
plot.crop_pdf(\"case2_cot_cond_\"+str(cond)+\"_legend\")\n\n#------------------------------------------------------------------------------#\n#------------------------------------------------------------------------------#\n","repo_name":"gridfunction/fracturedPorousMedia","sub_path":"data/regular/scripts/pot.py","file_name":"pot.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"38329929884","text":"import json\nfrom bs4 import BeautifulSoup\n\n\nfrom django.db.models import F\nfrom django.views.generic import View\nfrom django.http import JsonResponse\n\nfrom blog.utils.response import BaseResponse\nfrom blog.models import ArticleUp\nfrom blog.models import Article\n\nclass ArticleUpView(View):\n \"\"\"点赞\"\"\"\n def post(self,requset):\n # 如果此人没有博客,新建博客\n ret = BaseResponse()\n user_id = requset.user.id\n article_id = requset.POST.get(\"article_id\")\n print(user_id,article_id)\n article_up = ArticleUp.objects.filter(user_id=user_id,article_id=article_id)\n if article_up:\n ret.code = 100\n article_up.delete()\n Article.objects.filter(id=article_id).update(up_count=F('up_count') - 1)\n return JsonResponse(ret.dict)\n try:\n ArticleUp.objects.create(user_id=user_id, article_id=article_id)\n Article.objects.filter(id=article_id).update(up_count=F('up_count')+1)\n except:\n ret.cdoe =300\n ret.msg = \"点赞失败\"\n return JsonResponse(ret.dict)","repo_name":"cpfyjjs/mysite","sub_path":"blog/views/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23558435231","text":"def tidy(n):\n prev = '0'\n for d in str(n):\n if d < prev:\n return False\n prev = d\n return True\n\nfor t in range(int(input())):\n n = int(input())\n a = 1\n while True:\n if tidy(n):\n break\n d = (n // a) % 10\n if d != 9:\n n -= (d + 1) * a\n a *= 10\n\n print('Case #%d: %d' % (t+1, n))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3386.py","file_name":"3386.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8733269574","text":"import csv\nimport math\nimport random\nimport sys\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# read all records from earthquake CSV file on https://www.usgs.gov/\ndef readEarthquakeDataFile(filename):\n\twith open(filename) as data_file:\n\t\tdata_samples = csv.reader(data_file)\n\t\ttitles = next(data_samples)\n\t\tseq = 0\n\t\tdata = {}\n\n\t\tfor line in data_samples:\n\t\t\tlat = float(line[1])\n\t\t\tlon = float(line[2])\n\t\t\tmag = float(line[4])\n\t\t\tdata[seq] = [lat, lon, mag]\n\t\t\tseq = seq + 1\n\treturn data\n\n# calculate Euclid distance\ndef calcEuclidDistance(xi, xj):\n\td = len(xi)\n\tsquare_sum = 0.0\n\tfor l in range(d):\n\t\tsquare_sum += (xj[l] - xi[l]) ** 2\n\treturn math.sqrt(square_sum)\n\n# calculate the average value from the samples within one cluster\ndef calcAverageDistance(clusterData):\n\tdist_x = 0.0\n\tdist_y = 0.0\n\tfor i in range(len(clusterData)):\n\t\tdist_x += clusterData[i][0]\n\t\tdist_y += clusterData[i][1]\n\treturn [dist_x/len(clusterData), dist_y/len(clusterData)]\n\n# randomly generate k cluster centers from the samples\ndef createIntialClusterCenter(k, data):\n\tseqs = [x for x in data.keys()]\n\tinitCenters = 
[]\n\tfor i in range(k):\n\t\tseq = random.choice(seqs)\n\t\tinitCenters.append([data[seq][0], data[seq][1]])\n\treturn initCenters\n\n# Iterate and progressively form the k clusters\ndef performKMeansCluster(k, clusterCenters, data):\n\teachClusterData = defaultdict(list)\n\t# add the current sample to target cluster\n\tfor i in range(len(data)):\n\t\tmin_dist = sys.maxsize\n\t\tmin_k = 0\n\t\tfor j in range(k):\n\t\t\tdist = calcEuclidDistance([data[i][0], data[i][1]], \n\t\t\t\t[clusterCenters[j][0], clusterCenters[j][1]])\n\t\t\tif dist <= min_dist:\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_k = j\n\t\teachClusterData[min_k].append(data[i])\n\t# update the cluster center with the average value in one cluster.\n\tclusterCenters.clear()\n\tfor k in eachClusterData.keys():\n\t\tdist_center = calcAverageDistance(eachClusterData[k])\n\t\tclusterCenters.append(dist_center)\n\treturn eachClusterData\n\n# visualize the cluster via matplotlib's scatter graph\ndef visualizeEarthquakeClusters(eachClusterData):\n\t# draw the global map as the background\n\timg = mpimg.imread('./worldmap.jpeg')\n\tplt.imshow(img)\n\t# draw the bubble scatter graph\n\twfac = 780.0 / 2.0 / 180.0\n\thfac = 388.0 / 2.0 / 90.0\n\txx = []\n\tyy = []\n\tsz = []\n\tcol = []\n\tcol_map = ['red', 'blue', 'green', 'yellow', 'cyan', 'black']\n\tfor key in eachClusterData.keys():\n\t\tclusterData = eachClusterData[key]\n\t\tfor i in range(len(clusterData)):\n\t\t\txx.append(clusterData[i][1] * wfac + 780.0/2)\n\t\t\tyy.append(clusterData[i][0] * hfac + 388.0/2)\n\t\t\tsz.append(clusterData[i][2] * 3.0)\n\t\t\tcol.append(col_map[key])\n\tplt.scatter(x=xx, y=yy, s=sz, c=col)\n\tplt.show()\n\n# test entry\nif __name__ == \"__main__\":\n\tdata = readEarthquakeDataFile('./dataset/earthquakes_past_day.csv')\n\t# generate 6 cluster centers at the beginning\n\tclusterCenters = createIntialClusterCenter(6, data)\n\tfor i in range(7): # perform 7 iterations to form the final clusters\n\t\teachClusterData = performKMeansCluster(6, clusterCenters, data)\n\tvisualizeEarthquakeClusters(eachClusterData)\n","repo_name":"wenxiwu777/leetcode_practice","sub_path":"KMeansCluster.py","file_name":"KMeansCluster.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37129504061","text":"import time\nimport logging\nfrom functools import wraps\n\n\ndef trace(func):\n @wraps(func)\n def wrapper(self, request, context):\n start = time.time()\n method_name = func.__name__\n logging.info(\"%s request: %s\", method_name, str(request))\n result = func(self, request, context)\n interval = (time.time() - start) * 1000\n logging.info(\"%s response: %.2f ms\" % (\n method_name, result.code, result.msg, interval\n ))\n return result\n\n return wrapper\n","repo_name":"wongxinjie/python-snippets","sub_path":"grpc-demo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73162748675","text":"import os\nimport cv2\nimport boto3\nimport numpy as np\nimport sys\nimport tlsh\nimport psycopg2\n\nif not os.path.exists(\"config.py\"):\n os.system(\"chmod +x configen.sh\")\n os.system(\"./configen.sh\")\n\nimport config as c\n\nclass DataObject:\n '''\n Class to represent a keypoint to write to database\n\n self.pt: (Float, Float)\n self.size: Float\n self.angle: Float\n self.octave: Int\n self.class_id: Int\n self.desc: Numpy Array\n\n '''\n def 
__init__(self, pt, size, angle, response, octave, class_id, desc):\n '''\n Initalises variables for function\n '''\n self.pt = pt\n self.size = size\n self.angle = angle\n self.response = response\n self.octave = octave\n self.class_id = class_id\n self.desc = desc\n\n def __str__(self):\n '''\n Returns string representation for output to database\n '''\n return f'{self.pt}|{self.size}|{self.angle}|{self.response}|\\\n {self.octave}|{self.class_id}|{self.desc}'\n\ndef pack_keypoints(keypoints, desc):\n '''\n This will take a list of opencv keypoints and turn them into a list of \n DataObjects definined above\n\n keypoints: List\n desc: Numpy Array\n\n Returns: List (of DataObjects)\n '''\n kp_lst = []\n i = 0\n for point in keypoints:\n tmp_desc = np.array_str(desc[i]).replace('\\n','')\n tmp = DataObject(point.pt, point.size, point.angle, point.response, \\\n point.octave, point.class_id, tmp_desc)\n i += 1\n kp_lst.append(tmp.__str__())\n return kp_lst\n\ndef unpack_keypoints(kp_lst):\n '''\n Take list of DataObjects and converts back to list of opencv keypoints\n\n Returns: (List, Numpy Array)\n '''\n kp = []\n desc = []\n kp_lst = kp_lst.split('\\n')\n for item in kp_lst:\n item = item.split('|')\n tmp_x,tmp_y = eval(item[0])\n kp.append(cv2.KeyPoint(x=tmp_x,y=tmp_y,_size=float(item[1]),\n _angle=float(item[2]),_response=float(item[3]), \n _octave=int(item[4]),_class_id=int(item[5])))\n tmp_desc = ' '.join(item[6:])[1:-1]\n #print(np.fromstring(tmp_desc, sep='.'))\n tmp_str = ''\n for char in tmp_desc:\n if char == '.':\n tmp_str += char + ' '\n else:\n tmp_str += char\n desc.append(np.fromstring(tmp_str, sep=' '))\n return kp, np.array(desc) \n\ndef connect_postgres():\n '''\n Opens a connection to a postgres database\n\n Returns: Database connection object\n '''\n conn_str = f\"dbname='{c.dbname}' user='{c.user}' \\\n host='{c.host}' port={c.port} password='{c.password}'\"\n try:\n conn = psycopg2.connect(conn_str)\n f = open('panic.log', 'w')\n f.write('Connection success\\n')\n f.close()\n except psycopg2.Error as e:\n f = open('panic.log', 'w')\n f.write('Connection not success')\n f.close()\n return (\"Database connection error: \", e)\n return conn\n\ndef sanatise(string):\n string = '\\n'.join(string)\n out_string = ''\n for char in string:\n #if char == \"'\":\n # out_string += \"\\\"\"\n if char == ' ': \n pass\n else:\n out_string += char\n return out_string\n\ndef write_postgres(dhash, datapoint, url='Unknown'):\n '''\n Takes a hash of datapoint, datapoint and source url and writes that to the database\n\n dhash: String (Hash of datapoints)\n datapoint: String (Keypoints of an image)\n url: String (Source of image)\n\n Returns None\n '''\n conn = connect_postgres()\n cur = conn.cursor()\n datapoint = sanatise(datapoint)\n string = f\"INSERT INTO curartdata VALUES ('{str(dhash)}', '{url}', '{str(datapoint)}');\"\n cur.execute(string)\n conn.commit()\n conn.close()\n return\n\ndef query_postgres(dhash):\n '''\n Reads in a hash and queries database for a similar hash\n\n dhash: String (Hash of keypoints)\n\n Returns: String (Results of query)\n '''\n conn = connect_postgres()\n print(type(conn))\n print(conn)\n cur = conn.cursor()\n # levenshtein was 13\n cur.execute(f\"SELECT hash, url, datapoint \\\n FROM curartdata \\\n WHERE levenshtein(hash, \\'{dhash}\\') <= 39 \\\n ORDER BY levenshtein(hash, \\'{dhash}\\') \\\n LIMIT 5;\")\n #cur.execute(f\"SELECT hash, url, datapoint FROM curartdata;\")\n results = cur.fetchall()\n conn.close()\n return results\n\nif __name__ == 
'__main__':\n org_img = cv2.imread('orginal.jpg')\n kp,desc = get_keypoints(org_img)\n\n if len(sys.argv) > 1 and sys.argv[1] == '-g':\n org_results = cv2.drawKeypoints(org_img, kp, None, color=(0, 255, 0))\n cmp_results = cv2.drawKeypoints(org_img, kp_2, None, color=(0, 255, 0))\n cv2.imshow(\"Org\", org_results)\n cv2.imshow(\"Cmp\", cmp_results)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"gruunday/CURART","sub_path":"src/data_mngmnt.py","file_name":"data_mngmnt.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21976195935","text":"import os, cv2\nimport numpy as np\nfrom glob import glob\nimport torch, mmcv, argparse\n\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.parallel import collate, scatter\n\nfrom mmdet.apis.inference import LoadImage\nfrom mmdet.datasets.pipelines import Compose\n\nfrom mmdet.apis import init_detector\nfrom mmdet.models import build_detector\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, cv2, mmcv, torch, cvut\nfrom mmdet.datasets import build_dataset\nimport os\nfrom mmcv import Config\nimport tqdm\n\n#------------------------------------------------------------------------------\n# Utilization\n#------------------------------------------------------------------------------\n\ndef inference_detector(model, data):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n return result\n\n\n#------------------------------------------------------------------------------\n# ArgumentParser\n#------------------------------------------------------------------------------\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"cfg\", type=str, default=None,\n help=\"Config file\")\n\nparser.add_argument(\"--ckpt\", type=str, default=None,\n help=\"Checkpoint file\")\n\nparser.add_argument(\"--det_thr\", type=float, default=0.3,\n help=\"Detection threshold\")\n\nparser.add_argument(\"--seg_thr\", type=float, default=0.5,\n help=\"Segmentation threshold\")\n\nparser.add_argument(\"--data_dir\", type=str,\n default=\"/data/coco/images/val2017/\",\n help=\"Data directory\")\nparser.add_argument(\"--out_dir\", type=str, default='cache',\n help=\"font_scale to draw bounding boxes\")\n\nparser.add_argument(\"--num_imgs\", type=int, default=50,\n help=\"Number of images for visualization\")\n\nparser.add_argument(\"--thickness\", type=int, default=5,\n help=\"thickness to draw bounding boxes\")\n\nparser.add_argument(\"--font_scale\", type=int, default=4,\n help=\"font_scale to draw bounding boxes\")\n\nparser.add_argument(\"--device\", type=str, default='cuda',\n help=\"cpu or gpu\")\n\n\nargs = parser.parse_args()\n\n\n#------------------------------------------------------------------------------\n# Main\n#------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n # Build model\n model = init_detector(args.cfg, args.ckpt, device=args.device)\n os.makedirs(args.out_dir, exist_ok=True)\n\n # build dataset\n cfg = Config.fromfile(args.cfg)\n print(cfg)\n dataset = build_dataset(cfg.data.test)\n # Inference\n for i, data in tqdm.tqdm(enumerate(dataset)):\n data = scatter(collate([data], samples_per_gpu=1), [args.device])[0]\n result = inference_detector(model, data)\n\n # test\n image = mmcv.imread(data['img_metas'][0][0]['filename']) \n bboxes = np.vstack(result)\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in 
enumerate(result)\n ]\n labels = np.concatenate(labels)\n \n score_thr = 0.3\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds].tolist()\n image = cvut.draw_bboxes(image, bboxes[:,:4], labels=labels, classnames=dataset.CLASSES, thickness=2, font_size=2, font_thickness=2) \n\n # img = model.show_result(data['img_metas'][0][0]['filename'], result, score_thr=args.det_thr, show=False, thickness=args.thickness, font_scale=args.font_scale)\n\n out_file = os.path.join(args.out_dir, os.path.basename(data['img_metas'][0][0]['ori_filename']))\n # cv2.imwrite(out_file, img)\n cv2.imwrite(out_file, image)\n print(\"Output is saved at {}\".format(out_file))\n if i > args.num_imgs:\n break\n","repo_name":"TuanTNG/VDTSE","sub_path":"tools/visualize_testset.py","file_name":"visualize_testset.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"71019259076","text":"import heapq\nINF = 1e9\nN, M, X = list(map(int, input().split()))\ngraph = [[] for i in range(N+1)]\nfor i in range(M):\n a,b,c = list(map(int, input().split()))\n graph[a].append((b,c))\n\ndef dikstra(start):\n q = []\n dist = [INF]*(N+1)\n heapq.heappush(q,(start, 0))\n\n while q:\n ni, wi = heapq.heappop(q)\n if(wi > dist[ni]):\n continue\n for nk, wk in graph[ni]:\n if(wk+wi < dist[nk]):\n dist[nk] = wk+wi\n heapq.heappush(q,(nk,wk+wi))\n return dist\n \nxto_dist = dikstra(X)\nm = 0\nfor i in range(1,N+1):\n if(i == X):\n continue\n i_dist = dikstra(i)\n if(m < i_dist[X] + xto_dist[i]):\n m = i_dist[X]+xto_dist[i]\n\nprint(m)\n","repo_name":"algojunior/sunjungAn","sub_path":"Graph Theorem/1238_gold.py","file_name":"1238_gold.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20308780675","text":"def readFiles():\n with open(\"dane_6_1.txt\",\"r\") as file1:\n dane1=[x.strip() for x in file1.readlines()]\n with open(\"dane_6_2.txt\",\"r\") as file2:\n dane2=[list(x.split()) for x in file2.readlines()]\n with open(\"dane_6_3.txt\",\"r\") as file3:\n dane3=[list(x.split()) for x in file3.readlines()]\n return (dane1,dane2,dane3)\ndane1,dane2,dane3=readFiles()\nk=107\ndef codeWord(word,key):\n newWord=\"\"\n for letter in word:\n indexletter=ord(letter)+key\n while indexletter>90:\n indexletter-=26\n\n\n newWord+=chr(indexletter)\n return newWord\ndef unCodeWord(word):\n newWord = \"\"\n if len(word)==1:\n return word[0]\n\n word[1]=int(word[1])%26\n\n for letter in word[0]:\n\n indexletter=ord(letter)-word[1]\n while indexletter<65:\n indexletter+=26\n while indexletter>90:\n indexletter-=25\n\n newWord += chr(indexletter)\n return newWord\ndef checkIfSmaeKey(letter1,letter2):\n mainkey = ord(letter2[0]) - ord(letter1[0])\n if mainkey<0:\n mainkey=-mainkey\n\n\n for i in range(len(letter1)):\n\n key=(ord(letter2[i])%26-ord(letter1[i])%26)\n if key<0:\n key=-key\n\n\n\n if mainkey==key or 26-key==mainkey:\n pass\n else:\n return False\n return True\n\ndef zad61(dane1,key):\n with open(\"wyniki_6_1.txt\", \"w\") as file:\n for word in dane1:\n file.write(codeWord(word,key)+\"\\n\")\ndef zad62(dane2):\n with open(\"wyniki_6_2.txt\",\"w\") as file2:\n for i in dane2:\n file2.write(unCodeWord(i)+\"\\n\")\ndef zad63(dane3):\n with open(\"wynik_6_3.txt\", \"w\") as file:\n for word in dane3:\n if False== checkIfSmaeKey(word[0],word[1]):\n file.write(f\"{word[0]} {word[1]} 
\\n\")\n\nzad62(dane2)\nzad61(dane1,k)\nzad63(dane3)\n\n","repo_name":"aloix123/Korepetycje","sub_path":"matura2016/zad6.py","file_name":"zad6.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18831591315","text":"import cv2\r\nimport numpy as np\r\nimport wx\r\nfrom pynput.mouse import Controller, Button\r\n\r\nlower = np.array([130,86,36]) # Min HSV \r\nupper = np.array([154,216,255])\r\n\r\nkernelOpen = np.ones((5,5))\r\nkernelClose = np.ones((15,15))\r\nopenX, openY, openW, openH = 0,0,0,0\r\napp = wx.App(False)\r\nscrX, scrY = wx.GetDisplaySize()\r\nframeR = 50\r\nwCam = 640\r\ncamX = wCam - 2*frameR\r\nhCam = 480\r\ncamY = hCam - 2*frameR\r\nmouse = Controller()\r\npinch = 0\r\nmLocOld = np.array([0,0])\r\nmLocNew = np.array([0,0])\r\ndampner = 2.75\r\n\r\ncap = cv2.VideoCapture(1)\r\n# \"http://192.168.0.103:8080/video\"\r\nwhile True:\r\n success, img = cap.read() \r\n # img = cv2.resize(\r\n # img,(320,240))\r\n # img = cv2.flip(img,1)\r\n imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(imgHSV,lower,upper)\r\n imgres = cv2.bitwise_and(img,img,mask=mask)\r\n maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN,kernelOpen)\r\n maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE,kernelClose)\r\n\r\n maskFinal = maskClose.copy()\r\n conts, h = cv2.findContours(maskFinal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n # cv2.drawContours(img, conts, -1,(0,0,255),3)\r\n cv2.rectangle(img,(frameR, frameR), (wCam-frameR, hCam-frameR), (0,0,255),2)\r\n if len(conts)==2: \r\n if pinch ==1:\r\n pinch=0\r\n mouse.release(Button.left)\r\n x1,y1,w1,h1 = cv2.boundingRect(conts[0])\r\n cx1, cy1 = x1+w1//2, y1+h1//2\r\n x2,y2,w2,h2 = cv2.boundingRect(conts[1])\r\n cx2, cy2 = x2+w2//2, y2+h2//2\r\n cx, cy = (cx1+cx2)//2, (cy1+cy2)//2\r\n cv2.rectangle(img,(x1,y1), (x1+w1,y1+h1),(0,0,255),2)\r\n cv2.rectangle(img,(x2,y2), (x2+w2,y2+h2),(0,0,255),2)\r\n cv2.line(img,(cx1,cy1), (cx2,cy2),(0,0,255),2)\r\n cv2.circle(img, (cx,cy), 2, (255,255,255), 2)\r\n if cx in range(frameR,wCam-frameR) and cy in range(frameR,hCam-frameR):\r\n cv2.rectangle(img,(frameR, frameR), (wCam-frameR, hCam-frameR), (0,255,0),2)\r\n mLocNew = mLocOld+((cx,cy)-mLocOld)/dampner\r\n x3 = np.interp(mLocNew[0],(frameR,wCam-frameR),(0,scrX))\r\n y3 = np.interp(mLocNew[1],(frameR,hCam-frameR),(0,scrY))\r\n mouse.position=(x3, y3)\r\n # if mouse.position != (x3,y3):\r\n # pass\r\n mLocOld = mLocNew\r\n openX, openY, openW, openH = cv2.boundingRect(np.array([[x1,y1],[x1+w1,y1+h1],[x2,y2],[x2+w2,y2+h2]]))\r\n \r\n # For clicking\r\n elif len(conts)==1:\r\n x,y,w,h = cv2.boundingRect(conts[0])\r\n cv2.rectangle(img,(x,y), (x+w,y+h),(0,0,255),2) \r\n cx, cy = x+w//2, y+h//2\r\n if pinch == 0:\r\n if abs((w*h-openW*openH)*100/(w*h))<30:\r\n pinch=1\r\n mouse.press(Button.left)\r\n openX, openY, openW, openH = 0,0,0,0\r\n \r\n else:\r\n cv2.circle(img, (cx,cy), ((w+h)//4), (0,255,255), 2)\r\n mLocNew = mLocOld+((cx,cy)-mLocOld)/dampner\r\n x1 = np.interp(mLocNew[0],(frameR,wCam-frameR),(0,scrX))\r\n y1 = np.interp(mLocNew[1],(frameR,hCam-frameR),(0,scrY))\r\n if cx in range(frameR,wCam-frameR) and cy in range(frameR,hCam-frameR):\r\n cv2.rectangle(img,(frameR, frameR), (wCam-frameR, hCam-frameR), (0,255,0),2)\r\n mouse.position = x1, y1\r\n # if mouse.position != (x1,y1):\r\n # pass\r\n mLocOld = mLocNew\r\n\r\n # Display images\r\n cv2.imshow('Camera',img)\r\n # cv2.imshow('mask', mask)\r\n # cv2.imshow('result', 
imgres)\r\n # cv2.imshow('maskOpen',maskOpen)\r\n # cv2.imshow('maskClose',maskClose)\r\n if cv2.waitKey(1) & 0xFF==ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# ## Imported Libraries\r\n\r\n# ## Declaration of global variables\r\n\r\n# while True:\r\n# ## Logic for Object detection and tracking\r\n# :\r\n# :\r\n \r\n# if(len(conts)==2):\r\n# ## Logic for open gesture, moving mouse without click\r\n# :\r\n# :\r\n# elif(len(conts)==1):\r\n# ## Logic for close gesture, left button clicking\r\n# :\r\n# :\r\n \r\n# cv2.imshow('Camera',img)\r\n# if cv2.waitKey(1) & 0xFF==ord('q'):\r\n# break\r\n\r\n# cap.release()\r\n# cv2.destroyAllWindows()\r\n\r\n# app = wx.App(False)\r\n# scrX, scrY = wx.GetDisplaySize()\r\n# frameR = 50\r\n# wCam = 640\r\n# camX = wCam - 2*frameR\r\n# hCam = 480\r\n# camY = hCam - 2*frameR\r\n\r\n# cv2.rectangle(img,(frameR, frameR), (wCam-frameR, hCam-frameR), (0,0,255),2)","repo_name":"DV821/Virtual-Mouse-with-Object-Tracking","sub_path":"Virtual Mouse OT.py","file_name":"Virtual Mouse OT.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17376121150","text":"import pandas as pd\n\nfrom mining_tweets import retrievedUsers\n\nlib_df = pd.read_pickle('/Users/annalisa/PycharmProjects/MSc_Project/data/lib_df.pkl').drop_duplicates(\n subset='screen_name', keep='first')\nlib_df = lib_df[lib_df.screen_name.isin(retrievedUsers.retrievedLibUsers)]\n\n# descriptions containing a age-like number\nanyAge = lib_df.description.str.match(r'[1-9][0-9]')\nlib_age = lib_df[anyAge].loc[:, ['screen_name', 'description']]\nlib_age.to_csv('lib_age.csv')\n\nyears_old_age = lib_df[lib_df.description.str.match(r'years|yrs|old|born')].loc[:, ['screen_name', 'description']]\nyears_old_age.to_csv('lib_years_old_age.csv')\n\nwo_man = lib_df[lib_df.description.str.contains(r' woman | man ')].loc[:, ['screen_name', 'description']]\nwo_man.to_csv('lib_wo_man.csv')\n\n# descriptions containing the word 'retired'\nretired = lib_df[lib_df.description.str.contains('retired')].loc[:, ['screen_name', 'description']].to_csv(\n 'lib_retired.csv')\n\nprofessional = lib_df[lib_df.description.str.contains(r' exper.* | profession | professional ')].loc[:,\n ['screen_name', 'description']]\nprofessional.to_csv('/Users/annalisa/PycharmProjects/MSc_Project/unversioned/age/age_lib/lib_professional.csv')\n\nrandom_sample = lib_df.loc[:, ['screen_name', 'description']].sample(n=300, random_state=300)\nrandom_sample.to_csv('/Users/annalisa/PycharmProjects/MSc_Project/unversioned/age/age_lib/lib_random.csv')\n\nstudent = lib_df[lib_df.description.str.contains(r' [Ss]tudent.* | [Uu]niversit.* ')].loc[:,\n ['screen_name', 'description']]\nstudent.to_csv('/Users/annalisa/PycharmProjects/MSc_Project/unversioned/age/age_lib/lib_student.csv')\n","repo_name":"annalisamf/msc-computer-science-project","sub_path":"age/age_lib/lib_age_label.py","file_name":"lib_age_label.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74333537155","text":"from rest_framework import serializers\nfrom django.db.models import Prefetch\nfrom enchiridionapi.models import Playlist, PlaylistEpisode\nfrom enchiridionapi.serializers import LocalEpisodeSerializer\n\nclass PlaylistSerializer(serializers.ModelSerializer):\n episodes = serializers.SerializerMethodField()\n likes_count = 
serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Playlist\n fields = ['id', 'user_id', 'name', 'description', 'episodes', 'likes_count']\n\n def get_episodes(self, obj):\n episode_playlist = obj.playlist_episodes\n serializer = LocalEpisodeSerializer([ep.episode for ep in episode_playlist], many=True)\n return serializer.data\n\n @staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n queryset = queryset.prefetch_related(\n Prefetch(\n 'playlistepisode_set',\n queryset=PlaylistEpisode.objects.select_related('episode').order_by('order_number'),\n to_attr='playlist_episodes'\n )\n )\n return queryset\n","repo_name":"macleann/enchiridion-server","sub_path":"enchiridionapi/serializers/playlist_serializer.py","file_name":"playlist_serializer.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34581250801","text":"import cv2\nimport numpy as np\nimport time\nimport os\nimport pandas as pd \nfrom PIL import Image, ImageFont, ImageDraw\nimport matplotlib.pyplot as plt\n\ndirectory = os.path.dirname(__file__)\nroi_df = pd.read_pickle('/Users/juancachafeiro/Desktop/VideoBaker/my_df (1).p')\nprint(roi_df.head())\n\nvideo = cv2.VideoCapture('/Users/juancachafeiro/Movies/Us_CV/Unprocessed_Video.mp4')\n\ncenters = pd.DataFrame(columns=['Frame', 'Center'])\n\n\ndef pixels_to_meters(pixel_value, pixels_per_meter):\n\treturn pixel_value/pixels_per_meter\n\nvid_x = 720\nvid_y = 480\n\noutput_x = 1920\noutput_y = 1080\n\ncount = 0\nstart_x = 0\nstart_time = 0\nend_time = 0\nprev_x1 = 0\nfor index, row in roi_df.iterrows():\n\tret,frame = video.read()\n\tif ret == False:\n\t\tbreak;\n\troi_array = row['Roi']\n\n\tfor roi in roi_array:\n\t\t# Adjust Rois for Video Size Output\n\t\ty1 = int((roi[0] / vid_x)*output_y)\n\t\ty2 = int((roi[2] / vid_x)*output_y)\n\t\tx1 = int((roi[1] / vid_y)*output_x)\n\t\tx2 = int((roi[3] / vid_y)*output_x)\n\t\tw = np.absolute(x2-x1)\n\t\th = np.absolute(y2-y1)\n\t\ttan = w/h\n\t\tatan = h/w\n\t\t#First, check for rois that are outside of the view bounds\n\t\tif y1 < output_y * 0.4:\n\t\t\tcontinue\n\t\t# if tan > 3 or atan > 3:\n\t\t# \tcontinue\n\t\t# if prev_x1 != 0 and np.absolute(x1-prev_x1) > output_x * 0.1:\n\t\t# \tcontinue\n\t\t#print('Frame: {}, X1: {}, X2: {}, Y1: {}, Y2:{}'.format(count,x1,x2,y1,y2))\n\t\tcv2.rectangle(frame,(x1,y1),(x2,y2),(0,255,0),3)\n\t\tprev_x1 = x1\n\t\tx1+=int((x2-x1)//2)\n\t\ty1+=int((y2-y1)//2)\n\t\tcenter = (x1,y1)\n\t\tcv2.circle(frame,center, 3, (0,0,255), -1)\n\t\tcenters = centers.append({'Frame': count, 'Center': center[0]}, ignore_index=True)\n\t\tif start_x == 0:\n\t\t\tstart_x = center[0]\n\t\t\tstart_time = count\n\t\t\t\n\tcv2.imshow('frame',frame)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\n\tcount+=1\n\n\nvideo_length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\nx = centers['Frame'].tolist()\ny = centers['Center'].tolist()\n\nz = np.polyfit(x, y, 8)\np = np.poly1d(z)\n\nxp = np.linspace(1, video_length, video_length)\n\n\nplt.scatter(x, y, s=200, alpha = 0.8)\nplt.ylim(ymin=0,ymax=2000)\nplt.plot(xp, p(xp), '-')\nplt.show()\n\n#Select Track\nvideo.release();\nvideo = cv2.VideoCapture('/Users/juancachafeiro/Movies/Us_CV/Unprocessed_Video.mp4')\n\nret,frame = video.read()\n\nshowCrosshair = False\nfromCenter = False\nr = cv2.selectROI(\"Image\", frame, fromCenter, showCrosshair)\ntrack_width = r[2]\n\nvideo.release();\nvideo = 
cv2.VideoCapture('/Users/juancachafeiro/Movies/Us_CV/Unprocessed_Video.mp4')\ntrack_half_width = track_width//2\n\ncenter_of_track = start_x\ndistance_of_track_in_meters = 10\npixels_per_meter = track_width/distance_of_track_in_meters\n\n# BRAND THE VIDEO\ncount = 1\nsub_count = 0\nprev_x = start_x\nprev_speed = 0\n\nbaked_video_x = 576\nbaked_video_y = 720\n\nfourcc = cv2.VideoWriter_fourcc(*'a\\0\\0\\0')\nout = cv2.VideoWriter('/Users/juancachafeiro/Movies/Us_CV/output.mp4',fourcc, 15, (baked_video_x,baked_video_y))\nfont_path = os.path.join(directory, '/Roboto_Condensed/RobotoCondensed-Bold.ttf')\nmetrics_font = ImageFont.truetype(font_path, 50)\nfill_name = (249,29,67,255)\nfinal_speed=0\nsub_count = 0\nwhile True:\n\tret,frame = video.read()\n\tif ret == False:\n\t\tbreak;\n\tvid_x = int(np.floor(p(count)))\n\tif vid_x > output_x: \n\t\tvid_x = start_x\n\tif vid_x < 0:\n\t\tvid_x = start_x\n\t#calc_speed\n\tdistance = vid_x - prev_x\n\tpixel_speed = pixels_to_meters((distance/(1/60)), pixels_per_meter)*3.6\n\tsped_diff = pixels_to_meters((pixel_speed - prev_speed), pixels_per_meter)\n\t\n\tcenter=(vid_x, output_y//2)\n\tprint(vid_x)\n\ty = center[1]-(baked_video_y//2)\n\tprint('CropY: {}'.format(y))\n\tx = center[0]-(baked_video_x//2)\n\tprint('CropX: {}'.format(x))\n\tcount+=1\n\tif y < 0 or y + baked_video_y > output_y:\n\t\tcontinue\n\tif x < 0 or x + baked_video_x > output_x:\n\t\tcontinue\n\n\tcropped_frame = frame[y:y+baked_video_y, x:x+baked_video_x]\n\t#cv2.circle(frame,center, 63, (0,0,255), -1)\n\t#cv2.putText(cropped_frame,'Speed: {:.0f} km/h'.format(pixel_speed),(10,100), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),1,cv2.LINE_AA)\n\tpil_img = Image.fromarray(cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB))\n\td = ImageDraw.Draw(pil_img)\n\tsub_count+=1\n\tif sub_count > 5:\n\t\tsub_count = 0\n\t\tfinal_speed = int(pixel_speed)\n\tt = (count-start_time)/60\n\tif t < 0:\n\t\tt = 0\n\td.text((20, 100), '{0:.0f} Km/h'.format(final_speed), font=metrics_font, fill=fill_name)\n\td.text((20, 175), '{:.2f} s'.format(t), font=metrics_font, fill=fill_name)\n\tout.write(cv2.cvtColor(np.array(pil_img),cv2.COLOR_RGB2BGR))\n\tprev_x = vid_x\n\tprev_s = pixel_speed\n\tdel d\n\tif cv2.waitKey(4) & 0xFF == ord('q'):\n\t\tbreak\n\nvideo.release()\nout.release()\n","repo_name":"JuanCCS/video_baker","sub_path":"video_baker.py","file_name":"video_baker.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5868110228","text":"# NOTE: count_emojis is assumed to be provided by the sibling task1 module\nfrom task1 import count_emojis\n\ndef test_count_emojis():\n    nature_url = 'https://emojipedia.org/nature/'\n    music_url = 'https://emojipedia.org/music/'\n    science_url = 'https://emojipedia.org/science/'\n\n    nature_count = count_emojis(nature_url)\n    music_count = count_emojis(music_url)\n    science_count = count_emojis(science_url)\n\n    assert nature_count >= 0, \"The number of emojis in the Nature section must be non-negative\"\n    assert music_count >= 0, \"The number of emojis in the Music section must be non-negative\"\n    assert science_count >= 0, \"The number of emojis in the Science section must be non-negative\"\n\n    print(\"Tests passed successfully\")\n\ntest_count_emojis()\n","repo_name":"juliamin316/d_z","sub_path":"dz_5/task1_test.py","file_name":"task1_test.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21511979945","text":"# import numpy as np\nfrom collections import deque\n\n# Trie Node\nclass TrieNode:\n    
def __init__(self):\n        self.children = {}\n        self.is_word = False\n\n# Trie Data Structure\nclass Trie:\n    def __init__(self):\n        self.root = TrieNode()\n\n    def insert(self, word):\n        current = self.root\n        for char in word:\n            if char not in current.children:\n                current.children[char] = TrieNode()\n            current = current.children[char]\n        current.is_word = True\n\n    def search(self, word):\n        current = self.root\n        for char in word:\n            if char not in current.children:\n                return False\n            current = current.children[char]\n        return current.is_word\n\n    def get_suggestions(self, word, max_distance=2):\n        suggestions = []\n        queue = deque([(self.root, word, \"\", 0)])\n\n        while queue:\n            node, remaining_word, current_word, distance = queue.popleft()\n\n            if distance > max_distance:\n                continue\n\n            if remaining_word == \"\":\n                if node.is_word:\n                    suggestions.append(current_word)\n            else:\n                if remaining_word[0] in node.children:\n                    queue.append((node.children[remaining_word[0]], remaining_word[1:], current_word + remaining_word[0], distance))\n                queue.extend(\n                    (child, remaining_word, current_word + char, distance + 1)\n                    for char, child in node.children.items()\n                )\n\n        return suggestions\n\n\n# Example usage\ndef main():\n    # Create a Trie and insert some words\n    trie = Trie()\n    dictionary = [\"apple\", \"banana\", \"cherry\", \"grape\", \"mango\", \"orange\"]\n    for word in dictionary:\n        trie.insert(word)\n\n    # Search for a word\n    word = \"bannana\" # Misspelled word\n    if trie.search(word):\n        print(f\"{word} exists in the dictionary.\")\n    else:\n        suggestions = trie.get_suggestions(word)\n        if suggestions:\n            print(f\"{word} is misspelled. Suggestions: {', '.join(suggestions)}\")\n        else:\n            print(f\"No suggestions found for {word}.\")\n\n# if __name__ == \"__main__\":\nmain()","repo_name":"Psquare2000/redun","sub_path":"ayush.py","file_name":"ayush.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3152918307","text":"\"\"\"Creating a calculator application!\n\nStart date: July 31, 2023 6:01 PM\nAuthor: Dhyeya Padhya\"\"\"\n\ninputs = [] #user Input\ndef get_User_Input():\n    \"\"\"Take in User Input.\n    \n    This function will take input from users in the form of numbers and a single string 'Done', which is when it will end.\n    The inputs will be saved in a list in float form and throw an exception when anything other than the allowed inputs is \n    entered by the user. In case an exception is thrown, it will then allow the user another opportunity to enter a number!\n    Input - None\n    Output - inputs: a list of inputs.\"\"\"\n    user_Exit = False # will be true once the user enters 'done'\n    \n    while not user_Exit:\n        user_Input = input(\"Enter a number if you wish or enter 'Done' to finish:\")\n        if user_Input.lower() == \"done\":\n            user_Exit = True #will become true and the loop would terminate since not True = False\n            break\n        else:\n            try:\n                inputs.append(float(user_Input))\n                print(inputs)\n            except ValueError:\n                print(\"Invalid Input! 
Please enter a number or 'Done'!\")\n                continue #allows user another chance\n    return inputs\n\n#Main method to test all methods\ndef main():\n    get_User_Input() #Run the main method and input numbers\n    print(get_User_Input())\n    inputs.clear() #empties the inputs list\n    \n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DeeP-008/python-calculator","sub_path":"src/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39844472578","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nfrom threading import Timer\nfrom flaskApp.modules import db_sqlite\nimport os\nimport time\nimport logging\n\n\nclass model():\n    def __init__(self):\n        self.table_name = 'price_list'\n        self.mysqldb = db_sqlite.model()\n        self.target = 'http://zzny.zhengzhou.gov.cn/ncpjg/index_1.jhtml'\n        self.names = [] # stores node names\n        self.urls = [] # stores node links\n        self.list_type = ['蔬菜', '粮食', '食用油', '肉类', '水产品', '禽蛋类']\n\n    def get_soup(self, target_url):\n        str_html = os.popen('curl '+ target_url).read()\n        # print(str_html)\n        soup = BeautifulSoup(str_html, 'html.parser')\n        return soup\n\n    def get_download_url(self):\n        self.names = []\n        self.urls = []\n        soup = self.get_soup(self.target)\n        list_a = soup.select('.list-line a')[0:1]\n        for item in list_a:\n            self.names.append(item.string.replace('\\n', '').strip())\n            self.urls.append(item.get('href'))\n        self.names.reverse()\n        self.urls.reverse()\n        # print(self.names)\n        # print(self.urls)\n\n    def get_contents(self, target_url):\n        self.list_data = []\n        soup = self.get_soup(target_url)\n        str_time = soup.select('.sub-content .small-title')[0].text.strip().split('时间:')[1]\n        list_table = soup.select('table')\n        sql = 'select * from price_list where time=\"'+str_time+'\" limit 0,1'\n        result = self.mysqldb.find_data(self.table_name, {}, sql)\n        if len(result['rows']) > 0:\n            return self.list_data\n        for index, value in enumerate(self.list_type):\n            str_type = value\n            list_tr = list_table[index].select('tr')[1:-1]\n            for item in list_tr:\n                list_td = item.select('td')\n                name = list_td[0].text.strip()\n                price_all = 0\n                num = 0\n                for td in list_td[1:]:\n                    price = str(td.text.strip())\n                    try:\n                        price = float(price)\n                    except Exception as e:\n                        price = 0\n                    if price > 0:\n                        price_all+=price\n                        num+=1\n                price = 0 if num ==0 else round(price_all/num, 2)\n                obj = {'type': str_type, 'name': name, 'price': price, 'time': str_time}\n                self.list_data.append(obj)\n        # print(self.list_data)\n        return self.list_data\n\n    def run(self):\n        Timer(24*3600, self.run).start()\n        try:\n            self.get_download_url()\n            for i in self.urls:\n                list_data = self.get_contents(i)\n                if len(list_data) > 0:\n                    self.mysqldb.insert_data(self.table_name, list_data)\n        except Exception as e:\n            logging.error('my_bs4 run error: '+ str(e))\n\ndef main():\n    # start running at 18:00:00\n    print('mybs4 run')\n    int_now = int(time.time())\n    str_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int_now))\n    int_time = int(time.mktime(time.strptime(str_time.split(' ')[0]+' 18:00:00', \"%Y-%m-%d %H:%M:%S\")))\n    if int_now <= int_time:\n        time.sleep(int_time-int_now)\n    else:\n        time.sleep(24*3600-int_now+int_time)\n    mybs4 = model()\n    mybs4.run()\n\n\n# if __name__ == '__main__':\n# 
main()\n","repo_name":"SpkCoder/myproject","sub_path":"price-server/flaskApp/modules/my_bs4.py","file_name":"my_bs4.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"810778285","text":"#!/usr/bin/python3\n\nASSET_ID = \"\"\n\n# Default list. Feel free to edit.\nNEVER_COLLECT = [\"\\.7z$\", \"\\.-wal$\", \"\\.accdb$\", \"\\.accde$\", \"\\.accdr$\", \"\\.accdt$\", \"\\.accdu$\", \"\\.asl$\", \"\\.bin$\", \"\\.csv$\", \"\\.dat$\", \"\\.db-shm$\", \"\\.db$\", \"\\.doc$\", \"\\.docb$\", \"\\.docm$\", \"\\.docx$\", \"\\.dot$\", \"\\.dotm$\", \"\\.DS_Store$\", \"\\.emlx$\", \"\\.img$\", \"\\.json$\", \"\\.key$\", \"\\.md$\", \"\\.mdb$\", \"\\.mpp$\", \"\\.msf$\", \"\\.msg$\", \"\\.numbers$\", \"\\.odb$\", \"\\.odp$\", \"\\.ods$\", \"\\.odt$\", \"\\.one$\", \"\\.oft$\", \"\\.ost$\", \"\\.otf$\", \"\\.pages$\", \"\\.pdf$\", \"\\.plist$\", \"\\.pot$\", \"\\.potx$\", \"\\.ppam$\", \"\\.pps$\", \"\\.ppsm$\", \"\\.ppsx$\", \"\\.ppt$\", \"\\.pptm$\", \"\\.pptx$\", \"\\.pst$\", \"\\.pub$\", \"\\.rar$\", \"\\.rtf$\", \"\\.sldm$\", \"\\.sldx$\", \"\\.sqlite-journal$\", \"\\.sqlite$\", \"\\.swp$\", \"\\.ttf$\", \"\\.tracev3$\", \"\\.txt$\", \"\\.vcf$\", \"\\.xla$\", \"\\.xlam$\", \"\\.xlm$\", \"\\.xls$\", \"\\.xlsb$\", \"\\.xlsm$\", \"\\.xlsx$\", \"\\.xlt$\", \"\\.xltm$\", \"\\.xltx$\", \"\\.xlw$\", \"\\.wpd$\", \"\\.xps$\", \"\\.zip\"]\nALWAYS_COLLECT = [\"\\.app$\", \"\\.appx$\", \"\\.appxbundle$\", \"\\.bat$\", \"\\.class$\", \"\\.cmd$\", \"\\.com$\", \"\\.crx$\", \"\\.dll$\", \"\\.dmg$\", \"\\.drv$\", \"\\.dylib$\", \"\\.ear$\", \"\\.efi$\", \"\\.elf$\", \"\\.exe$\", \"\\.hta$\", \"\\.iso$\", \"\\.jar$\", \"\\.java$\", \"\\.js$\", \"\\.lib$\", \"\\.lnk$\", \"\\.msi$\", \"\\.nar$\", \"\\.pkg$\", \"\\.pl$\", \"\\.ps1$\", \"\\.py$\", \"\\.pyc$\", \"\\.rb$\", \"\\.scr$\", \"\\.sct$\", \"\\.sfx$\", \"\\.sh$\", \"\\.so$\", \"\\.sys$\", \"\\.vb$\", \"\\.vba$\", \"\\.vbs$\", \"\\.vbscript$\", \"\\.war$\", \"\\.xpi$\", \"\\.zsh\"]\n\nimport os # Used for crawling directories\nimport json # JSON payloads\nimport subprocess # Required to call 'file' command\nimport re # Regex for extension matching\nimport requests # Calling REST API\nfrom requests.exceptions import ConnectTimeout\nimport sys # Reading CLI arguments\nimport hashlib # Calculating SHA256\nimport argparse # CLI argument handling\n\n\n \n# Create asset\ndef make_asset(args, asset_id):\n url = \"https://app.stairwell.com/v202112/assets\"\n payload = {\n \"label\": args.name,\n \"environment_id\": {\"id\": args.env_id}\n }\n headers = {\n \"Authorization\": args.api_key,\n \"Content-Type\": \"application/json\"\n }\n response = requests.request(\"POST\", url, json=payload, headers=headers)\n if \"create_time\" in response.text:\n respJson = response.json()\n print(\"Created asset ID: \" + respJson['id']['id'])\n exit(0)\n else:\n print(\"Error: \" + response.text)\n exit(0)\n\ndef sendToStairwell(file):\n file_to_upload = open(file, \"rb\")\n filebytes = file_to_upload.read()\n sha256_hash = hashlib.sha256(filebytes).hexdigest()\n\n stage_1_payload = {\n \"asset\": {\n \"id\": ASSET_ID,\n },\n \"files\": [\n {\n \"filePath\": file,\n \"expected_attributes\": {\n \"identifiers\": [\n {\n \"sha256\": str(sha256_hash)\n }\n ]\n }\n }\n ]\n}\n\n # Make the Stage 1 request\n try:\n response = requests.request(\"POST\", 'https://http.intake.app.stairwell.com/v2021.05/upload', data=json.dumps(stage_1_payload), timeout=5)\n except ConnectTimeout:\n 
print('Stage 1 request has timed out')\n        return\n    stage_1_response = response.json()\n    stage_1_action = stage_1_response['fileActions'][0]['action']\n    print('Inception API action response: ' + stage_1_action)\n\n    # Check for \"UPLOAD\" in the stage 1 \"action\" response\n    if (stage_1_action == 'UPLOAD'):\n        # Build payload for upload\n        stage_2_payload = stage_1_response['fileActions'][0]['fields']\n        stage_2_payload['file'] = filebytes\n\n        # Stage 2 attempt upload\n        upload_url = stage_1_response['fileActions'][0]['uploadUrl']\n        try:\n            response_2 = requests.request(\"POST\", upload_url, files=stage_2_payload, timeout=20)\n        except ConnectTimeout:\n            print('Stage 2 request has timed out')\n            return\n    \n# Upload path\ndef upload(args, ASSET_ID):\n    if ASSET_ID == \"\":\n        sys.exit(\"The ASSET_ID variable is not set, exiting...\")\n    print(\"Uploading recursively from path: \" + args.path)\n    for root, dirs, files in os.walk(args.path, topdown=True):\n        for name in files:\n            print(os.path.join(root, name))\n            filePath = os.path.join(root, name)\n            \n            # Run 'file' command to check file type\n            result = str(subprocess.run([\"file\", filePath], stdout=subprocess.PIPE))\n            \n            # Never collect extensions\n            if re.search(\"|\".join(NEVER_COLLECT), filePath):\n                print(\"blocked extension\")\n            \n            # Must collect extensions\n            elif re.search(\"|\".join(ALWAYS_COLLECT), filePath):\n                print(\"allowed extension\")\n                sendToStairwell(filePath)\n            \n            # File type detected as a Linux binary by 'file' command ('file' prints \"GNU/Linux\", not \"GNU/LINUX\")\n            elif \"GNU/Linux\" in result:\n                print(result)\n                sendToStairwell(filePath)\n            \n            # File type detected as shell script by 'file' command\n            elif \"shell script\" in result:\n                print(result)\n                sendToStairwell(filePath)\n            \n            # Not interesting\n            else:\n                print(\"Not interesting\")\n    \ndef main():\n    # create the top-level parser\n    parser = argparse.ArgumentParser(prog='pyswell.py')\n    subparsers = parser.add_subparsers(help='available commands')\n\n    # By default, sub-parsers are mutually exclusive\n    # Create parser for mkasset\n    parser_mkasset = subparsers.add_parser('mkasset')\n    parser_mkasset.add_argument('--name',\n                                required=True,\n                                help=\"Name of the asset to create\")\n    parser_mkasset.add_argument('--env_id',\n                                required=True,\n                                help=\"Environment ID to create the asset in\")\n    parser_mkasset.add_argument('--api_key',\n                                required=True,\n                                help=\"API key to create the asset with\")\n    parser_mkasset.set_defaults(func=make_asset)\n\n    # Create parser for upload command\n    parser_upload = subparsers.add_parser('upload')\n    parser_upload.add_argument('--path',\n                               required=True,\n                               help=\"Path to the directory to upload\")\n    parser_upload.set_defaults(func=upload)\n\n    # If no arguments have been passed, print help and stop\n    if len(sys.argv) == 1:\n        parser.print_help()\n        sys.exit(1)\n\n    args = parser.parse_args()\n    args.func(args, ASSET_ID)\n    \nif __name__ == \"__main__\":\n    main()\n","repo_name":"stairwell-inc/customer-success","sub_path":"pyswell/pyswell.py","file_name":"pyswell.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12466300811","text":"#Answer to the question 2\n\ndef main():\n\n    total = 0.0\n    length = 0.0\n    average = 0.0\n\n    try:\n        #Get the name of a file\n        filename = input('Enter a file name: ')\n\n        #Open the file\n        infile = open(filename, 'r')\n\n        #Read the file's contents\n        contents = infile.read().strip().split()\n\n        #Display the file's contents\n        print(contents)\n\n        #Read values from file and compute average\n        for num in contents:\n            amount = 
float(num)\n            total += amount\n            length = length + 1\n\n        average = total / len(contents)\n\n        #Close the file\n        infile.close()\n\n        #Print the total, the amount of numbers in the file and the average\n        print('The total is', total)\n        print('There were', length, 'numbers in the file.')\n        print('Average:', format(average, ',.2f'))\n\n    except IOError:\n        print('An error occurred trying to read the file.')\n\n    except ValueError:\n        print('Non-numeric data found in the file')\n\n    except:\n        print('An error has occurred')\n\n\nmain()","repo_name":"Anvar1999/PythonCoursework","sub_path":"src/Test_review/midterm2_2.py","file_name":"midterm2_2.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73138165953","text":"import pickle\nimport cv2\nimport numpy as np\nfrom typing import List\nfrom state_fusion.deploy import all_colors\nfrom state_fusion.deploy_utils import video_feed\n\npath_bag = './saved_state_updates/states_2019_Nov_15__16:13.pkl'\n\n# Step C\n# Agent, given the current state\n# Shooting:\n# \tget_or_set_target (pick one of the enemies in view, or keep the previous one)\n# \tcompute the unit vector towards the target\n# \tmove the mouse to that coordinate\n# \tif the aim is within a certain angle, shoot\n#\n#\n# Movement:\n# \t(gradient-based method)\n# \tplace a peak at every enemy position\n# \tgiven bullet trajectories and speeds, compute an influence function\n#\t(falls/pits are ignored on purpose)\n#\tevaluate the local gradient and take a step according to the delta\n#\tcompute the movement vector from the arrow keys\n#\n# Dodge:\n\t# if a fall is ahead, or damage gradients are very close, trigger a dodge\n\t# compute a move that ends on safe ground, away from any hit\n\n\ndef get_specific_frame(frame_n,cap):\n\n    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_n)\n\n    success_grab = cap.grab()\n    if success_grab:\n        flag, frame = cap.retrieve()\n        return frame\n    return None\n\n\n\"\"\"\n- always show the maximum t_proc\n- for each key, show the latest message\n\n(Keep the list of entities)\n(Keep a history of times, bounding boxes and image crops)\n(Shot trajectories may be useful)\n\nMeta model (position?, area?)\nTake the N bounding boxes and compute their features\nProduce a probabilistic match of every entity against each feature vector\nProduce the probability of a new entity\nProduce updates and decay for old entities\n\nPlot the entities in the model.\n\nHistory rectification algorithm\n\n(A clean entity is created. 
It has a position and a velocity.) If it is not detected continuously, it disappears.\n\n(What happens with a new entity) (create an entity with position and velocity)\n(What happens with an old entity that is detected) (update the entity, its position and velocity)\n(What happens with an old entity that is not detected) (keep the clean entity at its old position, or shift it by its velocity?)\n(What happens with a false positive) (its color vector makes no sense, so it gets dropped)\n\nFor each bounding box compute a color histogram\n- Look for a match given position, color histogram and time.\n- If there is no match, keep it in the history only\n- If there is a match, compute the displacement from the box center\n\"\"\"\n\ncap = cv2.VideoCapture(\"./screen_capture/video_data/out.mp4\")\n\nkeep_for_n_frame = 4\ninstancias_lista = []\n\nfrom collections import namedtuple\nfrom scipy.spatial.distance import cdist\n\nclass Measure:\n    def __init__(self,img,vector,coord,t_proc,timestamp):\n        self.img,self.vector,self.coord,self.t_proc,self.timestamp = img,vector,coord,t_proc,timestamp\nclass Entity:\n    def __init__(self,id,frames_till_update,last_t_updated,n_med,measures):\n        self.id,self.frames_till_update,self.last_t_updated,self.n_med,self.measures = id,frames_till_update,last_t_updated,n_med,measures\n\n\n\ndef calc_histogram_vector(img):\n    # TODO take center\n    chans = cv2.split(img)\n    bins = 8\n    c_vectors = []\n    for c_img in chans:\n        channel_hist = cv2.calcHist([c_img], [0], None, [bins], [0, 256])\n        c_vectors.append(channel_hist/channel_hist.sum())\n\n    vector_out = np.concatenate(c_vectors,axis=0).T\n    return vector_out\n\nclass instance_updater:\n    def __init__(self,wait_till_delete=4):\n        self.all_entitys = [] # Type List[Entity]\n        self.current_id = 0\n        self.wait_till_delete = wait_till_delete\n\n        self.old_entitys = []\n\n        # keep a list of instances (pruned according to keep_for_n_frames)\n        # an instance is (id_inst, frames_without_update, t_last_updated, n_med, list of Measure(img, vector, posx, posy, time))\n\n    def calc_features(self,imgs):\n        new_vectors = []\n        for img in imgs:\n            vector = calc_histogram_vector(img)\n            new_vectors.append(vector)\n        new_matrix = np.concatenate(new_vectors,axis=0)\n        return new_matrix\n\n    def calc_feature_dist(self,feature_matrix):\n\n        features_entitys = []\n        for ent in self.all_entitys:\n            features_entitys.append(ent.measures[-1].vector)\n        matrix_entitys = np.array(features_entitys)\n\n        dists = cdist(feature_matrix,matrix_entitys)\n\n        # norm to sum 1 per row\n        norm_dist = dists/dists.sum(axis=1).reshape(-1,1)\n        return norm_dist\n\n    def calc_pos_time_dist(self,points,time):\n        v0 = 4000 # 4 000 pixels / [s]\n\n        positions = np.array(points)\n        times = np.expand_dims(np.repeat(time,positions.shape[0]),-1)\n        position_z = times*v0 # TODO possible overflow ??\n\n        pos_matrix = np.concatenate([positions,position_z],axis=1)\n\n        # entities' positions\n        old_times = np.array([entity.measures[-1].timestamp for entity in self.all_entitys]).reshape(-1,1)\n        old_z = old_times*v0 # TODO possible overflow ??\n        old_bbs = [entity.measures[-1].coord for entity in self.all_entitys]\n        old_points = np.array([((p0[0] + pf[0]) * 0.5, (p0[1] + pf[1]) * 0.5) for p0, pf in old_bbs])\n        pos_olds = np.concatenate([old_points,old_z],axis=1)\n\n        dists = cdist(pos_matrix,pos_olds)\n\n        # norm to sum 1 per row\n        norm_dist = dists / dists.sum(axis=1).reshape(-1, 1)\n        return norm_dist\n\n    def update_with_measures(self, points, imgs, t_now, t_proc):\n\n\n        features = self.calc_features(imgs)\n\n        has_elements = len(self.all_entitys) > 0\n\n        # transform bbs to points (take mean)\n        if 
has_elements:\n            mean_points = [((p0[0]+pf[0])*0.5,(p0[1]+pf[1])*0.5) for p0,pf in points]  # use the 'points' argument, not the global 'data'\n\n\n            dists_feature = self.calc_feature_dist(features)\n            dists_point_time = self.calc_pos_time_dist(mean_points, t_now)\n\n            th_dist = 0.15 # TODO th\n\n            # import ipdb;ipdb.set_trace()\n\n            # TODO how to weight the two distances\n\n            weighted_distance = (dists_feature*0.5+dists_point_time*0.5)\n\n            # calc min per row (per each new measure)\n            reduce_min = weighted_distance.min(axis=1)\n\n            # if the closest entity is closer than the threshold it is a match\n            matches = weighted_distance.argmin(axis=1)\n            lower_than_th = (reduce_min <= th_dist)\n        else:\n            lower_than_th = [False for x in points]\n\n        # if the measure passes the threshold it is added to the matched instance, updating its values\n        # else create a new instance with (t_last=now, n_med=1, [Measure])\n\n        for ind,(point,img) in enumerate(zip(points,imgs)):\n            newMeasure = Measure(img, features[ind], point,t_proc, t_now)\n\n            if has_elements:\n                args = (point,lower_than_th[ind],reduce_min[ind],matches[ind],th_dist)\n                print('Measure: {0} is_match: {1} min_dist: {2} min_id: {3} th: {4} '.format(*args))\n\n            if lower_than_th[ind]: # if measure has match add measure to respective entity\n                selected_entity = self.all_entitys[matches[ind]]\n\n                selected_entity.frames_till_update = 0\n                selected_entity.n_med += 1\n                selected_entity.measures.append(newMeasure)\n\n            else: # else create new entity\n                new_id = self.current_id\n                self.current_id += 1\n\n                new_entity = Entity(new_id, 0, t_now, 0, [])\n                new_entity.n_med += 1\n                new_entity.measures.append(newMeasure)\n\n                self.all_entitys.append(new_entity)\n\n        for entity in self.all_entitys:\n            entity.frames_till_update += 1\n\n        self.delete_old()\n\n\n    def delete_old(self): # delete entitys with frames_till_update >= wait_till_delete\n\n        to_delete = []\n        for ind,entity in enumerate(self.all_entitys):\n            if entity.frames_till_update >= self.wait_till_delete:\n                print('Deleting {0}'.format(entity.id))\n                to_delete.append(ind)\n\n        entitys_to_keep = []\n        for ind in range(len(self.all_entitys)):\n            if ind in to_delete:\n                self.old_entitys.append(self.all_entitys[ind])\n            else:\n                entitys_to_keep.append(self.all_entitys[ind])\n\n        self.all_entitys = entitys_to_keep\n\n\n    def get_last_instances(self,infer_non_updated=False) -> List[Measure]: # TODO infer olds\n        out={}\n        for ind,entity in enumerate(self.all_entitys): # TODO GIVE ID OF ENTITY AND DRAW\n            if entity.frames_till_update == 1:\n                out[entity.id] = entity.measures[-1]\n        return out\n\nwith open(path_bag,'rb') as f:\n    data = pickle.load(f)\n\nimg_cloned = np.zeros((600,800,3))\nwait_time = 0\nlast_t = -1\n\nupdater = instance_updater(wait_till_delete=4)\n\n\n\nfor msg in data:\n    key,data = msg.popitem()\n\n    time_now = data['timestamp']\n    frame_str = data['t_proc']\n    frame_n = int(float(frame_str.split('_')[0]))\n\n    if frame_n > last_t:\n        img_cloned = get_specific_frame(frame_n,cap).copy()\n        last_t = frame_n\n\n    if key == 'enemys':\n        # update the tracker with this frame's detections\n        updater.update_with_measures(data['coords'],data['imgs'],time_now,frame_str)\n        measures = updater.get_last_instances()\n\n\n        # draw the raw measurement bbs\n        img_cloned_measure = get_specific_frame(frame_n, cap).copy()\n        for t in data['coords']:\n            x2 = cv2.rectangle(img_cloned_measure, t[0], t[1], (0,0,255), 2)\n        cv2.imshow('t2',img_cloned_measure)\n\n\n        # draw the tracked entities' bbs\n        for k,mt in measures.items():\n            points = mt.coord\n            cv2.rectangle(img_cloned, points[0], points[1], tuple(all_colors[key]), 2)\n            cv2.putText(img_cloned, 
str(k), points[0], cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 0, 0), 2, cv2.LINE_AA)\n\n        cv2.imshow('t3',img_cloned)\n        cv2.waitKey(wait_time)","repo_name":"aferral/gungeon_bot","sub_path":"agent_and_actions/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":9784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25939491294","text":"import re\nimport binascii\n\n\n#starting lengths\ncode_len = 0\nstring_len = 0\n\n#part two length\nencoded_len = 0\n\n\nwith open('input2015_08.txt', 'r') as myfile:\n\tinput = myfile.read().split('\\n')\n\n\ndef code_to_string(s):\n\t#removes \" quotes at the beginning and at the end\n\ts = re.sub(r'^\\\"(.*)\\\"$', r'\\1', s)\n\n\t#replaces the '\\x__' in code with appropriate characters\n\th = re.finditer(r'\\\\x([0-9a-f]{2})', s) #makes iterator with all matches\n\tif h: #if anything was found\n\t\tk = re.findall(r'\\\\x([0-9a-f]{2})', s) #makes list of all hex codes found\n\t\tn = 0 # index into k (because h is an iterator, it can't be indexed)\n\t\tfor i in h: #for every object in iterator\n\t\t\ts = s[:i.start()-3*n] + chr(int(k[n], 16)) + s[i.end()-3*n:] #makes new string from beginning till the start of found substring + char of the appropriate hex code + string from the end of found substring till the end\n\t\t\tn += 1 \n\n\n\t#replaces all \\\\ and \\\" with \\ and \"\n\ts = re.sub(r'\\\\([\\\\\\\"])', r'\\1', s)\n\n\treturn(s)\n\n\n\ndef string_to_code(s):\n\t#replaces all \\ and \" with \\\\ and \\\"\n\ts = re.sub(r'([\\\\\\\"])', r'\\\\\\1', s)\n\n\t# encapsulates the entire string in \"\"\n\ts = re.sub(r'^(.*)$', r'\"\\1\"', s) #I'm not sure why the \" does not need an escape character here\n\n\treturn (s)\n\n\nfor i in input:\n\tcode_len += len(i)\n\tstring_len += len(code_to_string(i))\n\tencoded_len += len(string_to_code(i))\n\n\nprint(\"1. the difference of code length and string length: \", code_len-string_len)\nprint(\"2. 
the difference of further encoded code length and code length: \", encoded_len-code_len)","repo_name":"JureRot/adventofcode","sub_path":"2015/2015_08.py","file_name":"2015_08.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3362797424","text":"import json\nimport botocore\nfrom telegram.ext import Updater, MessageHandler, Filters\nfrom loguru import logger\nimport boto3\nfrom common.utils import search_download_youtube_video\n\n\nclass Bot:\n\n    def __init__(self, token):\n        # create frontend object to the bot programmer\n        self.updater = Updater(token, use_context=True)\n\n        # add _message_handler as main internal msg handler\n        self.updater.dispatcher.add_handler(MessageHandler(Filters.text, self._message_handler))\n\n    def start(self):\n        \"\"\"Start polling msgs from users, this function never returns\"\"\"\n        self.updater.start_polling()\n        logger.info(f'{self.__class__.__name__} is up and listening to new messages....')\n        self.updater.idle()\n\n    def _message_handler(self, update, context):\n        \"\"\"Main messages handler\"\"\"\n        self.send_text(update, f'Your original message: {update.message.text}')\n\n    def send_video(self, update, context, file_path):\n        \"\"\"Sends video to a chat\"\"\"\n        context.bot.send_video(chat_id=update.message.chat_id, video=open(file_path, 'rb'), supports_streaming=True)\n\n    def send_text(self, update, text, chat_id=None, quote=False):\n        \"\"\"Sends text to a chat\"\"\"\n        if chat_id:\n            self.updater.bot.send_message(chat_id, text=text)\n        else:\n            # retry https://github.com/python-telegram-bot/python-telegram-bot/issues/1124\n            update.message.reply_text(text, quote=quote)\n\n\nclass QuoteBot(Bot):\n    def _message_handler(self, update, context):\n        to_quote = True\n\n        if update.message.text == 'Don\\'t quote me please':\n            to_quote = False\n\n        self.send_text(update, f'Hi, Your original message: {update.message.text}', quote=to_quote)\n\n\nclass YoutubeObjectDetectBot(Bot):\n    vdict = {}\n    s3dict = {}\n\n    def __init__(self, token):\n        super().__init__(token)\n\n    def _message_handler(self, update, context):\n\n        try:\n            chat_id = str(update.effective_message.chat_id)\n            if not update.message.text.startswith(\"@\"):\n                self.files_search(update, context)\n            elif \"@addfile\" in update.message.text.lower():\n                self.add_file(update, context)\n\n            elif \"@addall\" in update.message.text.lower():\n                self.add_all_files(update, context)\n\n            elif \"@list\" in update.message.text.lower():\n                self.list(update, context)\n\n            elif \"@playlist\" in update.message.text.lower():\n                self.playlist(update, context)\n\n            elif \"@delfile\" in update.message.text.lower():\n                self.delfile(update, context)\n\n            elif \"@delall\" in update.message.text.lower():\n                self.delallfiles(update, context)\n\n            elif \"@commands\" in update.message.text.lower():\n                self.commands(update, context)\n            else:\n                self.send_text(update, f'Wrong command, please try again.', chat_id=chat_id)\n\n        except botocore.exceptions.ClientError as error:\n            logger.error(error)\n            self.send_text(update, f'Something went wrong, please try again...')\n\n    def files_search(self, update, context):\n        chat_id = str(update.effective_message.chat_id)\n        YoutubeObjectDetectBot.vdict = {}\n        downloaded_videos = search_download_youtube_video(update.message.text, False, 7)\n        i = 1\n        for k, v in downloaded_videos.items():\n            self.send_text(update, f'To upload the following video file write @addfile{i} ', chat_id=chat_id)\n            
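# Results are numbered so the user can queue one later with @addfile<N>;
            # the N -> video-key mapping is kept in vdict and read back in add_file().\n            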
self.send_text(update, f'*********************', chat_id=chat_id)\n self.send_text(update, v, chat_id=chat_id)\n self.send_text(update, f'*********************', chat_id=chat_id)\n YoutubeObjectDetectBot.vdict[i] = k\n i += 1\n\n def add_file(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n self.send_text(update, f'You choose {update.message.text}', chat_id=chat_id)\n mes = update.message.text.lower()\n p = mes.replace('@addfile', '')\n msg = str(YoutubeObjectDetectBot.vdict[int(p)])\n response = workers_queue.send_message(\n MessageBody=msg,\n MessageAttributes={\n 'chat_id': {'StringValue': chat_id, 'DataType': 'String'}\n }\n )\n logger.info(f'msg {response.get(\"MessageId\")} has been sent to queue')\n self.send_text(update, f'Hi, Your message is being processed...', chat_id=chat_id)\n\n def add_all_files(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n self.send_text(update, f'You choose to Add all files', chat_id=chat_id)\n for k, v in YoutubeObjectDetectBot.vdict.items():\n msg = v\n response = workers_queue.send_message(\n MessageBody=msg,\n MessageAttributes={\n 'chat_id': {'StringValue': chat_id, 'DataType': 'String'}\n }\n )\n\n logger.info(f'msg {response.get(\"MessageId\")} has been sent to queue')\n self.send_text(update, f'Hii, Your message is being processed...', chat_id=chat_id)\n\n def list(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n s3_client = boto3.client(\"s3\")\n bucket_name = config.get('videos_bucket')\n response = s3_client.list_objects_v2(Bucket=bucket_name)\n files = response.get(\"Contents\")\n if files is not None:\n c = 1\n for file in files:\n print(f\"file_name: {file['Key']}, size: {file['Size']}\")\n fs = file['Size'] / 1048576\n self.send_text(update, f\"{c}: file_name: {file['Key']}, size: {int(fs)}MB\", chat_id=chat_id)\n YoutubeObjectDetectBot.s3dict[c] = file['Key']\n c += 1\n else:\n self.send_text(update, f'list is empty', chat_id=chat_id)\n\n def playlist(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n s3_client = boto3.client(\"s3\")\n bucket_name = config.get('videos_bucket')\n response = s3_client.list_objects_v2(Bucket=bucket_name)\n files = response.get(\"Contents\")\n if files is not None:\n\n for file in files:\n downloaded_videos = search_download_youtube_video(file['Key'], False)\n for k, v in downloaded_videos.items():\n self.send_text(update, v, chat_id=chat_id)\n\n else:\n self.send_text(update, f'Playlist is empty', chat_id=chat_id)\n\n def delfile(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n s3 = boto3.resource('s3')\n mes = update.message.text.lower()\n p = mes.replace('@delfile', '')\n self.send_text(update, f'You choose to delete file {p}', chat_id=chat_id)\n key = str(YoutubeObjectDetectBot.s3dict[int(p)])\n s3.Object(config.get('videos_bucket'), key).delete()\n\n def delallfiles(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n s3 = boto3.resource('s3')\n self.send_text(update, f'You choose to delete all files', chat_id=chat_id)\n for key in YoutubeObjectDetectBot.s3dict:\n s3.Object(config.get('videos_bucket'), YoutubeObjectDetectBot.s3dict[key]).delete()\n\n def commands(self, update, context):\n chat_id = str(update.effective_message.chat_id)\n self.send_text(update, f'@list - list all files in Playlist and their size (S3 bucket)', chat_id=chat_id)\n self.send_text(update, f'@playlist - list all files in Playlist and their URLs (S3 bucket)', 
chat_id=chat_id)\n self.send_text(update, f'@addfile(x) - add the file you want to upload (run after the search)', chat_id=chat_id)\n self.send_text(update, f'@addall - upload all files (run after the search)', chat_id=chat_id)\n self.send_text(update, f'@delfile(x) - delete the chosen file (run after @list command)', chat_id=chat_id)\n self.send_text(update, f'@delall - delete all files (run after @list command)', chat_id=chat_id)\n\n\nif __name__ == '__main__':\n \"\"\"\n with open('C:/Users/shlomi/PycharmProjects/PolyBot/secrets/.telegramToken') as f:\n _token = f.read()\n\n with open('C:/Users/shlomi/PycharmProjects/PolyBot/common/config.json') as f:\n config = json.load(f)\n \"\"\"\n with open('secrets/.telegramToken') as f:\n _token = f.read()\n\n with open('common/config.json') as f:\n config = json.load(f)\n\n sqs = boto3.resource('sqs', region_name=config.get('aws_region'))\n workers_queue = sqs.get_queue_by_name(QueueName=config.get('bot_to_worker_queue_name'))\n my_bot = YoutubeObjectDetectBot(_token)\n my_bot.start()\n","repo_name":"shlomigd/PolyBot","sub_path":"services/bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"13448662037","text":"import csv\nimport os\nimport shutil\nimport openpyxl\nimport sys\n\nif(not os.path.exists(r\"marksheets\")):\n os.mkdir(r\"marksheets\")\nlst_to_remove = os.listdir(\"marksheets\")\nfor i in lst_to_remove:\n if(i != \"concise_marksheet.xlsx\"):\n os.remove(f\"marksheets//{i}\")\nif not os.path.exists(r\"public//sample_input//master_roll.csv\"):\n print(\"please upload master_roll.csv file\")\n exit()\nif not os.path.exists(r\"public//sample_input//responses.csv\"):\n print(\"please upload responses.csv file\")\n exit()\nwith open(r\"public//sample_input//master_roll.csv\", 'r') as file:\n rows = csv.reader(file)\n MR = {line[0]: [line[1], 0, 0, 0] for line in rows if line[0] != \"roll\"}\n m_lst = sorted(MR.keys())\nwith open(r\"public//sample_input//responses.csv\", 'r') as file:\n rows = csv.reader(file)\n Options_Name = {option[6].upper(): [element for element in option[7:]]\n for option in rows if (option[6] != 'Roll Number' and option[6] != \"\")}\n try:\n i_p, i_n, Final_Score, Final_Count = float(\n sys.argv[1]), float(sys.argv[2]), [], []\n except:\n print(\"Please enter valid input\")\n exit()\n for Roll, options in Options_Name.items():\n Right, Wrong, Not_Attempt = 0, 0, 0\n for i, option in enumerate(options):\n try:\n if (option == Options_Name[\"ANSWER\"][i]):\n Right += 1\n elif(option):\n Wrong += 1\n else:\n Not_Attempt += 1\n except:\n print(\"Please provide ANSWER in the responses.csv file\")\n exit()\n Final_Count.append(f\"[{Right},{Wrong},{Not_Attempt}]\")\n Final_Score.append(f\"{Right*i_p+Wrong*i_n}/140\")\n try:\n MR[f\"{Roll}\"][1], MR[f\"{Roll}\"][2], MR[f\"{Roll}\"][3] = Right, Wrong, Not_Attempt\n except:\n pass\n\n\nimg = openpyxl.drawing.image.Image(\"public//Title.png\")\nright = openpyxl.styles.Alignment(horizontal='right')\ncenter = openpyxl.styles.Alignment(horizontal='center')\nbd = openpyxl.styles.Side(style='thin', color=\"000000\")\nelement = openpyxl.styles.Font(name='Century', size=12)\nthick = openpyxl.styles.Font(name='Century', size=12, bold=True)\nred = openpyxl.styles.Font(name='Century', size=12, color=\"ff0000\")\ngreen = openpyxl.styles.Font(name='Century', size=12, color=\"008000\")\nblue = openpyxl.styles.Font(name='Century', size=12, color=\"0000ff\")\nblack = 
openpyxl.styles.Font(name='Century', size=12, color=\"000000\")\nhighlight = openpyxl.styles.Border(left=bd, top=bd, right=bd, bottom=bd)\nfor key in Options_Name.keys():\n wb = openpyxl.Workbook()\n ws = wb[\"Sheet\"]\n ws.title = \"quiz\"\n ws.add_image(img)\n ws.merge_cells('A5:E5')\n ws[\"A5\"] = \"Mark Sheet\"\n ws[\"A5\"].alignment = center\n ws[\"A5\"].font = openpyxl.styles.Font(\n size=18, bold=True, name='Century', underline=\"single\")\n for i in [0, 1, 2, 3, 4]:\n ws.column_dimensions[chr(ord(\"A\")+i)].width = 18\n ws.append([\"Name:\", f\"{MR[key][0]}\", \"\", \"Exam:\", \"quiz\"])\n ws.append([\"Roll Numer:\", f\"{key}\", \"\", \"\", \"\"])\n ws.merge_cells('B6:C6')\n ws.append([])\n ws.append([\"\", \"Right\", \"Wrong\", \"Not Attempt\", \"Max\"])\n ws.append([\"No.\", f\"{MR[key][1]}\", f\"{MR[key][2]}\",\n f\"{MR[key][3]}\", f\"{sum(MR[key][1:])}\"])\n ws.append([\"Marking\", f\"{i_p}\", f\"{i_n}\", f\"{0}\", \"\"])\n ws.append([\"Total\", f\"{MR[key][1]*i_p}\", f\"{MR[key][2]*i_n}\",\n \"\", f\"{MR[key][1]*i_p+MR[key][2]*i_n}/{sum(MR[key][1:])*i_p}\"])\n for row in ws.iter_rows(min_row=9, max_row=12):\n for i, cell in enumerate(row):\n cell.font, cell.border, cell.alignment = thick, highlight, center\n ws[\"B6\"].font, ws[\"B7\"].font, ws[\"E6\"].font = thick, thick, thick\n ws[\"C10\"].font, ws[\"C11\"].font, ws[\"C12\"].font = red, red, red\n ws[\"E12\"].font, ws[\"E13\"].font, ws[\"E14\"].font = blue, blue, blue\n ws[\"B10\"].font, ws[\"B11\"].font, ws[\"B12\"].font = green, green, green\n ws[\"D10\"].font, ws[\"D11\"].font, ws[\"E10\"].font = black, black, black\n ws[\"A6\"].font, ws[\"A7\"].font, ws[\"D6\"].font, = element, element, element\n ws[\"A6\"].alignment, ws[\"A7\"].alignment, ws[\"D6\"].alignment, = right, right, right\n ws.append([\"Student Ans\", \"Correct Ans\", \"\", \"Student Ans\", \"Correct Ans\"])\n for row in ws.iter_rows(min_row=15, max_row=15):\n for i, cell in enumerate(row):\n if(i != 2):\n cell.font, cell.border, cell.alignment = thick, highlight, center\n for i, row in enumerate(ws.iter_cols(min_col=1, max_col=5, min_row=16, max_row=40)):\n for j, cell in enumerate(row):\n if(i == 1 or i == 4):\n if ((j+24*(i//3)) < len(Options_Name[\"ANSWER\"])):\n cell.value, cell.font = Options_Name[\"ANSWER\"][j+24*(\n i//3)], blue\n cell.border, cell.alignment = highlight, center\n if(i == 0 or i == 3):\n if key in Options_Name:\n if ((j+24*(i//2)) < len(Options_Name[key])):\n cell.value, cell.font = Options_Name[key][j+24*(\n i//2)], green\n cell.border, cell.alignment = highlight, center\n if cell.value != Options_Name[\"ANSWER\"][j+24*(i//2)]:\n cell.font = red\n wb.save(f\"marksheets//{key}.xlsx\")\nfor key in Options_Name:\n if (key in m_lst):\n m_lst.remove(key)\n\nfor key in m_lst:\n wb = openpyxl.Workbook()\n ws = wb[\"Sheet\"]\n ws.title = \"quiz\"\n ws.add_image(img)\n ws.merge_cells('A5:E5')\n ws[\"A5\"] = \"Mark Sheet\"\n ws[\"A5\"].alignment = center\n ws[\"A5\"].font = openpyxl.styles.Font(\n size=18, bold=True, name='Century', underline=\"single\")\n for i in [0, 1, 2, 3, 4]:\n ws.column_dimensions[chr(ord(\"A\")+i)].width = 18\n ws.append([\"Name:\", f\"{MR[key][0]}\", \"\", \"Exam:\", \"quiz\"])\n ws.append([\"Roll Numer:\", f\"{key}\", \"\", \"Status:\", \"Absent\"])\n ws.merge_cells('B6:C6')\n ws[\"B6\"].font, ws[\"B7\"].font, ws[\"E6\"].font, ws[\"E7\"].font = thick, thick, thick, red\n ws[\"A6\"].font, ws[\"A7\"].font, ws[\"D6\"].font, ws[\"D7\"].font = element, element, element, element\n ws[\"A6\"].alignment, ws[\"A7\"].alignment, 
ws[\"D6\"].alignment, ws[\"D7\"].alignment = right, right, right, right\n wb.save(f\"marksheets//{key}.xlsx\")\n\nshutil.make_archive(\"marksheets\", 'zip', \"marksheets\")\nprint(\"Successfully generated roll number wise marksheets\")\n","repo_name":"Aswin-Kumar66/p","sub_path":"pythonFiles/marksheet_Generator.py","file_name":"marksheet_Generator.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2969188553","text":"import pandas as pd\nimport MySQLdb\nimport talib as ta\n\ndef load_stock_code_name_mapping():\n\t\"\"\" \"\"\"\n\tstocks = dict()\n\twith open('data/sh_stock_code_full.txt', 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tname, code = line.strip().split('\\t')\n\t\t\tcode_str = \"sh.\" + code\n\t\t\tif not code.startswith( '6' ):\n\t\t\t\tcode_str = \"sz.\" + code\n\t\t\tstocks[code_str] = name\n\treturn stocks\n\ndef if_zhangtings(preclose, close, pctChg):\n\tif pctChg < 9.0:\n\t\treturn False\n\tif (preclose * 1.1 - close) < 0.01:\n\t\treturn True\n\treturn False\n\ndef if_continue_rise():\n\treturn 0\n\ndef if_junxian_good(dat_df):\n\t#10\n\t#5\n\ttypes=['SMA','EMA','WMA','DEMA','TEMA',\n\t'TRIMA','KAMA','MAMA','T3']\n\tdf_ma=pd.DataFrame(df.close)\n\tfor i in range(len(types)):\n\t df_ma[types[i]]=ta.MA(df.close,timeperiod=5,matype=i)\n\tdf_ma.tail()\n\n\treturn 0\n\ndef test():\n\tif_zhangtings()\n\nif __name__ == '__main__':\n test()","repo_name":"prometuse/myquant","sub_path":"stock/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36429990958","text":"# Program for the requirement,input: a4b3c2 and expected output: aaaabbbcc:\n\n\ns = input('enter the string: ')\noutput = ''\nfor ch in s:\n if ch.isalpha():\n x = ch\n else:\n d = int(ch)\n output = output + x * d\n print(output)\n","repo_name":"Prasan92/mywork","sub_path":"strings/str_in_out.py","file_name":"str_in_out.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42934879142","text":"from xml.etree import ElementTree\n\nimport sfml as sf\n\nfrom base64 import b64decode\nimport StringIO\nimport gzip\nimport struct\nimport time\nimport os\nimport logging as log\n\nclass TiledTile(object):\n def __init__(self, gid, texture):\n self.texture = texture\n self.gid = gid\n\nclass TiledCell(sf.Sprite):\n def __init__(self, pos, texture):\n super(TiledCell, self).__init__(texture)\n self.position = pos\n\nclass MapObject(object):\n def __init__(self, name, type, location, properties):\n self.name = name\n self.type = type\n self.location = location\n self.position = sf.Vector2f(self.location.left, self.location.top)\n self.properties = properties\n\n\nclass TiledLayerIterator:\n def __init__(self, layer):\n self.layer = layer\n self.i, self.j = 0, 0\n def next(self):\n if self.i == self.layer.rows:\n self.j += 1\n self.i = 0\n if self.j == self.layer.columns:\n raise StopIteration()\n value = self.layer[self.i, self.j]\n self.i += 1\n\n return value\n\nclass TiledLayer(object):\n def __init__(self, rows, columns):\n self.cells = {}\n self.rows = rows\n self.columns = columns\n self.visible = True\n self.drawable = sf.RectangleShape(sf.Vector2f(rows * 32, columns * 32))\n\n def __iter__(self):\n return TiledLayerIterator(self)\n\n def __getitem__(self, pos):\n return 
self.cells.get(pos)\n\nclass TiledTileset:\n def __init__(self, first_gid, image):\n self.first_gid = first_gid\n self.image = sf.Image.load_from_file(image)\n self.tiles = {}\n local_id = 0\n for y in range(0, self.image.height / 32):\n for x in range(0, self.image.width / 32):\n global_id = int(self.first_gid) + local_id\n area = sf.IntRect(x * 32, y * 32, 32, 32)\n tile_texture = sf.Texture.load_from_image(self.image, area)\n self.tiles.update({global_id : tile_texture})\n local_id += 1\n\nclass TiledMap:\n def __init__(self, filename, area = None):\n self.layers = []\n self.tiles = {}\n self.objects = {}\n self.area = area\n self.load_started = time.time()\n self.load_from_file(filename)\n\n def load_from_file(self, filename):\n tree = ElementTree.parse(filename)\n root = tree.getroot()\n\n map_dir = os.path.dirname(filename)\n\n self.width = int(root.attrib.get('width'))\n self.height = int(root.attrib.get('height'))\n self.tilewidth = int(root.attrib.get('tilewidth'))\n self.tileheight = int(root.attrib.get('tileheight'))\n self.pixel_width = int(self.width * self.tilewidth)\n self.pixel_height = int(self.height * self.tileheight)\n\n # Need to load tilesets.\n for tileset in root.findall('tileset'):\n firstgid = tileset.attrib.get('firstgid')\n source = tileset.attrib.get('source')\n tileset_file = os.path.join(map_dir, source)\n ts_tree = ElementTree.parse(os.path.join(map_dir, source))\n ts_root = ts_tree.getroot()\n image = ts_root.findall('image')[0].attrib.get('source')\n image = os.path.join(map_dir, image)\n ts = TiledTileset(firstgid, image)\n self.tiles.update(ts.tiles)\n\n log.info('Tilesets loaded')\n\n for layer in root.findall('layer'):\n tl = TiledLayer(self.width, self.height)\n tl.name = layer.attrib.get('name')\n\n data = layer.find('data').text.rstrip().lstrip()\n data = b64decode(data)\n data = gzip.GzipFile(fileobj=StringIO.StringIO(data))\n data = data.read()\n data = struct.unpack('<%di' % (len(data)/4,), data)\n \n for i, gid in enumerate(data):\n if gid < 1: continue\n x = i % self.width\n y = i // self.width\n \n if gid in self.tiles.keys():\n tl.cells[x,y] = TiledCell((x * 32, y * 32), self.tiles[gid])\n\n rt = sf.RenderTexture(self.pixel_width, self.pixel_height)\n rt.clear(sf.Color.TRANSPARENT)\n for cell in tl:\n if cell is not None:\n rt.draw(cell)\n\n rt.display()\n tl.drawable.set_texture(sf.Texture.load_from_image(rt.texture.copy_to_image()))\n \n self.layers.append(tl)\n \n log.info('Layers loaded')\n\n for group in root.findall('objectgroup'):\n for object in group.findall('object'):\n o = MapObject(object.attrib.get('name'),\n object.attrib.get('type'),\n sf.IntRect(int(object.attrib.get('x')),\n int(object.attrib.get('y')),\n int(object.attrib.get('width')),\n int(object.attrib.get('height'))),\n [])\n\n for properties in object.findall('properties'):\n for property in properties.findall('property'):\n o.properties.append({'name' : property.attrib.get('name'),\n 'value': property.attrib.get('value')})\n\n self.objects.update({o.name: o})\n\n log.info('Objects loaded')\n\n self.load_finished = time.time()\n load_time = self.load_finished - self.load_started\n log.info('Level finished loading in {0} seconds.'.format(load_time))\n\n def draw(self, target, states):\n for layer in self.layers:\n if layer.visible:\n 
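# Each layer was flattened into a single RenderTexture in load_from_file,
                # so drawing a whole layer is one call here.\n                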
target.draw(layer.drawable)\n","repo_name":"ateoto/pysfmltest","sub_path":"age/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71296130754","text":"# Twitter Tweet Scraper\n# This Python script allows you to scrape tweets from a specific Twitter user that contain certain keywords.\n# The scraped data would be stored in your desired directory.\n# Follow these steps to use the code:\n\n# 1. Prerequisites:\n# - Make sure you have obtained a Twitter API v2 Bearer Token. You can subscribe to Twitter API V2 Basic level.\n# - Store your Bearer Token in the 'config.ini' file. Replace 'your_bearer_token' with your actual token.\n\n# 2. Customize Your Search:\n# - Specify the Twitter username of the user you want to scrape tweets from by setting the 'user_name' variable.\n# - Define the search query keywords by setting the 'key_word' variable. This will find tweets that match both the username and the keyword.\n# - Replace 'csv_directory' and 'json_directory' with your desired directory paths for storing CSV and JSON files.\n\n# 3. Run the Code:\n# - Execute this script to scrape tweets based on your search criteria.\n# - The code will save the collected tweets to both a CSV and a JSON file with filenames in the format 'date_username_keyword'.\n# - Be sure to have proper permissions to read and write files in the specified directories.\n\n\n# Customize the code by modifying variables to meet your specific needs. Enjoy scraping and analyzing Twitter data!\n\n# Note: Please be mindful of Twitter's API usage policies.\n\nimport os\nimport requests\nimport json\nimport csv\nfrom datetime import datetime\nimport configparser\n\n# Define the Twitter username of the user you want to search tweets from\nuser_name = \"krakenfx\"\n# Define the search query keywords\nkey_word = \"Bitcoin\"\n# Replace with your desired directory path for CSV files\ncsv_directory = \"C:\\\\Users\\\\jesse\\\\Desktop\\\\DDHW\\\\CSV\"\n# Replace with your desired directory path for JSON files\njson_directory = \"C:\\\\Users\\\\jesse\\\\Desktop\\\\DDHW\\\\JSON\"\n\ndef save_files(tweet_data, csv_directory, json_directory):\n # Define the CSV filename\n date_str = datetime.now().strftime(\"%Y-%m-%d\")\n csv_filename = os.path.join(csv_directory, f\"{date_str}_{username}_{keyword}.csv\")\n\n # Write the tweet data to a CSV file\n with open(csv_filename, mode='w', newline='', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n # Write the header row\n writer.writerow([\"Timestamp\", \"Username\", \"Tweet Content\", \"Retweets\", \"Likes\", \"Replies\"])\n # Write the tweet data\n for tweet in tweet_data:\n writer.writerow([tweet[\"Timestamp\"], tweet[\"Username\"], tweet[\"Tweet Content\"],\n tweet[\"Retweets\"], tweet[\"Likes\"], tweet[\"Replies\"]])\n\n print(f\"Collected {len(tweet_data)} tweets and saved to {csv_filename}\")\n\n # Define the JSON filename\n json_filename = os.path.join(json_directory, f\"{date_str}_{username}_{keyword}.json\")\n\n # Write the tweet data to a JSON file\n with open(json_filename, 'w', encoding='utf-8') as json_file:\n json.dump(tweet_data, json_file, ensure_ascii=False, indent=4)\n\n print(f\"Saved the tweets to {json_filename}\")\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\n# Replace with your Twitter API v2 Bearer Token\nbearer_token = config['twitterAPI']['bearer_token']\n\nusername = user_name\nkeyword = key_word\n\n# Define the 
search query to find tweets about \"key_word\" from the specified user\nsearch_query = f\"from:{username} {keyword}\"\n\n# Set the Twitter API v2 endpoint for recent tweet search\nurl = \"https://api.twitter.com/2/tweets/search/recent\"\n\n# Define query parameters to include additional fields, including full tweet content\nparams = {\n    \"query\": search_query,\n    \"max_results\": 100,  # Specify the number of results per response\n    \"tweet.fields\": \"created_at,public_metrics,referenced_tweets\",  # Include additional fields for full tweet content\n    \"user.fields\": \"username\",  # Include the username field to retrieve the username\n}\n\n# Set the request headers with the Bearer Token\nheaders = {\n    \"Authorization\": f\"Bearer {bearer_token}\",\n}\n\n# Send a GET request to the endpoint\ntry:\n    response = requests.get(url, params=params, headers=headers)\n    response.raise_for_status()  # Raise an exception for HTTP errors\nexcept requests.exceptions.RequestException as e:\n    print(f\"An error occurred during the request: {e}\")\n    response = None  # Set the response to None to handle it later\n\nif response and response.status_code == 200:\n    data = response.json()\n\n    if \"data\" in data:\n        # Define a list of tweet data to be written to CSV and JSON\n        tweet_data = []\n\n        for tweet in data[\"data\"]:\n            timestamp = datetime.strptime(tweet[\"created_at\"], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n            tweet_content = tweet[\"text\"]\n            retweets = tweet[\"public_metrics\"][\"retweet_count\"]\n            likes = tweet[\"public_metrics\"][\"like_count\"]\n            replies = tweet[\"public_metrics\"][\"reply_count\"]\n\n            # Convert the datetime object to a string\n            timestamp_str = timestamp.strftime(\"%Y-%m-%d\")\n\n            # Append the tweet data to the list\n            tweet_data.append({\n                \"Timestamp\": timestamp_str,\n                \"Username\": username,\n                \"Tweet Content\": tweet_content,\n                \"Retweets\": retweets,\n                \"Likes\": likes,\n                \"Replies\": replies\n            })\n\n        # Define the directory to save CSV and JSON files\n        csv_dir = csv_directory\n        json_dir = json_directory\n\n        # Call the save_files function to save the CSV and JSON files\n        save_files(tweet_data, csv_dir, json_dir)\n    else:\n        print(\"No tweet data found for the given search query.\")\nelse:\n    if response:\n        print(f\"Request returned an error: {response.status_code} {response.text}\")\n    else:\n        print(\"No response received.\")\n","repo_name":"JesseLovesGrace/MonkeyCatSunny","sub_path":"TwitterCrawler3.0.py","file_name":"TwitterCrawler3.0.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28921867462","text":"n=int(input())\ns='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nst=''\nwhile n:\n    i=n%26\n    if i==0:\n        st+='Z'\n        n=(n//26)-1\n    else:\n        st+=s[i-1]\n        n=(n//26)\nl=len(st)\n# the letters were collected least-significant first, so print them in reverse\nfor i in range(l-1,-1,-1):\n    print(st[i],end=\"\")","repo_name":"SUSHMA-DEVI/codemind-python","sub_path":"Excel_Sheet_Column.py","file_name":"Excel_Sheet_Column.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28097686429","text":"class TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def __init__(self):\n        self.stack = list()\n\n    def iterative_inorder(self, root):\n        node = root\n\n        while node:\n            self.stack.append(node)\n            if node.left:\n                node = node.left\n            elif self.stack:\n                # print(node.val, end=' ')\n                while 
self.stack and self.stack[-1].right is None:\n                    temp = self.stack.pop()\n                    print(temp.val, end=' ')\n                if self.stack:\n                    temp = self.stack.pop()\n                    node = temp.right\n                    print(temp.val, end=' ')\n                else:\n                    break\n            else:\n                break\n\n    def iterative_inorder_3(self, root):\n        node = root\n\n        while node or self.stack:\n            if node:\n                self.stack.append(node)\n                node = node.left\n            elif self.stack:\n                temp = self.stack.pop()\n                print(temp.val, end=' ')\n                node = temp.right\n            else:\n                break\n\n    def iterative_inorder_2(self, root):\n        node = root\n\n        while node or self.stack:\n            if node:\n                self.stack.append(node)\n                node = node.left\n            else:\n                node = self.stack.pop()\n                print(node.val, end=' ')\n                node = node.right\n\n    def solve(self, A):\n        self.iterative_inorder(A)\n        print()\n        self.iterative_inorder_2(A)\n        print()\n        self.iterative_inorder_3(A)\n        return 0\n\n\nif __name__ == '__main__':\n    node1 = TreeNode(1)\n    node2 = TreeNode(2)\n    node3 = TreeNode(3)\n    node4 = TreeNode(4)\n    node5 = TreeNode(5)\n    node6 = TreeNode(6)\n    node7 = TreeNode(7)\n    node8 = TreeNode(8)\n    node9 = TreeNode(9)\n    node10 = TreeNode(0)\n\n    node1.left = node2\n    node1.right = node3\n\n    node2.left = node4\n    node2.right = node10\n\n    node3.left = node5\n    node3.right = node6\n\n    node5.left = node7\n\n    node6.left = node8\n    node6.right = node9\n\n    obj = Solution()\n    ans = obj.solve(node1)\n","repo_name":"navkant/ds_algo_practice","sub_path":"scaler/tress/iterative_inOrder.py","file_name":"iterative_inOrder.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72213860993","text":"import settings\r\nimport environment\r\n\r\nfrom tornado.web import RequestHandler\r\n\r\nfrom service.service import article_service, tag_service\r\nfrom domain.domain import Article\r\n\r\n# Article handler\r\nclass ArticleHandler(RequestHandler):\r\n    def get(self, article_id):\r\n        print(self.__class__)\r\n        article = article_service.find(article_id)\r\n        if (article):\r\n            self.render(settings.app_settings[\"article_page\"], **{\"article\": article})\r\n        else:\r\n            self.render(settings.app_settings[\"404_page\"])\r\n\r\nclass ArticleListByTagHandler(RequestHandler):\r\n    def get(self, tag_id):\r\n        if (not tag_id):\r\n            self.render(settings.app_settings[\"404_page\"])\r\n        else:\r\n            articles = article_service.query_article_by_tag(tag_id)\r\n            if (not articles):\r\n                self.render(settings.app_settings[\"404_page\"])\r\n            else:\r\n                self.render(settings.app_settings[\"article_list_page\"], **{\"articles\": articles})\r\n\r\nclass ArticleWritingHandler(RequestHandler):\r\n    def get(self):\r\n        print(self.__class__)\r\n        self.render(settings.app_settings[\"write_article_page\"])\r\n    def post(self):\r\n        title = self.get_argument(\"title\", None)\r\n        content = self.get_argument(\"content\", None)\r\n        article = Article()\r\n        article.title = title\r\n        article.content = content\r\n        id = article_service.add(article)\r\n        self.redirect(\"/article/\" + str(id))\r\n\r\n# Tag handler\r\nclass TagHandler(RequestHandler):\r\n    def get(self):\r\n        tags = tag_service.list_all()\r\n        if (tags):\r\n            self.render(settings.app_settings[\"tag_list_page\"], **{\"tags\": tags})\r\n        else:\r\n            self.render(settings.app_settings[\"404_page\"])\r\n","repo_name":"hwangsyin/cbrc-devteam-blog","sub_path":"handlers/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10911486251","text":"import re\n\nimport pandas as 
pd\n\nfrom ... import SRC_DIR\n\nDATA_DIR = SRC_DIR / \"..\" / \"..\" / \"data\" / \"01_raw\" / \"cbo\"\n\n\ndef load_cbo_data(date=\"latest\", raw=False):\n \"\"\"\n Load economic projections from the Congressional\n Budget Office (CBO).\n\n See 10-year projections at:\n https://www.cbo.gov/data/budget-economic-data#4\n\n Parameters\n ----------\n date : str\n either \"latest\" or the month to load in format YYYY-MM\n \"\"\"\n\n # Pull the latest set of projections\n if date == \"latest\":\n path = sorted(DATA_DIR.glob(\"*\"), reverse=True)[0]\n # Pick a specific date\n else:\n if not re.match(\"[0-9]{4}-[0-9]{2}\", date):\n raise ValueError(\"Date should be in format YYYY-MM\")\n files = list(DATA_DIR.glob(f\"{date}*\"))\n if not len(files):\n raise ValueError(f\"No files found for date '{date}'\")\n path = files[0]\n\n # CSV or Excel\n fmt = str(path).split(\".\")[-1]\n assert fmt in [\"csv\", \"xlsx\"]\n\n # Excel\n if fmt == \"xlsx\":\n # Read the raw data\n cbo = pd.read_excel(\n path,\n sheet_name=\"1. Quarterly\",\n usecols=\"B:BH\",\n skiprows=6,\n ).dropna(how=\"all\", axis=0)\n\n if raw:\n return cbo\n\n # Columns to rename\n rename = {\n \"Real GDP\": \"RealGDP\",\n \"Price Index, Personal Consumption Expenditures (PCE)\": \"PCEPriceIndex\",\n \"Consumer Price Index, All Urban Consumers (CPI-U)\": \"CPIU\",\n \"GDP Price Index\": \"GDPPriceIndex\",\n \"Price of Crude Oil, West Texas Intermediate (WTI)\": \"OilPriceWTI\",\n \"FHFA House Price Index, Purchase Only\": \"FHFAHousePriceIndex\",\n \"Unemployment Rate, Civilian, 16 Years or Older\": \"UnemploymentRate\",\n \"Employment, Total Nonfarm (Establishment Survey)\": \"NonfarmEmployment\",\n \"Employment, Total Nonfarm (Establishment survey)\": \"NonfarmEmployment\",\n \"10-Year Treasury Note\": \"10YearTreasury\",\n \"3-Month Treasury Bill\": \"3MonthTreasury\",\n \"Federal Funds Rate\": \"FedFundsRate\",\n \"Income, Personal\": \"PersonalIncome\",\n \"Wages and Salaries\": \"Wage&Salaries\",\n \"Profits, Corporate, With IVA & CCAdj\": \"CorporateProfits\",\n \"Personal Consumption Expenditures\": \"PCE\",\n \"Nonresidential fixed investment\": \"NonresidentialInvestment\",\n \"Residential fixed investment\": \"ResidentialInvestment\",\n }\n\n # The names of the indicators to search for\n indicators = list(rename.keys())\n num_columns = len(list(set(rename.values())))\n\n # Rename first two columns\n X = cbo.rename(columns={\"Unnamed: 1\": \"var1\", \"Unnamed: 2\": \"var2\"}).assign(\n var1=lambda df: df[\"var1\"].fillna(df[\"var2\"]).str.strip()\n )\n\n # Do we have all of the indicators\n # NOTE: we drop duplicates here, keeping the \"Nominal\" and removing the \"Real\" duplicates\n matches = X.loc[X[\"var1\"].str.strip().isin(indicators)][\n \"var1\"\n ].drop_duplicates()\n assert len(matches) == num_columns\n\n # Format\n X = (\n X.loc[matches.index]\n .drop(labels=[\"var2\", \"Units\"], axis=1)\n .melt(id_vars=[\"var1\"], var_name=\"Date\")\n .assign(\n Date=lambda df: pd.to_datetime(df.Date),\n var1=lambda df: df.var1.str.strip()\n .str.lower()\n .map({k.lower(): v for k, v in rename.items()}),\n )\n .pivot_table(columns=\"var1\", index=\"Date\", values=\"value\")\n .assign(NonfarmEmployment=lambda df: df.NonfarmEmployment * 1e3)\n )\n\n # CSV format\n else:\n # Read the raw data\n cbo = pd.read_csv(path)\n if raw:\n return cbo\n\n rename = {\n \"real_gdp\": \"RealGDP\",\n \"pce_price_index\": \"PCEPriceIndex\",\n \"cpiu\": \"CPIU\",\n \"gdp_price_index\": \"GDPPriceIndex\",\n \"oil_price_wti_spot\": \"OilPriceWTI\",\n 
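# These keys are assumed to mirror the column names in CBO's quarterly CSV export.\n            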
\"house_price_index_fhfa\": \"FHFAHousePriceIndex\",\n \"unemployment_rate\": \"UnemploymentRate\",\n \"empl_payroll_nf\": \"NonfarmEmployment\",\n \"treasury_note_rate_10yr\": \"10YearTreasury\",\n \"treasury_bill_rate_3mo\": \"3MonthTreasury\",\n \"fed_funds_rate\": \"FedFundsRate\",\n \"personal_income\": \"PersonalIncome\",\n \"wages_and_salaries\": \"Wage&Salaries\",\n \"corp_profits_adj\": \"CorporateProfits\",\n \"pce\": \"PCE\",\n \"nonres_fixed_invest\": \"NonresidentialInvestment\",\n \"res_fixed_invest\": \"ResidentialInvestment\",\n }\n # The names of the indicators to search for\n indicators = list(rename.keys())\n num_columns = len(list(set(rename.values())))\n\n # Trim to columns\n X = cbo[[\"date\"] + indicators].rename(columns={\"date\": \"Date\", **rename})\n\n # Format\n X = (\n X.assign(\n Date=lambda df: pd.to_datetime(df.Date),\n )\n .set_index(\"Date\")\n .assign(NonfarmEmployment=lambda df: df.NonfarmEmployment * 1e3)\n ).dropna()\n\n return X\n","repo_name":"PhilaController/five-year-plan-analysis","sub_path":"src/fyp_analysis/extras/datasets/cbo.py","file_name":"cbo.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5791388736","text":"supply_model_configs_grid = {\n\n \"city\": [\"Torino\", \"Milano\", \"Vancouver\"],\n\n \"data_source_id\": [\"big_data_db\"],\n\n \"n_vehicles\": [500],\n \"engine_type\": [\"electric\"],\n \"vehicle_model_name\": [\"Smart fortwo Electric Drive 2018\"],\n\n \"distributed_cps\": [True],\n \"cps_placement_policy\": [\"num_parkings\"],\n \"profile_type\": [\"single_phase_1\"], # works only if engine_type = electric\n\n \"country_energymix\": [\"Italy\"],\n \"year_energymix\": [\"2018\"],\n\n}\n","repo_name":"smartdatapolito/odysseus","sub_path":"odysseus/supply_modelling/supply_model_configs/default_config.py","file_name":"default_config.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"1710096462","text":"from flask import jsonify\nfrom app.libs.redprint import Redprint\n\nfrom app.libs.toke_auth import auth\nfrom app.models.department import Department\nfrom app.view_models.department import DepartmentCollection\nfrom app.validators.department import DepartmentForm,DepartmentEditForm,DepartmentDeleteForm\nfrom app.libs.error_code import Success\n\n\napi = Redprint('department')\n\n\n@api.route('/', methods=['GET'])\n@auth.login_required\ndef get_department(id):\n\tdepartment = Department.query.get_or_404(id)\n\treturn jsonify(department)\n\n\n@api.route('', methods=['GET'])\n@auth.login_required\ndef get_department_schema():\n\tdepartment = Department.query.all()\n\t\n\t# 使用ViewModel\n\tdepartment_view = DepartmentCollection()\n\tdepartment_view.get_department(department)\n\treturn jsonify(department_view)\n\n\n@api.route('', methods=['POST'])\n@auth.login_required\ndef add_department():\n\tform = DepartmentForm().validate_for_api()\n\tDepartment.create(form.name.data)\n\treturn Success(msg='部门添加成功')\n\n\n@api.route('',methods=['PUT'])\n@auth.login_required\ndef edit_department():\n\tform = DepartmentEditForm().validate_for_api()\n\tDepartment.edit(form.id.data, form.name.data)\n\treturn Success(msg='部门修改成功')\n\n\n@api.route('',methods=['DELETE'])\n@auth.login_required\ndef remove_department():\n\tform = DepartmentDeleteForm().validate_for_api()\n\tDepartment.delete(form.id.data)\n\t\n\treturn 
Success(msg='Department deleted successfully')\n\n","repo_name":"shenxi0723/cpm","sub_path":"app/api/v1/department.py","file_name":"department.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4235269333","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport re\n\nimport dateparser\n\nfrom .base_template import BaseTemplate\n\n\nclass SinfulSiteParser(BaseTemplate):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        # locale.setlocale(locale.LC_TIME, 'ru_RU.UTF-8')\n        self.parser_name = \"sinfulsite.com\"\n        self.thread_name_pattern = re.compile(\n            r'(\\d+).*html$'\n        )\n        self.pagination_pattern = re.compile(\n            r'.*-(\\d+)\\.html$'\n        )\n        self.avatar_name_pattern = re.compile(r'.*/(\\S+\\.\\w+)')\n        self.files = self.get_filtered_files(kwargs.get('files'))\n        self.comments_xpath = '//div[contains(@class,\"anchor post\")]'\n        self.header_xpath = '//div[contains(@class,\"anchor post\")]'\n        self.date_xpath = './/div[@class=\"time fullwidth\"]//text()[1]'\n        self.date_title_xpath = './/div[@class=\"time fullwidth\"]/span[1]/@title'\n        self.date_extract_regex = r'(19|20)\\d\\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01]), (\\d{2}):(\\d{2}):(\\d{2})'\n        self.author_xpath = './/div[contains(@class,\"authorbit\")]/div[3]//text()'\n        self.title_xpath = '//div[@class=\"marginmid\"][1]/text()'\n        self.post_text_xpath = './/div[contains(@class,\"textcontent\") or contains(@class,\"post_body\")]//descendant::text()[not(ancestor::div[@class=\"hidelock\"])]'\n        self.avatar_xpath = './/div[@class=\"author_avatar\"]//img/@src'\n        self.comment_block_xpath = './/div[@class=\"time fullwidth\"]/div[1]/a/text()'\n        self.date_pattern = '%Y-%m-%d, %H:%M:%S'\n        # main function\n        self.main()\n\n    def get_filtered_files(self, files):\n        filtered_files = list(\n            filter(\n                lambda x: self.thread_name_pattern.search(x) is not None,\n                files\n            )\n        )\n        sorted_files = sorted(\n            filtered_files,\n            key=lambda x: (self.thread_name_pattern.search(x).group(1),\n                           self.pagination_pattern.search(x).group(1)))\n\n        return sorted_files\n\n    def parse_date(self, date_str):\n        date = \"\"\n        try:\n            date = datetime.datetime.strptime(date_str, self.date_pattern).timestamp()\n        except:\n            try:\n                date = float(date_str)\n            except:\n                err_msg = f\"WARN: could not figure out date from: ({date_str}) using date pattern ({self.date_pattern})\"\n                print(err_msg)\n                date = dateparser.parse(date_str).timestamp()\n        return date\n\n    def extract_date(self, date_str):\n        m = re.match(self.date_extract_regex, date_str)\n        if m:\n            return m[0]\n        return None\n\n    def extract_str(self, string, regex_pattern):\n        m = re.match(regex_pattern, string)\n        if m:\n            return m[0]\n        return None\n\n    def get_date(self, tag):\n        date_title = tag.xpath(self.date_title_xpath)\n        if date_title and self.extract_str(date_title[0], self.date_extract_regex):\n            date = date_title[0].strip()\n        else:\n            date_block = tag.xpath(self.date_xpath)\n            date = ''.join([date_val for date_val in date_block if date_val and '#' not in date_val]).strip()\n        if not date:\n            return \"\"\n\n        if 'Yesterday' in date or 'Today' in date:\n            time_str = date.split(',')[-1].strip()\n            date = f\"{date_title[0]}, {time_str}\"\n        extracted_date = self.extract_str(date, self.date_extract_regex)\n        if extracted_date:\n            date = extracted_date\n\n        date = self.parse_date(date)\n        if date:\n            curr_epoch = datetime.datetime.now().timestamp()\n            if date > curr_epoch:\n                err_msg = f\"ERROR: the timestamp ({date}) is after current time ({curr_epoch})\"\n                
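# A timestamp in the future can only come from a misparsed date, so treat it
                # as fatal instead of storing bad data.\n                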
print(err_msg)\n raise RuntimeError(err_msg)\n return str(date)\n return \"\"\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/sinfulsite_template.py","file_name":"sinfulsite_template.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24884239394","text":"# Deobfuscate used strings and comment them over the function's codeunit.\n# @category: Malware.SpyEye\n\n# EXPERIMENTAL ! Many edge cases not considered.\n# Addresses are to be changed accordingly\nfrom ghidra.program.model.data import ArrayDataType\nfrom ghidra.program.model.data import UnsignedLongDataType\nfrom ghidra.program.model.data import UnsignedCharDataType\ncount=0\n# Clear wrong data assignements\ncurrentProgram.getListing().clearCodeUnits(toAddr(0x00405028),toAddr(0x0041502b),False)\ncurrentProgram.getListing().clearCodeUnits(toAddr(0x00405e68),toAddr(0x00415ef8),False)\ncurrentProgram.getListing().clearCodeUnits(toAddr(0x0040502c),toAddr(0x0041502f),False)\ncurrentProgram.getListing().clearCodeUnits(toAddr(0x004050b8),toAddr(0x00415e63),False)\n\n#Create indices Array\ncurrentProgram.getListing().createData(toAddr(0x405028),ArrayDataType(UnsignedLongDataType.dataType,36,4))\n\n# Create Rounds Array\ncurrentProgram.getListing().createData(toAddr(0x405e68),ArrayDataType(UnsignedLongDataType.dataType,36,4))\n\n# Create Obfuscated Strings Array\ncurrentProgram.getListing().createData(toAddr(0x4050b8),ArrayDataType(ArrayDataType(UnsignedCharDataType.dataType,100,1),35,100))\n\nindicesTable = getDataAt(toAddr(0x00405028))\n\nfor ref in getReferencesTo(toAddr(0x00401000)):\n refAddr = ref.getFromAddress()\n try:\n prevInstr = currentProgram.getListing().getInstructionBefore(refAddr)\n if prevInstr.toString().startswith(\"PUSH\"):\n prevInstr = prevInstr.toString()\n arg = prevInstr.split(\" \")[1]\n elif prevInstr.toString().startswith(\"RET\"):\n pass\n else:\n prevInstr = prevInstr.getPrevious().toString()\n arg = prevInstr.split(\" \")[1]\n except:\n continue\n counter = 0\n for i in range(36):\n el = indicesTable.getComponent(i).toString()\n try:\n if int(el.split(\" \")[1].rstrip(\"h\"),16) == int(arg,16):\n break\n except:\n continue\n counter += 1\n rounds = int(getDataAt(toAddr(0x405e68)).getComponent(counter).toString().split(\" \")[1].rstrip(\"h\"),16)\n obfString = getDataAt(toAddr(0x4050b8)).getComponent(counter).bytes\n obfString = obfString.tostring()\n deobfString = \"\"\n for i in range(rounds-1,-1,-1):\n deobfString = chr((ord(obfString[i]) - ord(obfString[i-1]))&0xff) + deobfString\n codeUnit = currentProgram.getListing().getCodeUnitAt(refAddr)\n count+=1\n codeUnit.setComment(codeUnit.PRE_COMMENT, deobfString)\n\n","repo_name":"LeHackerman/GhidraScripts","sub_path":"SpyEye/deobfuscateSpyEyeStrings.py","file_name":"deobfuscateSpyEyeStrings.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38836257823","text":"## @ingroup Attributes-Cryogens\n# Cryogen.py\n# \n# Created: Feb 2020, K. 
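# --- Editor's note: self-contained check of the rolling-difference decode in
# the deobfuscateSpyEyeStrings.py record above: each plaintext byte is the
# difference of consecutive obfuscated bytes, masked to 8 bits. The sample
# bytes are crafted for this demo, not taken from a real sample.
def deobfuscate(obf: bytes, rounds: int) -> str:
    out = ""
    for i in range(rounds - 1, -1, -1):
        out = chr((obf[i] - obf[i - 1]) & 0xFF) + out  # i == 0 wraps to obf[-1]
    return out

sample = bytes([104, 209, 0])          # 104 - 0 -> 'h', 209 - 104 -> 'i'
assert deobfuscate(sample, rounds=2) == "hi"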
Hamilton - Through New Zealand Ministry of Business Innovation and Employment Research Contract RTVU2004\n\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nfrom SUAVE.Core import Data\n\n# ----------------------------------------------------------------------\n# Class\n# ----------------------------------------------------------------------\n## @ingroup Attributes-Cryogens\nclass Cryogen(Data):\n \"\"\"Holds values for a cryogen\n \n Assumptions:\n None\n \n Source:\n None\n \"\"\"\n\n def __defaults__(self):\n \"\"\"This sets the default values.\n\n Assumptions:\n None\n\n Source:\n Values commonly available\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\" \n self.tag = 'Cryogen'\n self.density = 0.0 # kg/m^3\n self.specific_energy = 0.0 # MJ/kg\n self.energy_density = 0.0 # MJ/m^3\n self.temperatures = Data()\n self.temperatures.freeze = 0.0 # K\n self.temperatures.boiling = 0.0 # K","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Attributes/Cryogens/Cryogen.py","file_name":"Cryogen.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"5269283884","text":"import ViewerParams\nimport trace.TraceParams\nimport trace\nimport NavigatorGraphParams\n\ndef property_system_show():\n en=gv.pv.prp.propertyNames()\n while en.hasMoreElements():\n key=en.next()\n w.print(\"%s = %s\"%(key,gv.pv.prp.get(key)))\n\ndef property_system_get(key):\n \"\"\" Gets the value of a known system property.\n Usage: property_system_get('key') (note that 'key' is a string in quotes).\n Type 'help properties' for a general overview.\n \"\"\"\n return gv.pv.prp.get(key)\n\ndef property_system_set(key,value):\n \"\"\" Sets the value of a known system property.\n Usage: property_system_set('key',value) (note that 'key' is a string in quotes).\n Notes: All boolean values are represented in Jython as a 0(False) or 1(True).\n You must save the properties (property_system_save()) in order to have\n them take effect next load time. 
\n Type 'help properties' for a general overview.\n \"\"\"\n gv.pv.prp.setProperty(key,value)\n\ndef property_system_save():\n \"\"\" Saves system properties (which are automatically loaded next time the program is run.)\n Note that routine specific properties are not saved.\n Type help properties for a general overview on properties.\n \"\"\"\n file=open(\"properties.cfg\",\"w\")\n gv.pv.prp.store(file,\"GView config file\")\n\ntrace_properties=trace.TraceParams.getInstance()\nviewer_properties=ViewerParams.getInstance()\nnavigator_properties=NavigatorGraphParams.getInstance()\n\ndef property_show(tp):\n f=tp.getClass().getFields()\n for i in range(len(f)):\n w.print(\"%s = %s\"%(f[i].name,eval(\"tp.%s\"%f[i].name)))\n\ndef property_show_all():\n \"\"\" Prints all system (directories etc) and routine specific (viewer,navigator,etc)\n parameters, with minimal instructions on how to set them.\n See property_system_save(), property_system_set(), property_system_get()\n \n For a general overview, type: 'help properties' \n \"\"\"\n w.print(\"System properties : set using property_system_set(\\\"key\\\",value)\")\n w.print(\" ie 'property_system_set(\\\"framerate\\\",15)'\")\n property_system_show()\n w.print(\"\")\n w.print(\"Viewer properties : set by typing viewer_properties.'key'=value\")\n w.print(\" ie viewer_properties.AutoLoadLastRois=1\")\n property_show(viewer_properties)\n w.print(\"\")\n w.print(\"navigator properties : set by typing navigator_properties.'key'=value\")\n w.print(\" ie navigator_properties.AutoLoadLastChartRange=1\")\n property_show(navigator_properties)\n w.print(\"\")\n w.print(\"trace properties : set by typing trace_properties.'key'=value\")\n w.print(\" ie trace_properties.FlipData=1\")\n property_show(trace_properties)\n\n\n","repo_name":"gilbub/GView2","sub_path":"programs/java/gvdecoder/scripts/properties/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39613751344","text":"from django.db import migrations, models\nimport zds.notification.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"notification\", \"0013_clean_notifications\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"PingSubscription\",\n fields=[\n (\n \"answersubscription_ptr\",\n models.OneToOneField(\n parent_link=True,\n auto_created=True,\n primary_key=True,\n serialize=False,\n to=\"notification.AnswerSubscription\",\n on_delete=models.CASCADE,\n ),\n ),\n ],\n bases=(\"notification.answersubscription\", zds.notification.models.MultipleNotificationsMixin),\n ),\n ]\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/notification/migrations/0014_pingsubscription.py","file_name":"0014_pingsubscription.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"23853584660","text":"import os\nimport pandas as pd\nimport numpy as np\n\ndef load_data_by_fid(fid):\n '''\n return a dataframe that has the eid and the 'fid' variable \n '''\n df_tab1_i0_comp=pd.read_csv('/temp_project/ukbb/data/i0/ukb22598_i0_comp.csv')\n\n if int(fid) in df_tab1_i0_comp.fid.values.tolist():\n fid_num=fid\n \n var_description = df_tab1_i0_comp[df_tab1_i0_comp['fid']==int(fid_num)].Description.values[0]\n var_type=df_tab1_i0_comp[df_tab1_i0_comp['fid']==int(fid_num)].Type.values[0]\n\n 
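# --- Editor's note: the property.py record above runs under Jython and lists
# parameters via Java reflection (getClass().getFields()). A rough CPython
# analogue, with an invented parameter class, looks like this:
class TraceParams:
    FlipData = 0
    AutoScale = 1

def property_show(params) -> None:
    for name, value in vars(type(params)).items():
        if not name.startswith("_"):
            print(f"{name} = {value}")

property_show(TraceParams())  # FlipData = 0, AutoScale = 1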
var_type_list=['con','cur','dat','int','tex','tim','cas','cam']\n var_type_list_full=['Continuous','Curve','Date','Integer','Text','Time','Categorical (single)', 'Categorical (multiple)']\n\n path_p1='/temp_project/ukbb/data/i0/var_'\n\n if var_type in var_type_list_full:\n vtyp=var_type_list[var_type_list_full.index(var_type)]\n\n loadpath=path_p1+str(vtyp)+'/'\n os.chdir(path_p1+str(vtyp))\n list_folder=os.listdir() \n\n pname1=str(vtyp)+str(fid_num)+'i0.csv'\n pname2='vec_'+str(vtyp)+str(fid_num)+'i0.csv'\n\n if pname1 in list_folder:\n\n print('fid ' + str(fid_num) + ' is a single-measure '+str(var_type).lower()+' variable, which is \\n'+str(var_description))\n fpname=list_folder[list_folder.index(pname1)]\n df_load=pd.read_csv(loadpath+fpname)\n\n elif pname2 in list_folder:\n\n print('fid ' + str(fid_num) + ' is a single-measure '+str(var_type).lower()+' variable, which is \\n'+str(var_description))\n fpname=list_folder[list_folder.index(pname2)]\n df_load=pd.read_csv(loadpath+fpname, sep='\\t')\n return df_load\n \n else:\n print('fid not found, please try again')\n","repo_name":"postincredible/ukbb","sub_path":"ukbb_ldbf.py","file_name":"ukbb_ldbf.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20259865647","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\n\nfrom .decorators import unauthenticated_user, allowed_users\nfrom .models import *\nfrom .forms import ProjectForm, ContactForm, CreateUserForm\n# Create your views here.\n\n\n# @login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\ndef index(request):\n projects = Project.objects.all().order_by('-intial_date')\n context = {\n 'projects': projects,\n }\n return render(request, 'pages/index.html', context)\n\n\n@unauthenticated_user\ndef register(request):\n\n form = CreateUserForm()\n\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n # associate user with admin group upon registration\n username = form.cleaned_data.get('username')\n group = Group.objects.get(name='admin')\n user.groups.add(group)\n\n messages.success(request, 'Account was created for ' + username)\n return redirect('login')\n\n context = {'form': form}\n return render(request, 'pages/register.html', context)\n\n\n@unauthenticated_user\ndef loginPage(request):\n\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.info(request, 'Username OR password is incorrenct')\n context = {}\n return render(request, 'pages/login.html', context)\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef calculate(subtotal, no_of_signs, sign_permit, engineering, other_fees, discount, cash_discount):\n total = subtotal + (sign_permit * no_of_signs) + engineering + other_fees\n if discount > 0:\n total -= total * discount\n elif cash_discount > 0:\n total -= cash_discount\n else:\n return total\n return total\n\n\ndef 
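# --- Editor's note: stand-alone check of the parallel-list type lookup in the
# ukbb_ldbf.py record above, mapping a UK Biobank variable type to its folder code.
var_type_list = ['con', 'cur', 'dat', 'int', 'tex', 'tim', 'cas', 'cam']
var_type_list_full = ['Continuous', 'Curve', 'Date', 'Integer', 'Text',
                      'Time', 'Categorical (single)', 'Categorical (multiple)']

def type_code(var_type: str) -> str:
    return var_type_list[var_type_list_full.index(var_type)]

assert type_code('Integer') == 'int'
assert type_code('Categorical (multiple)') == 'cam'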
calculatePercentage(deposit_percentage):\n completion_percentage = 100 - deposit_percentage\n\n return completion_percentage\n\n\n@login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\ndef addProject(request):\n form = ProjectForm()\n if request.method == 'POST':\n form_copy = request.POST.copy()\n # get data used to calculate total\n\n number_of_signs = int(form_copy['number_of_signs'])\n sign_permit = int(form_copy['sign_permit'])\n engineering = int(form_copy['engineering'])\n other_fees = int(form_copy['other_fees'])\n # project can have discount OR cash discount\n discount = (float(form_copy['discount']) * .01)\n cash_discount = int(form_copy['cash_discount'])\n # total after discount applied\n discount_total = int(form_copy['discount_total'])\n deposit_amount = int(form_copy['deposit_amount'])\n completion_amount = int(form_copy['completion_amount'])\n # calculate percentage\n deposit_percentage = int(form_copy['deposit_percentage'])\n form_copy['completion_percentage'] = 100 - deposit_percentage\n # calculate total sign price as subtotal\n\n # calculate subtotal given sign price\n sum = 0\n for i in range(number_of_signs):\n i += 1\n sign_order = 'mysign-' + str(i)\n sum += int(form_copy[sign_order])\n form_copy['subtotal'] = sum\n subtotal = int(form_copy['subtotal'])\n\n # if discount %, discount total = disocunt * subtotal\n # if cash discount, discount total = subtotal - cash discount\n if discount:\n form_copy['discount_total'] = discount * subtotal\n elif cash_discount:\n form_copy['discount_total'] = cash_discount\n\n # calculate total price\n # form_copy['final_total'] = subtotal\n form_copy['final_total'] = calculate(\n subtotal, number_of_signs, sign_permit, engineering, other_fees, discount, cash_discount)\n\n form_copy['deposit_amount'] = (form_copy['final_total'] - (form_copy['final_total'] *\n form_copy['completion_percentage'] * .01))\n\n form_copy['completion_amount'] = (form_copy['final_total'] -\n form_copy['deposit_amount'])\n\n print('Sum..........', sum)\n form = ProjectForm(form_copy)\n print(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n context = {\n 'form': form,\n }\n return render(request, 'pages/add_project_form.html', context)\n\n\n@login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\ndef addContact(request):\n form = ContactForm()\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n context = {\n 'form': form,\n }\n return render(request, 'pages/add_contact_form.html', context)\n\n\ndef project(request, pk):\n project = Project.objects.get(id=pk)\n contact = Contact.objects.get(client=project.contact_id)\n print(project)\n print(contact)\n context = {'project': project, 'contact': contact}\n\n return render(request, 'pages/project.html', context)\n\n\ndef contact(request, pk):\n contact = Contact.objects.get(client=pk)\n context = {'contact': contact}\n return render(request, 'pages/contact.html', context)\n\n\n@login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\ndef updateProject(request, pk):\n project = Project.objects.get(id=pk)\n print('Project...', project)\n form = ProjectForm(instance=project)\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, instance=project)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context = {\n 'form': form,\n }\n return render(request, 'pages/add_project_form.html', 
context)\n\n\n@login_required(login_url='login')\n# @allowed_users(allowed_roles=['admin'])\ndef deleteProject(request, pk):\n project = Project.objects.get(id=pk)\n if request.method == \"POST\":\n project.delete()\n return redirect('/')\n context = {'project': project}\n return render(request, 'pages/delete.html', context)\n","repo_name":"MisterJoz/project-manager","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41637903718","text":"import numpy as np\nfrom openpnm.core import Subdomain, ModelsMixin\nfrom openpnm.utils import Workspace, logging\nlogger = logging.getLogger(__name__)\nws = Workspace()\n\n\nclass GenericPhysics(Subdomain, ModelsMixin):\n r\"\"\"\n This generic class is meant as a starter for custom Physics objects\n\n It produces a blank object with no pore-scale models attached. Users can\n add models from the ``models`` module (or create their own).\n\n Parameters\n ----------\n network : OpenPNM Network object\n The network to which this Physics should be attached\n\n phase : OpenPNM Phase object\n The Phase object to which this Physics applies\n\n geometry : OpenPNM Geometry object\n The Geometry object that defines the pores/throats where this Physics\n should be applied.\n\n name : str, optional\n A unique string name to identify the Physics object, typically same as\n instance name but can be anything. If left blank, and name will be\n generated that includes the class name and an integer index.\n\n \"\"\"\n\n def __init__(self, project=None, network=None, phase=None,\n geometry=None, settings={}, **kwargs):\n\n # Define some default settings\n self.settings.update({'prefix': 'phys'})\n # Overwrite with user supplied settings, if any\n self.settings.update(settings)\n if phase is None:\n self.settings['freeze_models'] = True\n\n # Deal with network or project arguments\n if network is not None:\n if project is not None:\n assert network is project.network\n else:\n project = network.project\n\n super().__init__(project=project, **kwargs)\n\n network = self.project.network\n if network:\n if phase is None:\n logger.warning('No Phase provided, ' + self.name\n + ' will not be associated with a phase')\n else:\n self.set_phase(phase=phase)\n if geometry is None:\n logger.warning('No Geometry provided, ' + self.name\n + ' will not be associated with any locations')\n else:\n if (phase is None):\n logger.warning('Cannot associate with a geometry unless '\n + 'a phase is also given')\n else:\n self.set_geometry(geometry=geometry)\n\n def set_phase(self, phase=None, mode='swap'):\n r\"\"\"\n Sets the association between this physics and a phase.\n\n Parameters\n ----------\n phase : OpenPNM Phase object\n If mode is 'add' or 'swap', this must be specified so that\n associations can be recorded in the phase dictionary. If the\n mode is 'drop', this is not needed since the existing association\n can be used to find it.\n mode : str\n Options are:\n\n 'swap' - Associations will be made with the new phase, and\n the pore and throat locations from the current phase will be\n transferred to the new one.\n\n 'drop' - Associations with the existing phase will be removed.\n\n 'add' - If the physics does not presently have an associated\n phase, this will create associations, but no pore or throat\n locations will assigned. 
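# --- Editor's note: a condensed, behaviour-equivalent copy of calculate()
# from the views.py record above with a worked example; the figures are invented.
def calculate(subtotal, no_of_signs, sign_permit, engineering, other_fees,
              discount, cash_discount):
    total = subtotal + sign_permit * no_of_signs + engineering + other_fees
    if discount > 0:
        total -= total * discount          # percentage discount
    elif cash_discount > 0:
        total -= cash_discount             # flat discount
    return total

# 1000 + 2*50 + 200 + 0 = 1300, minus 10% -> 1170
assert calculate(1000, 2, 50, 200, 0, 0.10, 0) == 1170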
This must be done using the\n ``set_geometry`` method.\n\n Notes\n -----\n In all cases the property data will be deleted since it will not\n be relevant to the new phase, so the ``regenerate_models`` method\n must be run.\n\n \"\"\"\n if mode in ['add', 'swap']:\n if phase not in self.project:\n raise Exception(self.name + ' not in same project as given phase')\n try:\n old_phase = self.project.find_phase(self)\n phase['pore.'+self.name] = old_phase['pore.'+self.name]\n phase['throat.'+self.name] = old_phase['throat.'+self.name]\n old_phase.pop('pore.'+self.name, None)\n old_phase.pop('throat.'+self.name, None)\n self.clear()\n except Exception as e:\n logger.debug(e)\n phase['pore.'+self.name] = False\n phase['throat.'+self.name] = False\n elif mode in ['remove', 'drop']:\n self.update({'pore.all': np.array([], dtype=bool)})\n self.update({'throat.all': np.array([], dtype=bool)})\n phase = self.project.find_phase(self)\n phase.pop('pore.'+self.name, None)\n phase.pop('throat.'+self.name, None)\n self.clear()\n else:\n raise Exception('mode ' + mode + ' not understood')\n\n def set_geometry(self, geometry=None, mode='add'):\n r\"\"\"\n Sets the association between this physics and a geometry (i.e. a\n set of pores and throats that define a subdomain)\n\n Parameters\n ----------\n geometry : OpenPNM Geometry object\n The geometry defining the pores and throats to which this physics\n should be attached\n mode : str\n Options are:\n\n 'swap' - Associations will be made with the new geometry, and\n the pore and throat locations from the current geometry will be\n transferred to the new one.\n\n 'drop' - Associations with the current geometry will be removed.\n\n 'add' - If the physics does not presently have an associated\n geometry, this will create associations.\n\n \"\"\"\n if mode in ['add', 'swap']:\n if geometry not in self.project:\n raise Exception(self.name + ' not in same project as given geometry')\n try:\n old_geometry = self.project.find_geometry(self)\n Ps = self.network.pores(old_geometry.name)\n Ts = self.network.throats(old_geometry.name)\n self._set_locations(element='pore', indices=Ps, mode='drop')\n self._set_locations(element='throat', indices=Ts, mode='drop')\n except Exception as e:\n logger.debug(e)\n Ps = self.network.pores(geometry.name)\n Ts = self.network.throats(geometry.name)\n self._set_locations(element='pore', indices=Ps, mode='add')\n self._set_locations(element='throat', indices=Ts, mode='add')\n if mode in ['remove', 'drop']:\n phase = self.project.find_phase(self)\n phase['pore.'+self.name] = False\n phase['throat.'+self.name] = False\n self.update({'pore.all': np.array([], dtype=bool)})\n self.update({'throat.all': np.array([], dtype=bool)})\n self.clear()\n","repo_name":"halotudio/openPNM-copy2","sub_path":"openpnm/physics/GenericPhysics.py","file_name":"GenericPhysics.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17575576578","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\nrawdata = pd.read_csv('flag_data/flag.data')\n## encoding to data1\ndata = rawdata[['2','3','6','7','18','29','30']]\ndata = np.array(data)\nenc = preprocessing.OneHotEncoder()\nenc.fit(data)\ndata1 = enc.transform(data).toarray()\n## write into the rawdata\nraw_data = np.array(rawdata)\ndataout = np.delete(raw_data, [0,1,2,5,6,17,28,29], axis=1)\ndataout = 
np.hstack((dataout,data1))\n\n\n\n\n","repo_name":"beichen-xing/DataMining_Course_Projects","sub_path":"DataPrepocessing/converting_discrete_attributes_to_continuous.py","file_name":"converting_discrete_attributes_to_continuous.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7605333742","text":"import os\nimport sys\nimport inspect\nimport shutil\n\n__location__ = os.path.join(os.getcwd(), os.path.dirname(\n inspect.getfile(inspect.currentframe())))\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.abspath(\"../docs/sphinxext\"))\n\n# -- Run sphinx-apidoc ------------------------------------------------------\n# This hack is necessary since RTD does not issue `sphinx-apidoc` before running\n# `sphinx-build -b html . _build/html`. See Issue:\n# https://github.com/rtfd/readthedocs.org/issues/1139\n# DON'T FORGET: Check the box \"Install your project inside a virtualenv using\n# setup.py install\" in the RTD Advanced Settings.\n# Additionally it helps us to avoid running apidoc manually\n\ntry: # for Sphinx >= 1.7\n from sphinx.ext import apidoc\nexcept ImportError:\n from sphinx import apidoc\n\noutput_dir = os.path.join(__location__, \"api\")\nmodule_dir = os.path.join(__location__, \"../klayout_package/python/kqcircuits\")\ntry:\n shutil.rmtree(output_dir)\nexcept FileNotFoundError:\n pass\n\ntry:\n import sphinx\n from pkg_resources import parse_version\n\n template_dir = os.path.join(__location__, \"templates\", \"apidoc\")\n cmd_line_template = \"sphinx-apidoc -f -o {outputdir} {moduledir} -e --implicit-namespaces --templatedir={templatedir}\"\n cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir, templatedir=template_dir)\n\n args = cmd_line.split(\" \")\n if parse_version(sphinx.__version__) >= parse_version('1.7'):\n args = args[1:]\n\n apidoc.main(args)\nexcept Exception as e:\n print(\"Running `sphinx-apidoc` failed!\\n{}\".format(e))\n\nimport sphinx_rtd_theme\n\n# -- Project information -----------------------------------------------------\n\nimport re\nfrom kqcircuits._version import get_version\n\nproject = 'KQCircuits'\ncopyright = '2021-2023, IQM'\nauthor = 'IQM'\n\n# Set supplied KQC version if found from Git tags\nmatched_version = re.match(r'([0-9]+\\.[0-9]+\\.[0-9]+)\\.', get_version())\nif not matched_version:\n print(f'KQC version not matched for: {get_version()}')\nelse:\n version = matched_version.group(1)\n release = version\n\n# -- General configuration ---------------------------------------------------\n\nsource_suffix = ['.rst']\nmaster_doc = 'index'\n\n# Add any Sphinx extension module names here, as strings. 
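# --- Editor's note: the flag-dataset script above one-hot encodes selected
# columns and stacks them back onto the remainder; a toy reproduction
# (the 3x2 array is invented, flag.data is not needed):
import numpy as np
from sklearn import preprocessing

data = np.array([[0, 1], [1, 0], [2, 1]])
enc = preprocessing.OneHotEncoder()
enc.fit(data)
onehot = enc.transform(data).toarray()
print(onehot.shape)  # (3, 5): three categories in column 0, two in column 1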
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx_rtd_theme',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.graphviz',\n 'kqc_elem_params',\n 'sphinx.ext.extlinks',\n]\n\ntodo_include_todos = True\n\npygments_style = 'trac'\n\ngraphviz_output_format = 'svg'\n\nautoclass_content = \"both\"\nautosummary_generate = True\nautodoc_member_order = 'bysource'\nautodoc_default_options = {'members': True,\n 'undoc-members': True,\n 'show-inheritance': True}\n\ndef add_param_details(app, what, name, obj, options, lines):\n global _parameters\n if what == \"class\" and hasattr(obj, \"get_schema\"):\n _parameters = obj.get_schema(noparents=True).keys()\n\ndef skip_params(app, what, name, obj, skip, options):\n if what == \"class\" and str(type(obj)) != \"\" and not skip and name in _parameters:\n return True\n\ndef setup(app):\n app.connect(\"autodoc-process-docstring\", add_param_details)\n app.connect(\"autodoc-skip-member\", skip_params)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'api/modules.rst']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n 'style_nav_header_background': 'white',\n 'collapse_navigation': False,\n}\n\n# Add favicon and logo to site. Vectors are supported by modern browsers.\nhtml_favicon = 'images/logo.svg'\nhtml_logo = \"images/logo-small.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\nhtml_css_files = [\n 'css/custom.css'\n]\n\n# Add \"Edit on GitHub\" button\nhtml_context = {\n 'display_github': True,\n 'github_user': 'iqm-finland',\n 'github_repo': 'KQCircuits',\n 'github_version': 'main',\n 'conf_py_path': '/docs/'\n}\n\n# A string of reStructuredText that will be included at the end of every source file that is read.\n# This is a possible place to add substitutions that should be available in every file\nrst_epilog = \"\"\"\n.. 
|GIT_CLONE_URL| replace:: {url}\n\"\"\".format(\n url=os.environ.get('DOCS_GIT_CLONE_URL', f'https://github.com/{html_context[\"github_user\"]}/{html_context[\"github_repo\"]}'), # picks default if no ENV\n)\n\n# Base URL for files in the git repository\ngithub_url = f'https://github.com/{html_context[\"github_user\"]}/{html_context[\"github_repo\"]}/blob/{html_context[\"github_version\"]}/'\nextlinks = {'git_url': (os.environ.get('DOCS_GIT_URL', github_url), '%s')}\n","repo_name":"iqm-finland/KQCircuits","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"} +{"seq_id":"23569897321","text":"#!/usr/bin/env python\nimport argparse\nimport math\n\nparser = argparse.ArgumentParser()\nparser.add_argument('filename')\n\nargs = parser.parse_args()\n\n\ndef distances(slot_length: int):\n _max = slot_length // 2\n _min = math.ceil(slot_length / 2) - 1\n return _max, _min\n\n\ndef solve(number_of_stalls: int, people: int) -> str:\n print(f\"Placing {people} persons in {number_of_stalls} stalls...\")\n slots = dict([(number_of_stalls, 1)])\n last_distances = (0, 0)\n for i in range(people):\n slot = 0\n to_delete = []\n for slot_length, count in slots.items():\n if count:\n slot = slot_length\n slots[slot] -= 1\n break\n else:\n to_delete.append(slot_length)\n for slot_length in to_delete:\n del slots[slot_length]\n\n last_distances = distances(slot)\n\n try:\n slots[last_distances[0]] += 1\n except KeyError:\n slots[last_distances[0]] = 1\n\n try:\n slots[last_distances[1]] += 1\n except KeyError:\n slots[last_distances[1]] = 1\n return \" \".join(map(str, last_distances))\n\n\nsolutions = []\nwith open(f'{args.filename}.in', 'r') as input_file:\n input_file.readline()\n for line in input_file:\n solutions.append(solve(*map(int, line.strip().split())))\n\nwith open(f'{args.filename}.out', 'w') as output_file:\n for line in (f'Case #{i}: ' + solution for i, solution in enumerate(solutions, start=1)):\n output_file.write(line + '\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2292.py","file_name":"2292.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19344004102","text":"################################### Credit ###################################\n# Much of this file involves modifications to code written by Michael W. 
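# --- Editor's note: worked check of distances() from the Code Jam solution
# above. Occupying the middle stall of a run of k empty stalls leaves runs of
# floor(k/2) and ceil(k/2) - 1 on either side.
import math

def distances(slot_length: int):
    _max = slot_length // 2
    _min = math.ceil(slot_length / 2) - 1
    return _max, _min

assert distances(4) == (2, 1)  # even run: unequal sides
assert distances(5) == (2, 2)  # odd run: equal sides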
Mull,\n# 2017 (https://github.com/mikemull/midaspy/)\n##############################################################################\n\nimport numpy as np\nimport pandas as pd\nfrom collections import abc\nfrom scipy.special import gamma\n\ndef polynomial_weights(poly):\n \"\"\"\n Initiate beta polynomial or exponential Almon polynomial exogenous weighting.\n \n Parameters\n ----------\n poly : str\n 'exp_almon', 'beta', or 'hyperbolic' polynomial weight method.\n \n Returns\n -------\n poly_class : midas.iolib.WeightMethod\n \"\"\"\n poly_class = {\n 'beta': BetaWeights(1., 5.),\n 'exp_almon': ExpAlmonWeights(-1., 0.),\n 'hyperbolic': HyperbolicWeights(.1)\n }\n return poly_class[poly]\n\n\nclass WeightMethod(object):\n \"\"\"\n Weight method instance class.\n \"\"\"\n def __init__(self):\n pass\n\n def weights(self):\n pass\n\n\nclass BetaWeights(WeightMethod):\n def __init__(self, theta1, theta2, theta3=None):\n self.theta1 = theta1\n self.theta2 = theta2\n self.theta3 = theta3\n\n def weights(self, nlags):\n \"\"\" \n Evenly-spaced Beta polynomial weighting method.\n \n Parameters\n ----------\n nlags : int\n Number of lag terms in projection matrix.\n \n Returns\n -------\n array : numpy.array\n Polynomial weights.\n \"\"\"\n eps = np.spacing(1)\n u = np.linspace(eps, 1.0 - eps, nlags)\n\n beta_vals = u ** (self.theta1 - 1) * (1 - u) ** (self.theta2 - 1)\n\n beta_vals = beta_vals / sum(beta_vals)\n\n if self.theta3 is not None:\n w = beta_vals + self.theta3\n return w / sum(w)\n\n return beta_vals\n\n def x_weighted(self, x, params):\n self.theta1, self.theta2 = params\n\n w = self.weights(x.shape[1])\n\n return np.dot(x, w), np.tile(w.T, (x.shape[1], 1))\n\n @property\n def num_params(self):\n return 2 if self.theta3 is None else 3\n\n @staticmethod\n def init_params():\n return np.array([1., 5.])\n\n \nclass ExpAlmonWeights(WeightMethod):\n def __init__(self, theta1, theta2):\n self.theta1 = theta1\n self.theta2 = theta2\n\n def weights(self, nlags):\n \"\"\"\n Exponential Almon polynomial weighting method.\n\n Parameters\n ----------\n nlags : int\n Number of lag terms in projection matrix.\n \n Returns\n -------\n array : numpy.array\n Polynomial weights.\n \"\"\"\n ilag = np.arange(1, nlags + 1)\n z = np.exp(self.theta1 * ilag + self.theta2 * ilag ** 2)\n return z / sum(z)\n\n def x_weighted(self, x, params):\n self.theta1, self.theta2 = params\n\n w = self.weights(x.shape[1])\n\n return np.dot(x, w), np.tile(w.T, (x.shape[1], 1))\n\n @property\n def num_params(self):\n return 2\n\n @staticmethod\n def init_params():\n return np.array([-1., 0.])\n\n \nclass HyperbolicWeights(WeightMethod):\n def __init__(self, theta):\n self.theta = theta\n\n def weights(self, nlags):\n \"\"\"\n Hyperbolic (gamma) polynomial weighting method.\n\n Parameters\n ----------\n nlags : int\n Number of lag terms in projection matrix.\n \n Returns\n -------\n array : numpy.array\n Polynomial weights.\n \"\"\"\n u = np.arange(1, nlags + 1)\n g = gamma(u + self.theta) / (gamma(u + 1) * gamma(self.theta))\n return g / sum(g)\n\n def x_weighted(self, x, param):\n self.theta = param\n\n w = self.weights(x.shape[1])\n\n return np.dot(x, w), np.tile(w.T, (x.shape[1], 1))\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def init_params():\n return np.array([.1])\n\n \ndef jacobian_wx(x, params, weight_method):\n \"\"\"\n Compute Jacobian of weighted exognenous variable.\n \n Parameters:\n x : pandas.DataFrame\n Exogenous variable lagged projection matrix.\n params : float or numpy.array\n 
Polynomial weighting parameter(s).\n weight_method : midaspy.iolib.WeightMethod\n Polynomial weighting instance based on spciefied method.\n \n Returns\n -------\n jacobian : numpy.ndarray\n Jacobian matrix\n \"\"\"\n eps = 1e-6\n jt = []\n for i, p in enumerate(params):\n dp = np.concatenate([params[0:i], [p + eps / 2], params[i + 1:]])\n dm = np.concatenate([params[0:i], [p - eps / 2], params[i + 1:]])\n jtp, _ = weight_method.x_weighted(x, dp)\n jtm, _ = weight_method.x_weighted(x, dm)\n jt.append((jtp - jtm) / eps)\n\n return np.column_stack(jt)\n \n\ndef ssr(a, x, y, yl, weight_methods):\n \"\"\"\n Compute errors of the MIDAS equation\n \n Parameters\n ----------\n a : numpy.array\n Regression coefficients and weighting parameters.\n x : dictionary of pandas.DataFrames\n All exogenous variables' higher-to-lower frequency projection matrices.\n y : pandas.DataFrame\n training set of endogenous target data.\n yl : pandas.DataFrame\n Autoregressive distributed endogenous lag terms.\n weight_methods : dictionary of strings\n Specified polynomial weighting method for each exogenous projection matrix.\n\n Returns\n -------\n error : numpy.array\n Error values for each predicted value relative to actual value.\n \"\"\"\n exog_vars = list(x.keys())\n alpha, betas, thetas, xws, num_exog = a[0], {}, {}, {}, len(exog_vars)\n error = y.values - alpha\n for var, i in zip(exog_vars, range(1, num_exog + 1)):\n betas[var] = a[i]\n num_params = [polynomial_weights(poly).num_params for poly in weight_methods.values()]\n thetas_order = np.cumsum(num_params) - num_params + num_exog + 1\n for var, i in zip(exog_vars, thetas_order):\n thetas[var] = a[i:i + polynomial_weights(weight_methods[var]).num_params]\n for var in exog_vars:\n weight_method = polynomial_weights(weight_methods[var])\n xw, w = weight_method.x_weighted(x[var], thetas[var])\n xws[var] = xw.reshape((len(xw), 1))\n for var in exog_vars:\n error -= betas[var] * xws[var]\n if yl is not None:\n error = error - np.dot(yl, a[-1 * yl.shape[1]:].reshape(a[-1 * yl.shape[1]:].shape[0], 1 if yl.shape[1] is not None else None))\n return error.reshape((len(error),))\n\ndef jacobian(a, x, y, yl, weight_methods):\n \"\"\"\n Compute Jacobian of the MIDAS equation\n\n Parameters\n ----------\n a : numpy.array\n Regression coefficients and weighting parameters.\n x : dictionary of pandas.DataFrames\n All exogenous variables' higher-to-lower frequency projection matrices.\n y : pandas.DataFrame\n training set of endogenous target data.\n yl : pandas.DataFrame\n Autoregressive distributed endogenous lag terms.\n weight_methods : dictionary of strings\n Specified polynomial weighting method for each exogenous projection matrix.\n\n Returns\n -------\n jac_e : numpy.ndarray\n Jacobian matrix.\n \"\"\"\n exog_vars = list(x.keys())\n alpha, betas, thetas, xws, jwxs, num_exog = a[0], {}, {}, {}, {}, len(exog_vars)\n for var, i in zip(exog_vars, range(1, num_exog + 1)):\n betas[var] = a[i]\n num_params = [polynomial_weights(poly).num_params for poly in weight_methods.values()]\n thetas_order = np.cumsum(num_params) - num_params + num_exog + 1\n for var, i in zip(exog_vars, thetas_order):\n thetas[var] = a[i:i + polynomial_weights(weight_methods[var]).num_params]\n for var in exog_vars:\n weight_method = polynomial_weights(weight_methods[var])\n xw, w = weight_method.x_weighted(x[var], thetas[var])\n xws[var], jwxs[var] = xw.reshape((len(xw), 1)), jacobian_wx(x[var], thetas[var], polynomial_weights(weight_methods[var]))\n jac_e = [np.ones((len(y), 1))]\n for var in 
exog_vars:\n jac_e.append(xws[var])\n for var in exog_vars:\n jac_e.append(betas[var] * jwxs[var])\n if yl is not None:\n jac_e.append(yl)\n jac_e = -1 * np.concatenate(jac_e, axis = 1)\n return jac_e\n\ndef low_freq_projection(x, xlag, horizon, target_dates):\n \"\"\"\n Project high frequency variable onto lower frequency of regression\n target through a lagged matrix\n \n Parameters\n ----------\n x : pandas.Series\n Exogenous variable data with datetimes index.\n xlag : intereger\n Number of lagged observations to include in the projection matrix\n horizon : integer\n Number of high-frequency observations prior to each target date.\n For example, with horizon of 1 and target date of 2012-12-31, the\n first lagged term in the projection matrix row corresponding to this\n date will be 2012-12-30, then 2012-12-29, and so on until there are\n xlag terms in the projection matrix.\n target_dates : pandas.core.indexes.datetimes.DatetimeIndex\n Low-frequency target dates. Used to select only the rows of the\n projection matrix which correspond to date of target values.\n \n Returns\n -------\n projection_matrix : pandas.DataFrame\n Lagged projection matrix of x containing xlag terms.\n \"\"\"\n projection_matrix = pd.DataFrame()\n for lag in range(horizon, xlag + horizon):\n projection_matrix = pd.concat([projection_matrix, x.shift(lag).rename(x.name+' t-{}'.format(lag))], axis=1)\n projection_matrix = projection_matrix.loc[target_dates]\n return projection_matrix\n\ndef nested_dict_iter(nested):\n \"\"\"\n Yield nested dictionary iterator from dictionary of dictionaries.\n \n Author: Jonathan Scott Enderle\n Source: https://stackoverflow.com/questions/10756427/\n \n Parameter\n nested : dictionary\n Dictionary of dictionaries.\n \n Returns\n -------\n generator\n Iterable object containing nested key and value pairs.\n \"\"\"\n for key, value in nested.items():\n if isinstance(value, abc.Mapping):\n yield from nested_dict_iter(value)\n else:\n yield key, value","repo_name":"Yoseph-Zuskin/midaspy","sub_path":"midaspy/iolib.py","file_name":"iolib.py","file_ext":"py","file_size_in_byte":10273,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"15801298374","text":"import sys\nimport os\nimport cv2\nimport numpy as np\nfrom time import sleep\nfrom time import time\nfrom pyzbar import pyzbar\n#########################################################\n#------- Grab frames from webcam -----------#\n#########################################################\n\n\nsleeptime = 0.001\nnumframes = 10*30\nfolder = '3D'\ncap = cv2.VideoCapture(0)\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\ndef QR_read(image):\n for barcode in barcodes:\n\t # extract the bounding box location of the barcode and draw the\n\t # bounding box surrounding the barcode on the image\n\t (x, y, w, h) = barcode.rect\n\t cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n \n\t # the barcode data is a bytes object so if we want to draw it on\n\t # our output image we need to convert it to a string first\n\t barcodeData = barcode.data.decode(\"utf-8\")\n\t barcodeType = barcode.type\n \n\t # draw the barcode data and barcode type on the image\n\t text = \"{} ({})\".format(barcodeData, barcodeType)\n\t cv2.putText(image, text, (x, y - 6), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t 0.5, (0, 255, 0), 2)\n \n\t # print the barcode type and data to the terminal\n\t print(\"[INFO] Found {} barcode: {}\".format(barcodeType, barcodeData))\n\n\niii = 0\noldtime = 
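# --- Editor's note: numeric check of the evenly spaced Beta-polynomial MIDAS
# weights from the iolib.py record above, outside the class wrapper.
import numpy as np

def beta_weights(nlags, theta1=1.0, theta2=5.0):
    eps = np.spacing(1)
    u = np.linspace(eps, 1.0 - eps, nlags)
    w = u ** (theta1 - 1) * (1 - u) ** (theta2 - 1)
    return w / w.sum()

w = beta_weights(4)
print(w)                        # weight decays toward later lags
assert abs(w.sum() - 1) < 1e-12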
time()\nif __name__ == '__main__':\n success, frame = cap.read()\n if not success:\n print('Failed to capture video')\n sys.exit(1)\n while cap.isOpened(): \n success, frame = cap.read()\n if not success:\n break\n barcodes = pyzbar.decode(frame)\n QR_read(frame)\n cv2.imshow('Frames', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\ncap.release()\n#out.release()\ncv2.destroyAllWindows()\n\n\n\n","repo_name":"garethnisbet/T-BOTS","sub_path":"Python/Development/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"72898454274","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as anim\nimport numpy as np\n\nFPS = 30\nELEMENTS = 40\n\nnp.random.seed(123)\ny = np.random.randint(0, 1000, size=(ELEMENTS,))\nx = np.array([i for i in range(y.size)])\n\nfig = plt.figure(figsize=(y.size, y.size))\nbars = plt.bar(x, y)\nstep = 0\n\n\ndef update(sort_step):\n global step\n for rect, height in zip(bars, sort_step):\n rect.set_height(height)\n print(f'Step {step}: {sort_step}')\n step += 1\n\n\ndef create_anim(sortmethod):\n global step\n\n sort = np.array(list(sortmethod(y.copy())))\n print(f\"\\nSort {sortmethod.__name__} is done\")\n\n step = 0\n\n animation = anim.FuncAnimation(fig, update, frames=sort, repeat=False)\n animation.save(f'{sortmethod.__name__}.mp4', writer=anim.FFMpegWriter(fps=FPS))\n\n\n# O(n^2), O(n) sometimes if break\ndef sort_bubble(y) -> object:\n yield y.copy()\n for i in range(y.size):\n swapped = True\n for j in range(y.size - 1):\n if y[j] > y[j + 1]:\n y[j], y[j + 1] = y[j + 1], y[j]\n swapped = False\n yield y.copy()\n\n if swapped: break\n\n\n# O(n^1.5)\ndef sort_selection(y) -> object:\n yield y.copy()\n for i in range(y.size):\n minval = i\n for j in range(i + 1, y.size):\n if y[j] < y[minval]: minval = j\n y[i], y[minval] = y[minval], y[i]\n yield y.copy()\n\n\n# O(n^1.40)\ndef sort_insertion(y) -> object:\n yield y.copy()\n for i in range(y.size - 1):\n if y[i] > y[i + 1]:\n y[i], y[i + 1] = y[i + 1], y[i]\n j = i\n while j > 0 and y[j] < y[j - 1]:\n y[j], y[j - 1] = y[j - 1], y[j]\n j -= 1\n yield y.copy()\n\n\ndef quick_sort(y, low, high):\n if low < high:\n i = low\n for j in range(low, high):\n if y[j] <= y[high]:\n y[j], y[i] = y[i], y[j]\n i += 1\n yield y.copy()\n y[high], y[i] = y[i], y[high]\n\n for j in quick_sort(y, low, i - 1): yield j\n\n for j in quick_sort(y, i + 1, high): yield j\n\n\n# TODO: I must be outputting something wrong since anim takes more than insertion\n# Worst O(n^2) best O(nLog(n))\ndef sort_quick(y) -> object:\n yield y.copy()\n for i in quick_sort(y, 0, y.size - 1): yield i\n yield y.copy()\n\n\n# O(nlog(n))\ndef sort_merge(y) -> object:\n if len(y) < 2: return\n yield y.copy()\n\n mid = len(y) // 2\n left, right = y[:mid].copy(), y[mid:].copy()\n\n for i in sort_merge(left):\n if len(i) == len(y):\n yield i\n break\n\n for i in sort_merge(right):\n if len(i) == len(y):\n yield i\n break\n\n i = j = k = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n y[k] = left[i]\n i += 1\n else:\n y[k] = right[j]\n j += 1\n k += 1\n yield y.copy()\n\n while i < len(left):\n y[k] = left[i]\n i += 1\n k += 1\n yield y.copy()\n\n while j < len(right):\n y[k] = right[j]\n j += 1\n k += 1\n yield y.copy()\n\n\ndef get_int(max):\n import random\n from time import time\n\n random.seed(time())\n return random.randint(0, max)\n\n\ndef shuffle_for(y) -> object:\n yield 
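# --- Editor's note: minimal single-frame variant of the decode loop in the
# qr.py record above; assumes a webcam plus the opencv-python and pyzbar packages.
import cv2
from pyzbar import pyzbar

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    for barcode in pyzbar.decode(frame):
        print(barcode.type, barcode.data.decode("utf-8"))
cap.release()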
y.copy()\n for i in reversed(range(y.size - 1)):\n j = get_int(i)\n y[i], y[j] = y[j], y[i]\n yield y.copy()\n\n\n# TODO: Fix probability shuffles\ndef shuffle_probability(y, val):\n yield y.copy()\n for i in range(y.size):\n j, accumulator = 0, 0\n while accumulator < val:\n accumulator += y[j]\n j = (j + 1) % y.size\n y[i], y[j] = y[j], y[i]\n yield y.copy()\n\n\ndef shuffle_proportional(y) -> object:\n val = get_int((y.size - 1) * (y.sum() - 1))\n return shuffle_probability(y, val)\n\n\ndef shuffle_inv_proportionality(y) -> object:\n val = get_int(y.sum() - 1)\n return shuffle_probability(y, val)\n\n\ncreate_anim(sort_bubble)\ncreate_anim(sort_selection)\ncreate_anim(sort_insertion)\ncreate_anim(sort_quick)\ncreate_anim(sort_merge)\n\ny = list(sort_merge(y)).pop()\n\ncreate_anim(shuffle_for)\ncreate_anim(shuffle_proportional)\ncreate_anim(shuffle_inv_proportionality)\n","repo_name":"Ronterox/ProgNeeds","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41483018828","text":"# test basic creation of buckets with objects\nimport os, sys\n\nsys.path.append(os.path.abspath(os.path.join(__file__, \"../../../..\")))\nfrom v2.lib.resource_op import Config\nimport v2.lib.resource_op as s3lib\nfrom v2.lib.s3.auth import Auth\nimport v2.utils.log as log\nimport v2.utils.utils as utils\nfrom v2.utils.utils import HttpResponseParser\nimport traceback\nimport argparse\nimport yaml\nimport v2.lib.manage_data as manage_data\nfrom v2.lib.exceptions import TestExecError\nfrom v2.utils.test_desc import AddTestInfo\nfrom v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure\nimport time\nimport simplejson\n\nTEST_DATA_PATH = None\n\n\ndef test_exec(config):\n test_info = AddTestInfo('create m buckets with n objects')\n try:\n test_info.started_info()\n # get user\n with open('user_details') as fout:\n all_users_info = simplejson.load(fout)\n for each_user in all_users_info:\n # authenticate\n auth = Auth(each_user)\n rgw_conn = auth.do_auth_using_client()\n rgw = auth.do_auth()\n bucket_list = []\n buckets = rgw_conn.list_buckets()\n log.info('buckets are %s' % buckets)\n for each_bucket in buckets['Buckets']:\n bucket_list.append(each_bucket['Name'])\n for bucket_name in bucket_list:\n # create 'bucket' resource object\n bucket = rgw.Bucket(bucket_name)\n log.info('In bucket: %s' % bucket_name)\n if config.test_ops['create_object'] is True:\n # uploading data\n log.info('s3 objects to create: %s' % config.objects_count)\n for oc in range(config.objects_count):\n s3_object_name = utils.gen_s3_object_name(bucket_name, oc)\n log.info('s3 object name: %s' % s3_object_name)\n s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)\n log.info('s3 object path: %s' % s3_object_path)\n s3_object_size = utils.get_file_size(config.objects_size_range['min'],\n config.objects_size_range['max'])\n data_info = manage_data.io_generator(s3_object_path, s3_object_size)\n if data_info is False:\n TestExecError(\"data creation failed\")\n log.info('uploading s3 object: %s' % s3_object_path)\n upload_info = dict({'access_key': each_user['access_key']}, **data_info)\n # object_uploaded_status = bucket.upload_file(s3_object_path, s3_object_name)\n object_uploaded_status = s3lib.resource_op({'obj': bucket,\n 'resource': 'upload_file',\n 'args': [s3_object_path, s3_object_name],\n 'extra_info': upload_info})\n if object_uploaded_status is False:\n raise 
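# --- Editor's note: the sorts in the main.py record above are generators
# yielding one array per animation frame, so they can be exercised without
# matplotlib; sort_bubble is restated here to keep the snippet self-contained.
import numpy as np

def sort_bubble(y):
    yield y.copy()
    for _ in range(y.size):
        swapped = True
        for j in range(y.size - 1):
            if y[j] > y[j + 1]:
                y[j], y[j + 1] = y[j + 1], y[j]
                swapped = False
                yield y.copy()
        if swapped:
            break

frames = list(sort_bubble(np.array([3, 1, 2])))
print(len(frames), frames[-1])  # 3 frames, last one sorted: [1 2 3]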
TestExecError(\"Resource execution failed: object upload failed\")\n if object_uploaded_status is None:\n log.info('object uploaded')\n if config.test_ops['download_object'] is True:\n log.info('trying to download object: %s' % s3_object_name)\n s3_object_download_name = s3_object_name + \".\" + \"download\"\n s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)\n log.info('s3_object_download_path: %s' % s3_object_download_path)\n log.info('downloading to filename: %s' % s3_object_download_name)\n # object_downloaded_status = bucket.download_file(s3_object_path, s3_object_name)\n object_downloaded_status = s3lib.resource_op({'obj': bucket,\n 'resource': 'download_file',\n 'args': [s3_object_name,\n s3_object_download_path],\n })\n if object_downloaded_status is False:\n raise TestExecError(\"Resource execution failed: object download failed\")\n if object_downloaded_status is None:\n log.info('object downloaded')\n if config.test_ops['delete_bucket_object'] is True:\n log.info('listing all objects in bucket: %s' % bucket.name)\n # objects = s3_ops.resource_op(bucket, 'objects', None)\n objects = s3lib.resource_op({'obj': bucket,\n 'resource': 'objects',\n 'args': None})\n log.info('objects :%s' % objects)\n # all_objects = s3_ops.resource_op(objects, 'all')\n all_objects = s3lib.resource_op({'obj': objects,\n 'resource': 'all',\n 'args': None})\n log.info('all objects: %s' % all_objects)\n for obj in all_objects:\n log.info('object_name: %s' % obj.key)\n log.info('deleting all objects in bucket')\n # objects_deleted = s3_ops.resource_op(objects, 'delete')\n objects_deleted = s3lib.resource_op({'obj': objects,\n 'resource': 'delete',\n 'args': None})\n log.info('objects_deleted: %s' % objects_deleted)\n if objects_deleted is False:\n raise TestExecError('Resource execution failed: Object deletion failed')\n if objects_deleted is not None:\n response = HttpResponseParser(objects_deleted[0])\n if response.status_code == 200:\n log.info('objects deleted ')\n else:\n raise TestExecError(\"objects deletion failed\")\n else:\n raise TestExecError(\"objects deletion failed\")\n # wait for object delete info to sync\n time.sleep(60)\n log.info('deleting bucket: %s' % bucket.name)\n # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')\n bucket_deleted_status = s3lib.resource_op({'obj': bucket,\n 'resource': 'delete',\n 'args': None})\n log.info('bucket_deleted_status: %s' % bucket_deleted_status)\n if bucket_deleted_status is not None:\n response = HttpResponseParser(bucket_deleted_status)\n if response.status_code == 204:\n log.info('bucket deleted ')\n else:\n raise TestExecError(\"bucket deletion failed\")\n else:\n raise TestExecError(\"bucket deletion failed\")\n test_info.success_status('test passed')\n sys.exit(0)\n except Exception as e:\n log.info(e)\n log.info(traceback.format_exc())\n test_info.failed_status('test failed')\n sys.exit(1)\n except TestExecError as e:\n log.info(e)\n log.info(traceback.format_exc())\n test_info.failed_status('test failed')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n project_dir = os.path.abspath(os.path.join(__file__, \"../../..\"))\n test_data_dir = 'test_data'\n TEST_DATA_PATH = (os.path.join(project_dir, test_data_dir))\n log.info('TEST_DATA_PATH: %s' % TEST_DATA_PATH)\n if not os.path.exists(TEST_DATA_PATH):\n log.info('test data dir not exists, creating.. 
')\n os.makedirs(TEST_DATA_PATH)\n parser = argparse.ArgumentParser(description='RGW S3 Automation')\n parser.add_argument('-c', dest=\"config\",\n help='RGW Test yaml configuration')\n args = parser.parse_args()\n yaml_file = args.config\n config = Config()\n config.max_objects = None\n if yaml_file is None:\n config.objects_count = 2\n config.objects_size_range = {'min': 10, 'max': 50}\n else:\n with open(yaml_file, 'r') as f:\n doc = yaml.load(f)\n config.objects_count = doc['config']['objects_count']\n config.objects_size_range = {'min': doc['config']['objects_size_range']['min'],\n 'max': doc['config']['objects_size_range']['max']}\n config.test_ops = doc['config']['test_ops']\n log.info('objects_count: %s\\n'\n 'objects_size_range: %s\\n'\n % (config.objects_count, config.objects_size_range))\n log.info('test_ops: %s' % config.test_ops)\n test_exec(config)\n","repo_name":"sunilangadi2/ceph-qe-scripts","sub_path":"rgw/v2/tests/multisite/test_Mbuckets_with_Nobjects.py","file_name":"test_Mbuckets_with_Nobjects.py","file_ext":"py","file_size_in_byte":9772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"1718763573","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom urlparse import urlparse\nimport re\nimport json\n\nfrom chromote import Chromote\n\nSTATUS_SELECTOR = \".player-controls .spoticon-pause-16\"\nARTIST_SELECTOR = \".track-info .track-info__artists a\"\nTRACK_SELECTOR = \".track-info .track-info__name a\"\n\nclass Py3status(object):\n def __tab_eval(self, tab, code):\n res = json.loads(tab.evaluate(code))\n\n if res[\"result\"].has_key(\"exceptionDetails\"):\n return \"\"\n\n return res[\"result\"][\"result\"][\"value\"]\n\n def muzyka(self):\n chrome = Chromote()\n\n for tab in chrome.tabs:\n u = urlparse(tab.url)\n\n if re.match(r\"[^.]*.spotify.com\", u.netloc):\n if self.__tab_eval(tab, 'document.querySelector(\"' + STATUS_SELECTOR + '\").getAttribute(\"title\")') == \"Pause\":\n artist_name = self.__tab_eval(tab, 'document.querySelector(\"' + ARTIST_SELECTOR + '\").innerHTML')\n track_name = self.__tab_eval(tab, 'document.querySelector(\"' + TRACK_SELECTOR + '\").innerHTML')\n\n return {\n \"full_text\": u\"♪ \" + artist_name + \" - \" + track_name,\n \"cached_until\": 1,\n }\n\n return {\n \"full_text\": \"\",\n \"cached_until\": 1,\n }\n\nif __name__ == \"__main__\":\n from py3status.module_test import module_test\n\n module_test(Py3status)\n","repo_name":"davirtavares/muzyka","sub_path":"muzyka.py","file_name":"muzyka.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41856818540","text":"import xarray as xr\nfrom ..utils import append_attr\nfrom tempfile import gettempdir\n\n\ndef lon_180W_180E(ds, lon_name=\"lon\"):\n \"\"\"\n Regrid the data to [-180 : 180] from [0 : 360]\n \"\"\"\n from numpy import isclose\n\n lon = ds[lon_name].values\n lon180 = (lon - 180) % 360 - 180\n if isclose(lon, lon180).all():\n return ds\n ds = append_attr(ds, \"regridded to [-180 : 180] from [0 : 360]\")\n return ds.assign_coords(**{lon_name: lon180}).sortby(lon_name)\n\n\ndef lon_0E_360E(ds, lon_name=\"lon\"):\n \"\"\"\n Regrid the data to [0 : 360] from [-180 : 180]\n \"\"\"\n from numpy import isclose\n\n lon = ds[lon_name].values\n lon360 = lon % 360\n if isclose(lon, lon360).all():\n return ds\n ds = ds.assign_coords(**{lon_name: lon360}).sortby(lon_name)\n ds = append_attr(ds, \"regridded to [0 : 360] from [-180 : 180]\")\n 
return ds\n\n\ndef coord_05_offset(ds, center=0.5, coord_name=\"lon\"):\n \"\"\"\n Interpolate data to grid centers.\n Only works for 1deg data\n\n Parameters\n ----------\n ds: xr.Dataset\n the dataset with a coordinate variable variable\n center: float\n the desired center point of the grid points between 0 - 1\n coord_name: str [lon]\n the name of the coordinate\n\n Returns\n -------\n xr.Dataset: interpolated onto the new grid with the new\n coord being the old coord + center\n \"\"\"\n\n def has_coords(ds, checklist=[\"time\", \"lat\", \"lon\"]):\n \"\"\"\n Check that data has coordinates\n \"\"\"\n matches = {key: (key in ds.coords) for key in checklist}\n if all(matches.values()):\n return 1\n else:\n return 0\n\n center = center - (center // 1)\n if has_coords(ds):\n coord = ds[coord_name].values\n mod = coord - (coord // 1)\n # use the modulus to determine if grid centers are correct\n if any(mod != center):\n ds = ds.interp({coord_name: coord + center})\n ds = ds.sel(lat=slice(-90, 90))\n\n return ds\n\n\ndef regrid(\n ds,\n weights_path=gettempdir(),\n res=1,\n like=None,\n mask=None,\n keep_attrs=True,\n verbose=True,\n recommendation=\"raise\",\n overwrite_weights=False,\n **kwargs,\n):\n \"\"\"\n Regrid data using xesmf\n\n Weights can be reused making this method extremely fast for regridding large amounts\n of data. Weights are automatically saved to disk.\n\n Parameters\n ----------\n ds: xr.Dataset\n weights_path: path-like str\n Can be one of three options\n 1. path to the directory where the weights will be saved (default names)\n 2. path to a file - will be created if it does not exist\n The default file name is the a similar format to the xesmf default\n name + a hash based on the lat and lon values of both datasets.\n The default directory is a temporary directory - warning - this will\n persist unless deleted. recommended that you change the dir.\n res: float\n resolution of the grid to interpolate to\n like: xr.Dataset\n dataset to use as a template for the new grid - ignores res if given\n **kwargs:\n passed to xesmf.Regridder. The method can be overwridden by passing\n method=''. extrap_method is set to nearest_s2d by default\n \"\"\"\n\n def get_latlon_str(ds):\n import numpy as np\n\n coords_list = [np.array(ds.coords[k]) for k in (\"lat\", \"lon\")]\n coords = np.concatenate(coords_list)\n coords_str = str(coords)\n return coords_str\n\n def make_hash(string, hash_length=6):\n from hashlib import sha1\n\n hash = sha1(string.encode(\"UTF-8\")).hexdigest()[:hash_length]\n return hash\n\n def make_default_filename(method, ds_in, ds_out):\n # e.g. 
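# --- Editor's note: quick numeric check of the longitude re-mapping used by
# lon_180W_180E() above, sending [0, 360) onto [-180, 180).
import numpy as np

lon = np.array([0., 90., 180., 270., 359.])
print((lon - 180) % 360 - 180)  # [   0.   90. -180.  -90.   -1.]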
bilinear_400x600_300x400.nc\n        iy = ds_in.lat.size\n        ix = ds_in.lon.size\n        oy = ds_out.lat.size\n        ox = ds_out.lon.size\n\n        hash = make_hash(get_latlon_str(ds_in) + get_latlon_str(ds_out))\n        filename = f\"xesmf-weights_{method}_in{iy}x{ix}_out{oy}x{ox}_{hash}.nc\"\n        return filename\n\n    def weights_to_netcdf(regridder, filename):\n        \"\"\"Save weights to disk as a netCDF file.\"\"\"\n        w = regridder.weights.data\n        dim = \"n_s\"\n        ds = xr.Dataset(\n            {\n                \"S\": (dim, w.data),\n                \"col\": (dim, w.coords[1, :] + 1),\n                \"row\": (dim, w.coords[0, :] + 1),\n            }\n        )\n        encoding = {k: {\"zlib\": True, \"complevel\": 1} for k in ds.data_vars}\n        ds.to_netcdf(filename, encoding=encoding)\n\n    def vprint(*args, **kwargs):\n        if verbose:\n            print(*args, **kwargs)\n\n    import os\n    import xesmf as xe\n    import xarray as xr\n    from ..files.utils import is_path_exists_or_creatable\n    from warnings import filterwarnings\n\n    filterwarnings(\"ignore\", category=UserWarning, module=\"xesmf\")\n\n    assert \"lat\" in ds.coords, \"Data must have lat coordinate\"\n    assert \"lon\" in ds.coords, \"Data must have lon coordinate\"\n\n    if (like is not None) and (mask is not None):\n        raise ValueError(\"`like` and `mask` cannot both be given\")\n    elif (like is None) and (mask is None):\n        like = xe.util.grid_global(res, res, cf=True)\n    elif mask is not None:\n        assert isinstance(mask, xr.DataArray), \"mask must be an xr.DataArray\"\n        like = mask.astype(int).to_dataset(name=\"mask\")\n        assert \"mask\" in ds.data_vars, \"Data must have mask variable if `mask` is given\"\n\n    _is_interp_best(ds.lat, ds.lon, like.lat, like.lon, recommendation)\n\n    method = kwargs.pop(\"method\", \"bilinear\")\n\n    m = \"bilinear and conservative interpolation\"\n    vprint(f\"xesmf will be used for {m}\")\n\n    # THIS SECTION IS TO DEAL WITH SAVING THE WEIGHTS\n    # if the given path is not a file, then create a default filename\n    path_valid = is_path_exists_or_creatable(weights_path)\n    file_exist = os.path.isfile(weights_path)\n    if not path_valid:\n        raise ValueError(\n            f\"{weights_path} is not path-like. Must be \"\n            \"a creatable or existing file or directory\"\n        )\n    elif path_valid and not file_exist:\n        default_sname = make_default_filename(method, ds, like)\n        weights_path = os.path.join(weights_path, default_sname)\n        file_exist = os.path.isfile(weights_path)\n    else:\n        # path is valid and file exists\n        # this is just so that I know I'm not missing any options\n        pass\n    weights_path = os.path.abspath(os.path.expanduser(weights_path))\n\n    if file_exist and overwrite_weights:\n        vprint(f\"Overwriting weights file: {weights_path}\")\n        os.remove(weights_path)\n        file_exist = False\n        kwargs[\"weights\"] = None\n    elif file_exist:\n        vprint(f\"Loading weights from file {weights_path}\")\n        kwargs[\"weights\"] = xr.open_dataset(weights_path)\n    else:\n        vprint(f\"Creating weights (could take some time) and saving to {weights_path}\")\n        kwargs[\"weights\"] = None\n\n    props = dict(extrap_method=\"nearest_s2d\")\n    props.update(**kwargs)\n\n    try:\n        regridder = xe.Regridder(ds, like, method, **props)\n    except ValueError as e:\n        raise ValueError(\n            \"invalid entry in coordinates array. 
\"\n \"Weights file may not match the desired resolution\"\n )\n\n if not file_exist:\n weights_to_netcdf(regridder, weights_path)\n\n if not file_exist:\n weights_to_netcdf(regridder, weights_path)\n\n interpolated = regridder(ds)\n new_attrs = interpolated.attrs\n\n if keep_attrs:\n interpolated.attrs = {}\n interpolated = interpolated.assign_attrs(**ds.attrs)\n if isinstance(ds, xr.Dataset):\n for k in interpolated.data_vars:\n interpolated[k] = interpolated[k].assign_attrs(**ds[k].attrs)\n\n interpolated = interpolated.assign_attrs(regrid_weights=weights_path, **new_attrs)\n interpolated = append_attr(interpolated, f\"regridded with xesmf using {method}\")\n\n return interpolated\n\n\ndef interp(ds, res=1, like=None, method=\"linear\", recommendation=\"warn\", **kwargs):\n \"\"\"\n Interpolate and fill the longitude gap in a dataset\n\n Parameters\n ----------\n ds: xr.Dataset\n lon_name: str\n name of the longitude coordinate\n roll_by: int\n number of grid points to roll the data by - must be more than the gap\n if interpolating from low to high resolution this will be a problem\n **kwargs:\n passed to xr.interp\n \"\"\"\n if (\"lat\" in kwargs) or (\"lon\" in kwargs):\n like = xr.DataArray(\n dims=[\"lat\", \"lon\"],\n coords={\"lat\": kwargs.pop(\"lat\"), \"lon\": kwargs.pop(\"lon\")},\n )\n elif like is None:\n like = _make_like_array(res)\n\n assert (\"lat\" in like.coords) and (\n \"lon\" in like.coords\n ), \"'like' must have lat and lon coordinates\"\n\n _is_interp_best(ds.lat, ds.lon, like.lat, like.lon, recommendation)\n\n props = dict(**kwargs)\n props.update(method=method)\n roll_by = int(like.lon.size // 3)\n interpolated = (\n ds.interp_like(like, **props)\n .roll(**{\"lon\": roll_by}, roll_coords=False)\n .interpolate_na(\"lon\", limit=int(roll_by / 2))\n .roll(**{\"lon\": -roll_by}, roll_coords=False)\n )\n\n interpolated = append_attr(\n interpolated,\n f\"interpolated to {res}deg resolution using {method} interpolation\",\n )\n\n return interpolated\n\n\ndef coarsen(ds, res_out=1.0):\n \"\"\"\n Coarsen a dataset to a given resolution\n Will return an error if coarsening is not suitable\n\n Parameters\n ----------\n ds: xr.Dataset\n res_out: float\n desired resolution\n\n Returns\n -------\n xr.Dataset\n \"\"\"\n\n from ..utils import append_attr\n import numpy as np\n\n res_in = np.around(float(ds.lat.diff(\"lat\").mean()), 4)\n res_out = np.around(res_out, 4)\n ratio = res_out / res_in\n if abs(ratio - np.round(ratio)) > 0.05:\n raise ValueError(\n f\"The input resolution ({res_in}) and \"\n f\"output resolution ({res_out}) are not \"\n \"divisible to an intiger\"\n )\n coarsen_step = np.int32(np.round(ratio))\n\n coord_func = lambda x, **kwargs: np.round(np.mean(x, **kwargs), 3)\n\n ds = append_attr(\n ds, f\"coarsened resolution from {res_in:.3g}deg to {res_out:.3g}deg\"\n )\n coarse = ds.coarsen(lat=coarsen_step, lon=coarsen_step, coord_func=coord_func)\n\n return coarse\n\n\ndef _create_time_bnds(time_left):\n import numpy as np\n\n t = time_left.values\n dt = np.nanmedian(np.diff(t))\n t = np.concatenate([t, [t[-1] + dt]])\n\n time_bnds = xr.DataArray(\n np.c_[t[:-1], t[1:]],\n dims=[time_left.name, \"bnds\"],\n coords={time_left.name: time_left},\n attrs={\n \"description\": (\n \"time bands. 
note that time dimension \" \"is left aligned to the band\"\n            )\n        },\n    )\n    return time_bnds\n\n\ndef resample(ds, func=\"mean\", **kwargs):\n    \"\"\"\n    Resample time resolution and add a time_bnds coordinate\n\n    Parameters\n    ----------\n    ds: xr.Dataset\n    func: str\n        function to apply to the data\n    **kwargs:\n        resampling dimension and frequency passed to\n        xarray's resample, e.g. time='1D'\n\n    Returns\n    -------\n    ds: xr.Dataset\n    \"\"\"\n    from ..utils import append_attr\n\n    ds_res = ds.resample(**kwargs)\n    ds_out = getattr(ds_res, func)(keep_attrs=True)\n    dim = list(kwargs)[0]\n    res = kwargs[dim]\n    if isinstance(ds, xr.DataArray):\n        ds_out = append_attr(ds_out, f\"resampled {dim} to {res} using `{func}`\")\n    elif isinstance(ds, xr.Dataset):\n        ds_out[\"time_bands\"] = _create_time_bnds(ds_out.time)\n        ds_out = ds_out.append_attrs(\n            history=f\"resampled {dim} to {res} using `{func}` and added time_bands\"\n        )\n    return ds_out\n\n\ndef _is_interp_best(iy, ix, oy, ox, recommendation=\"warn\"):\n    from warnings import warn\n\n    idx = ix.diff(\"lon\", 1).median().values\n    idy = iy.diff(\"lat\", 1).median().values\n    odx = ox.diff(\"lon\", 1).median().values\n    ody = oy.diff(\"lat\", 1).median().values\n    ratio_x = odx / idx\n    ratio_y = ody / idy\n    if (ratio_x > 2) | (ratio_y > 2):\n        message = (\n            \"The output grid is less than half the resolution of the input grid. \"\n            \"Interpolation may not be the best approach. \"\n            f\"Consider using da.coarsen(lat={ratio_y:.0f}, lon={ratio_x:.0f}).mean()\"\n        )\n        if recommendation == \"warn\":\n            warn(message)\n        elif recommendation == \"raise\":\n            raise ValueError(message)\n        elif recommendation == \"ignore\":\n            pass\n\n\ndef _make_like_array(resolution):\n    import xarray as xr\n    import numpy as np\n\n    r = resolution\n    grids = xr.DataArray(\n        dims=[\"lat\", \"lon\"],\n        coords={\n            \"lat\": np.arange(-90 + r / 2, 90, r),\n            \"lon\": np.arange(-180 + r / 2, 180, r),\n        },\n    )\n\n    return grids\n\n\ndef estimate_grid_spacing(coord):\n    \"\"\"\n    Estimate the grid spacing from a coordinate\n\n    Parameters\n    ----------\n    coord: xr.DataArray\n        coordinate\n\n    Returns\n    -------\n    float\n        grid spacing\n    \"\"\"\n    import numpy as np\n\n    delta_x = np.diff(np.array(coord))\n    delta_x_u = np.unique(delta_x)\n    if len(delta_x_u) > 1:\n        return np.median(delta_x)\n    elif len(delta_x_u) == 1:\n        return delta_x_u[0]\n","repo_name":"lukegre/all_my_code","sub_path":"all_my_code/munging/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":13380,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"9668337660","text":"print(\"Saludos usuario, ingrese el numero del menu al que desea ingresar\")\nprint(\"1= suma o resta, 2= potencia, 3 = promedio, presione cualquier otro numero para salir del programa\")\nmenu=input()\nif menu == \"1\":\n    print(\"Que operacion desea realizar: suma ó resta\")\n    operacion = input()\n    operacion = operacion.lower()\n    if operacion == \"suma\":\n        print(\"ingrese los valores a sumar\")\n        num1= int(input(\"Ingrese el primer valor: \"))\n        num2= int(input(\"Ingrese el segundo valor: \"))\n        suma=num1+num2 \n        print(\"El resultado de\",num1,\"+\",num2,\"=\",suma)\n    elif operacion == \"resta\":\n        print(\"ingrese los valores a restar\")\n        num1= int(input(\"Ingrese el primer valor: \"))\n        num2= int(input(\"Ingrese el segundo valor: \"))\n        resta=num1-num2 \n        print(\"El resultado de\",num1,\"-\",num2,\"=\",resta)\nelif menu==\"2\":\n    print(\"Ingrese los valores de la base y del exponente\")\n    num1= int(input(\"Ingrese el 
valor de la base: \"))\n num2= int(input(\"Ingrese el valor del exponente: \"))\n pot=num1**num2\n print(\"El resultado del exponente de base\",num1,\"con exponente\",num2,\"es igual a\",pot)\nelif menu==\"3\":\n print(\"Ingrese las notas a promediar\")\n num1= int(input(\"Ingrese la nota al 80%: \"))\n num2= int(input(\"Ingrese la nota al 20%: \"))\n prom = num1*0.8 + num2*0.2\n print(\"El promedio entre\",num1,\"y\",num2,\"da como resultado:\",prom)\nprint(\"Adios que tenga un buen dia\")","repo_name":"Matias-Gutierrez/practica-con-python","sub_path":"1°Semestre/clases/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23630161391","text":"from string import maketrans \r\ninp=\"ynficwlbkuomxsevzpdrjgthaq\"\r\nout=\"abcdefghijklmnopqrstuvwxyz\"\r\ntrans = maketrans(inp, out)\r\na=open('C:/Users/chetan/Desktop/A-small-attempt2.in','r')\r\nb=a.readline()\r\nd=a.readlines()\r\ni=0\r\nwhile i a new type # 一种创建一个新类\n 参数1/类名 -str ,即类的__name__属性\n 参数2/继承的父类 -tuple ,可不传,即类的__base__属性\n 参数3/方法和属性 -dict,即类的__dict__属性\n'''\nprint(Test) # \nprint(Test.__bases__) # (,)\n'''# Python 为所有类都提供了一个 bases 属性,\n通过该属性可以查看该类的所有直接父类,该属性返回所有直接父类组成的元组。\n注意是直接父类!!!\n'''\n\n# 元类的方法调用\nt = Test()\nt.function01() # -----这个是self-----\n\n\n# 用type元类创建类,等效于下面这个\nclass Test1:\n attr = 100\n __attr2 = 200\n\nprint(Test1) # \n\n'''\nTest = type('Test111', (object,), {\"attr\": 100, \"__attr2\": 200, \"function01\": func})\nprint(Test) # \n Test111 是真正的类名\n Test 是对类名的引用\n'''","repo_name":"langdawang678/Py","sub_path":"A_OOP/c19a7使用type动态定义类.py","file_name":"c19a7使用type动态定义类.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12808541433","text":"#!/usr/bin/env python\n# import os, sys\n# sys.path.append(os.path.abspath('infomap'))\n\n# from __future__ import print_function # Python 3 print function in Python 2\nfrom infomap import infomap\n\nconf = infomap.init(\"--two-level -v -N2\")\n# Add output directory (and output name) to automatically write result to file\n# conf = infomap.init(\"--two-level -v -N2 . --out-name test\")\n\nprint(\"Creating network...\")\nnetwork = infomap.Network(conf)\n\nnames = list(\"ABCDEF\")\nnetwork.addNodes(names)\n\nnetwork.addLink(0, 1)\nnetwork.addLink(0, 2)\nnetwork.addLink(0, 3)\nnetwork.addLink(1, 0)\nnetwork.addLink(1, 2)\nnetwork.addLink(2, 1)\nnetwork.addLink(2, 0)\nnetwork.addLink(3, 0)\nnetwork.addLink(3, 4)\nnetwork.addLink(3, 5)\nnetwork.addLink(4, 3)\nnetwork.addLink(4, 5)\nnetwork.addLink(5, 4)\nnetwork.addLink(5, 3)\n\nprint(\"Num links: %d\" % network.numLinks())\n\nnetwork.finalizeAndCheckNetwork()\n\ntree = infomap.HierarchicalNetwork(conf)\n\ninfomap.run(network, tree)\n\nprint(\"Found %d top modules with codelength: %f\" % (tree.numTopModules(), tree.codelength()))\n\ncommunities = {}\nclusterIndexLevel = 1 # 1, 2, ... or -1 for top, second, ... 
or lowest cluster level\nprint(\"Tree:\")\nfor node in tree.treeIter(clusterIndexLevel):\n\tprint(\"%d %s %f %s\" % (node.clusterIndex(), \" \" * node.depth(), node.data.flow, node.data.name))\n\tif node.isLeafNode():\n\t    communities[node.originalLeafIndex] = node.clusterIndex()\n\nprint(\"Communities: %s\" % communities)\n\nprint(\"Done!\")\n","repo_name":"RapidsAtHKUST/CommunityDetectionCodes","sub_path":"NonOverlappingCodes/2009-Community-Infomap-MapEquation/examples/python/Infomap.py","file_name":"Infomap.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"61"} +{"seq_id":"14106307724","text":"class Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        arr = [0] * len(prices)\n        ans = 0\n        minValue = prices[0]\n        for i in range(1, len(prices)):\n            arr[i] = max(arr[i - 1], prices[i] - minValue)\n            minValue = min(minValue, prices[i])\n            ans = max(ans, arr[i])\n        nPrices = prices.copy()\n        for i in range(1, len(prices)):\n            nPrices[i] -= arr[i - 1]\n        minValue = nPrices[0]\n        for i in range(1, len(prices)):\n            ans = max(ans, prices[i] - minValue)\n            minValue = min(minValue, nPrices[i])\n        return ans\n","repo_name":"Sol-cito/LeetCoding","sub_path":"best-time-to-buy-and-sell-stock-iii/best-time-to-buy-and-sell-stock-iii.py","file_name":"best-time-to-buy-and-sell-stock-iii.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36370625474","text":"import base64\nimport codecs\nimport hashlib\nimport json\nimport time\nimport random\nimport string\nfrom datetime import datetime, date\n\n\ndef transformation(name, key):\n    def wrapper(func):\n        MY_TRANSFORMATIONS.append((name, key, func))\n        return func\n    return wrapper\n\n\nMY_TRANSFORMATIONS = []\n\n\n@transformation(\"MD5\", \"md5\")\ndef md5_transform(v, i, secs):\n    \"\"\"Compute the MD5 hash of the string v\"\"\"\n    return hashlib.md5(v.encode()).hexdigest()\n\n\n@transformation(\"Reverse\", \"reverse\")\ndef reverse_transform(v, i, secs):\n    \"\"\"Reverse the string v\"\"\"\n    return v[::-1]\n\n\n@transformation(\"Base64 Encode\", \"base64_encode\")\ndef base64_encode_transform(v, i, secs):\n    \"\"\"Base64-encode the string v\"\"\"\n    return base64.b64encode(v.encode()).decode()\n\n\n@transformation(\"Base64 Decode\", \"base64_decode\")\ndef base64_decode_transform(v, i, secs):\n    \"\"\"Base64-decode the string v\"\"\"\n    return base64.b64decode(v.encode()).decode()\n\n\n@transformation(\"Eval\", \"eval\")\ndef eval_transform(v, i, secs):\n    \"\"\"Evaluate the string v with eval\"\"\"\n    try:\n        return str(eval(v))\n    except Exception as e:\n        return f\"Error: {str(e)}\"\n\n\n@transformation(\"Time<->Timestamp\", \"time_timestamp\")\ndef time_timestamp_transform(v, i, secs):\n    \"\"\"\n    Convert between time strings and timestamps:\n    if v is a valid time string, convert it to the corresponding timestamp string;\n    if v is a valid timestamp string, convert it to the corresponding time string;\n    if v is an empty string, return the current time formatted as %Y-%m-%d %H:%M:%S;\n    otherwise return an empty string\n    \"\"\"\n\n    if not v:\n        # v is an empty string, so return the current time as a string\n        return datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    try:\n        # try to parse v as a time string\n        if \".\" in v:\n            dt = datetime.strptime(v, \"%Y-%m-%d %H:%M:%S.%f\")\n        else:\n            dt = datetime.strptime(v, \"%Y-%m-%d %H:%M:%S\")\n        return str(int(time.mktime(dt.timetuple())))\n    except ValueError:\n        pass\n\n    try:\n        # try to parse v as a timestamp string\n        timestamp = int(v)\n        dt = datetime.fromtimestamp(timestamp)\n        return dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n    except ValueError:\n        return \"\"\n\n\n\n@transformation(\"Unicode -> UTF-8\", \"unicode_to_utf8\")\ndef unicode_to_utf8_transform(v, i, secs):\n    
\"\"\"将 Unicode 编码的字符串 v 转换成 UTF-8 编码的字符串\"\"\"\n try:\n return v.encode('utf-8').decode('unicode_escape')\n except Exception as e:\n return \"Error occurred: {str(e)}\"\n\n\n@transformation(\"UTF-8 -> Unicode\", \"utf8_to_unicode\")\ndef utf8_to_unicode_transform(v, i, secs):\n \"\"\"将 UTF-8 编码的字符串 v 转换成 Unicode 编码的字符串\"\"\"\n try:\n return v.encode('unicode_escape').decode('utf-8')\n except Exception as e:\n return \"Error occurred: {str(e)}\"\n\n@transformation(\"ReplaceChars\", \"replace_chars\")\ndef replace_chars(v, i, secs):\n # 获取所有大小写字母和数字的字符串\n chars = string.ascii_lowercase + string.ascii_uppercase\n # 生成替换规则,用字典来存储\n replace_dict = {}\n for char in chars:\n if char.islower():\n replace_char = random.choice(string.ascii_lowercase.replace(char, ''))\n else:\n replace_char = random.choice(string.ascii_uppercase.replace(char, ''))\n replace_dict[char] = replace_char\n # 生成数字替换规则,加入到替换规则字典中\n replace_dict.update({str(i): str(random.randint(1, 9)) for i in range(10)})\n # 调用translate()方法替换字符串中的字符\n return v.translate(str.maketrans(replace_dict))","repo_name":"entimm/enmulti","sub_path":"nice_transform_funcs.py","file_name":"nice_transform_funcs.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21150836751","text":"import matplotlib as mpl\n\nmpl.use(\"Agg\")\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nimport random\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n\n# POLYNOMIAL LINEAR REGRESSION \ndef polynomialLinearRegression(Xtest, Xtrain, Ytest, Ytrain, dataSet):\n \n # Formatting the dataSets for analysis\n XTrain = Xtrain[:,:].transpose()[1:,:].tolist()[0]\n XTest = Xtest[:,:].transpose()[1:,:].tolist()[0]\n YTrain = Ytrain[:,:].transpose()[1:,:].tolist()[0]\n YTest = Ytest[:,:].transpose()[1:,:].tolist()[0]\n\n # PLR doesn't need a train test split, so the individual components \n # are combined to form the original dataset (except now its pre-processed)\n X_combined = np.r_[XTrain, XTest]\n Y_combined = np.r_[YTrain, YTest]\n\n # Manually casting to int \n X_combined = np.array(X_combined, dtype='int')\n Y_combined = np.array(Y_combined, dtype='int')\n\n lin_reg = LinearRegression()\n lin_reg.fit(X_combined.reshape(-1,1), Y_combined)\n\n # Creating a polynomial regressor\n poly_reg = PolynomialFeatures(degree=3)\n # Transforming X from just X to X + its polynomial terms\n X_Comb_Poly = X_combined.reshape(-1,1)\n X_poly = poly_reg.fit_transform(X_Comb_Poly)\n\n # New linear regression fitted onaugmented X matrix and \n # original Y vector. 
\n    lin_reg2 = LinearRegression()\n    lin_reg2.fit(X_poly, Y_combined)\n\n    plt.title(f'Polynomial Linear Regression for {dataSet}')\n\n    # Scattering actual results\n    plt.scatter(X_combined, Y_combined, color = 'red', label ='Actual')\n\n    # Plotting predicted values via linear regression\n    plt.plot(X_combined, \n            lin_reg.predict(X_combined.reshape(-1,1)), \n            color = 'blue', \n            label = 'Linear')\n\n    print(f'{X_poly}============')\n\n    X_combined_Plot = X_combined.tolist()\n    X_poly_Plot = X_poly.tolist()\n\n    # Plotting predicted values via polynomial regression\n    plt.plot(sorted(X_combined_Plot),\n            lin_reg2.predict(sorted(X_poly_Plot)), \n            # lin_reg2.predict(poly_reg.fit_transform(X_combined.reshape(-1,1))), \n            color = 'green', \n            label = 'Polynomial')\n\n\n    plt.legend() \n\n    filename = f'{random.randint(100,999)}'\n    plt.savefig(f'../QuickML/webapp/static/{filename}.jpg')\n\n    x = f'../QuickML/webapp/static/{filename}.jpg'\n\n    # clears the matplotlib cache so other figures can be \n    # created and saved \n    plt.clf()\n\n    return x","repo_name":"Vladi756/QuickML","sub_path":"sourceCode/regression/Polynomial_Linear_Regression.py","file_name":"Polynomial_Linear_Regression.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"603858168","text":"#by WarmeWindel https://github.com/WarmeWindel!\r\n\r\nfrom time import sleep\r\nimport pyautogui\r\n\r\nmessage = 0\r\ncount = 0\r\ndelay = 0\r\n\r\n\r\nmessage = input(\"Enter your message:\")\r\ncount = int(input(\"Enter how many times you want to type your message:\"))\r\ndelay = int(input(\"Enter your delay:\"))\r\n\r\nprint(\"starting\")\r\nprint(\"click where you want to type! starting in 5 seconds\")\r\nsleep(5)\r\n\r\nwhile count >= 0:\r\n    pyautogui.write(message)\r\n    sleep(delay)\r\n    pyautogui.press('enter')\r\n    count -= 1 \r\nelse:\r\n    exit() \r\n\r\nexit() \r\n\r\n\r\n","repo_name":"WarmeWindel/spambot","sub_path":"spamer.py","file_name":"spamer.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30833160948","text":"#dependencies\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import metrics\r\n#from sklearn.cross_validation import train_test_split\r\n#from sklearn.model_selection import cross_val_score\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nlambdas = list(1/np.logspace(-30,np.log(5),num=1000,endpoint = True,base =10))\r\n\r\n#reading data from file\r\ndata = pd.read_csv(\"train.csv\")\r\nX = data['X'].tolist()\r\ny = data['y'].tolist()\r\nprint(max(y),\"maxy\")\r\nprint(min(y),\"miny\")\r\nprint(np.mean(y),\"meany\")\r\n\r\n#making X features in dataframe\r\ndata_x_features = []\r\ncolumns = ['X1','X2','X3','X4','X5','X6','X7','X8','X9']\r\n\r\n#creating data features i.e. 
powers of x\r\nfor i in range(len(columns)):\r\n temp = [X[j]**i for j in range(len(X))]\r\n data_x_features.append(temp)\r\n\r\n#inserting all data in dataframe\r\ndf = pd.DataFrame(columns = columns)\r\nfor i in range(len(columns)):\r\n df[columns[i]] = data_x_features[i]\r\n \r\n#converting y values to classes\r\nmean = np.mean(y)\r\ny = [1 if i >= mean else 0 for i in y]\r\ndf['y'] = y\r\ndf = df.sample(frac=1).reset_index(drop=True)\r\nX_ = np.array(df.drop(['y'],1))\r\ny_ = np.array(df['y'])\r\n\r\nprint(len(X_))\r\nprint(len(y_))\r\n#cross validation\r\nkfolds = 10\r\nsplit = int(len(X_)/kfolds)\r\n\r\n\r\nlambda_score_pair = []\r\nfor curr_lambda in lambdas:\r\n cross_val_scores=[]\r\n for i in range(kfolds):\r\n X_test = X_[split*i:split+split*i]\r\n Y_test = y_[split*i:split+split*i]\r\n X_train = np.delete(X_,[i for i in range(split*i,split+split*i)],0)\r\n Y_train = np.delete(y_,[i for i in range(split*i,split+split*i)],0)\r\n \r\n logreg = LogisticRegression(penalty = 'l1', C = curr_lambda,fit_intercept=True, max_iter=5000,\r\n solver = 'liblinear')\r\n \r\n cross_val_scores.append(logreg.fit(X_train,Y_train).score(X_test,Y_test))\r\n lambda_score_pair.append( np.mean(cross_val_scores))\r\n\r\nprint(max(lambda_score_pair),\"maxl\")\r\nmod_lambda_score_pair = [(1-i) for i in lambda_score_pair]\r\nprint(min(mod_lambda_score_pair),\"minerror\")\r\nprint(max(mod_lambda_score_pair),\"maxerror\")\r\nprint(np.mean(mod_lambda_score_pair),\"meanerror\")\r\nind = (mod_lambda_score_pair.index(min(mod_lambda_score_pair)))\r\nprint(ind)\r\nmod_lambdas = [1/i for i in lambdas]\r\n#print(mod_lambdas)\r\nplt.plot(mod_lambdas,mod_lambda_score_pair)\r\nplt.show()\r\n\r\nbest_lambda = mod_lambdas[ind]\r\nprint(best_lambda,\"best\")\r\ntest_data = pd.read_csv('test.csv')\r\nt_X = test_data['X'].tolist()\r\nt_y = test_data['y'].tolist()\r\nprint(len(test_data),\"test\")\r\n#making X features in dataframe\r\nt_data_x_features = []\r\ncolumns = ['X1','X2','X3','X4','X5','X6','X7','X8','X9']\r\n\r\n#creating data features i.e. 
powers of x\r\nfor i in range(len(columns)):\r\n temp = [t_X[j]**i for j in range(len(t_X))]\r\n t_data_x_features.append(temp)\r\n\r\n#inserting all data in dataframe\r\nt_df = pd.DataFrame(columns = columns)\r\nfor i in range(len(columns)):\r\n t_df[columns[i]] = t_data_x_features[i]\r\n \r\n#converting y values to classes\r\nmean = np.mean(t_y)\r\nt_y = [1 if i >= mean else 0 for i in t_y]\r\nt_df['y'] = t_y\r\nt_df = t_df.sample(frac=1).reset_index(drop=True)\r\nt_X_ = np.array(t_df.drop(['y'],1))\r\nt_y_ = np.array(t_df['y'])\r\n\r\nlogreg_t = LogisticRegression(penalty = 'l2', C = best_lambda,fit_intercept=True, max_iter=200,\r\n solver = 'liblinear')\r\nfit_t = logreg_t.fit(X_train,Y_train)\r\nprint(fit_t.score(t_X_,t_y_))\r\n\r\nres = list(fit_t.predict(t_X_))\r\nTP = 0\r\nFP = 0\r\nTN = 0\r\nFN = 0\r\n\r\nfor i in range(len(res)):\r\n if list(t_y_)[i] == 0 and res[i] == 0:\r\n TN += 1\r\n if list(t_y_)[i] == 1 and res[i] == 1:\r\n TP += 1\r\n if list(t_y_)[i] == 1 and res[i] == 0:\r\n FN += 1\r\n if list(t_y_)[i] == 0 and res[i] == 1:\r\n FP += 1\r\n\r\nprint(TP,TN,FP,FN)\r\nprint((FN+FP)/(FN+FP+TN+TP))\r\n \r\n\r\n \r\n\r\n\r\n","repo_name":"Arghyadeep/Machine-learning-UCSC","sub_path":"Homework1/hw1_ver2.py","file_name":"hw1_ver2.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37349885006","text":"import requests\n\n\ndef getTopFoodRatedOutlets(city):\n response = requests.get(\"https://jsonmock.hackerrank.com/api/food_outlets?city=Seattle&page=1\")\n result = response.json()\n total_pages = result['total_pages']\n obj = {}\n for i in range(1, total_pages + 1):\n query = {'city': city, 'page': i}\n response = requests.get(\"https://jsonmock.hackerrank.com/api/food_outlets\", params=query)\n result = response.json()\n for j in range(len(result['data'])):\n obj[result['data'][j]['name']] = result['data'][j]['user_rating']['average_rating']\n\n print(obj)\n sorted_obj = sorted(obj.items(), key=lambda x:x[1], reverse=True)\n print(sorted_obj)\n\n\ngetTopFoodRatedOutlets(\"seattle\")\n","repo_name":"akhileshwar1/algo-data-structs","sub_path":"hacker_rank/food_api.py","file_name":"food_api.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2027960244","text":"'''\nFaça um programa que peça ao usuário digitar um número inteiro,\ninforme se este número é par ou ímpar. Caso o usuário não\ndigite um número inteiro, informe que não é um número inteiro.\n'''\n#Solução\n\nnum1 = input('Digite um número: ')\nif (num1.isdigit()) :\n num1 = int(num1)\n r = num1%2\n if(r==0):\n print('O número {0} é par {1}'.format(num1, r))\n else:\n print('O número {0} é ímpar {1}'.format(num1, r))\nelse:\n print('Digite um valor válido')\n\n'''\nFaça um programa que pergunte a hora ao usuário e, \nbaseando-se no horário descrito, exiba a saudação apropriada. ex.: Bom \ndia 0-11, Boa tarde 12-17 e Boa noite 18-23\n'''\nhora = int(input('Olá Digite a Hora: '))\nminuto = int(input('Digite o Mínuto: '))\nif (hora >=0 and hora <=11) :\n print('Bom dia!!!')\nelif (hora >11 and hora<=17 and minuto <=59) :\n print('Boa tarde!!!')\nelse:\n print('Boa noite!!! ')\n'''\nFaça um programa que peça o primeiro nome do usuário. 
Se o nome tiver\n 4 letras ou menos escreva “Seu nome é Curto”; se tiver entre 5 e 6 letras, \nescreva “seu nome é médio”; maior que 6 escreva “Seu nome é extenso\n'''\nusuario = input('Digite o seu nome: ')\nqtCaractere = len(usuario)\nif(qtCaractere<=4):\n print('Seu nome é curto')\nelif (qtCaractere<=7):\n print('Nome médio')\nelse:\n print ('Nome extenso')\n\n","repo_name":"luizhenriquefernandes/python","sub_path":"exercicios/2exercicios.py","file_name":"2exercicios.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18844064910","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"create_list\",views.create_list,name=\"create_list\"),\n path(\"view_list/\",views.listing_page,name=\"listing_page\"),\n path(\"comments/\",views.comment_view,name=\"comment_view\"),\n path(\"watchlist\",views.watchlist_view,name=\"watchlist\"),\n path(\"cateogry\",views.cateogry,name=\"grouplist\"),\n path(\"cateogry/\",views.cateogry,name=\"grouplist\"),\n path(\"close/\",views.close,name=\"close_listing\"),\n path(\"mylist\",views.mylist,name=\"mylist\"),\n path(\"wonlist\",views.wonlist,name=\"wonlist\")\n]\n","repo_name":"mhodsaifansari/commerce","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39552126133","text":"\r\nimport time\r\nfrom numpy.random import randint\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef selection_sort( array ):\r\n counter = 0\r\n n = len( array )\r\n for i in range( n - 1 ): \r\n min_idx = i\r\n for j in range( i + 1, n ):\r\n if array[j] < array[min_idx] :\r\n min_idx = j\r\n counter += 1\r\n if min_idx != i :\r\n temp = array[i]\r\n array[i] = array[min_idx]\r\n array[min_idx] = temp\r\n\r\n return [array, counter]\r\n\r\n\r\ndef insertion_sort(array):\r\n counter = 0\r\n for step in range(1, len(array)):\r\n key = array[step]\r\n j = step - 1 \r\n counter += 1 \r\n while j >= 0 and key < array[j]:\r\n array[j + 1] = array[j]\r\n j = j - 1\r\n array[j + 1] = key\r\n return [array, counter]\r\n\r\n\r\ndef merge_sort(array):\r\n counter = 0\r\n if len(array) > 1:\r\n r = len(array)//2\r\n L = array[:r]\r\n M = array[r:]\r\n\r\n merge_sort(L)\r\n merge_sort(M)\r\n\r\n i = j = k = 0\r\n\r\n while i < len(L) and j < len(M):\r\n counter += 1\r\n if L[i] < M[j]:\r\n array[k] = L[i]\r\n i += 1\r\n else:\r\n array[k] = M[j]\r\n j += 1\r\n k += 1\r\n counter += 1\r\n\r\n while i < len(L):\r\n array[k] = L[i]\r\n i += 1\r\n k += 1\r\n counter += 1\r\n counter += 1 \r\n\r\n while j < len(M):\r\n counter += 1\r\n array[k] = M[j]\r\n j += 1\r\n k += 1\r\n counter += 1\r\n\r\n return [array, counter]\r\n\r\n\r\ndef shell_sort(array):\r\n counter = 1\r\n n = len(array)\r\n interval = n // 2\r\n while interval > 0:\r\n for i in range(interval, n):\r\n temp = array[i]\r\n j = i\r\n counter += 1\r\n while j >= interval and array[j - interval] > temp:\r\n array[j] = array[j - interval]\r\n j -= interval\r\n array[j] = temp\r\n interval //= 2\r\n return [array, counter]\r\n\r\n\r\nlist_values = [7, 8, 9, 10, 11, 12, 13, 14, 15]\r\n\r\nselection_count = []\r\ninsertion_count = 
[]\r\nmerge_count = []\r\nshell_count = []\r\n\r\nselection_time = []\r\ninsertion_time = []\r\nmerge_time = []\r\nshell_time = []\r\n\r\nfor value in list_values:\r\n\r\n array = randint(0, 100000, 2**value)\r\n\r\n # array = []\r\n # for val in range(0, 2**value):\r\n # array.append(value)\r\n # array.reverse()\r\n\r\n # list_count = np.array([0, 100000])\r\n # list_x = [0, 7, 7, 9, 10, 11, 12, 13, 14, 15]\r\n\r\n # print(\"Selection sort\")\r\n start_time = time.time()\r\n new_array = selection_sort(array)\r\n end_time = time.time() - start_time\r\n selection_time.append(end_time)\r\n # selection_count.append(int(new_array[1]))\r\n # print(end_time)\r\n # print(new_array[0])\r\n # print(new_array[1])\r\n\r\n # print(\"Insertion sort\")\r\n start_time = time.time()\r\n new_array = insertion_sort(array)\r\n end_time = time.time() - start_time\r\n insertion_time.append(end_time)\r\n # insertion_count.append(int(new_array[1]))\r\n # print(end_time)\r\n # print(new_array[0])\r\n # print(new_array[1])\r\n\r\n # print(\"Merge sort\")\r\n start_time = time.time()\r\n new_array = merge_sort(array)\r\n end_time = time.time() - start_time\r\n merge_time.append(end_time)\r\n # merge_count.append(int(new_array[1]))\r\n # print(end_time)\r\n # print(new_array[0])\r\n # print(new_array[1])\r\n\r\n # print(\"Shell sort\")\r\n start_time = time.time()\r\n new_array = shell_sort(array)\r\n end_time = time.time() - start_time\r\n shell_time.append(end_time)\r\n # shell_count.append(int(new_array[1]))\r\n # print(end_time)\r\n # print(new_array[0])\r\n # print(new_array[1])\r\n\r\n# print(selection_count)\r\n# print(insertion_count)\r\n# print(merge_count)\r\n# print(shell_count)\r\n\r\nplt.xlabel(\"Array size\")\r\n# plt.ylabel(\"Number of comparisons\")\r\nplt.ylabel(\"Running time\")\r\n\r\nplt.yscale('log',base=2)\r\n\r\n# plt.plot(list_values, selection_count, marker='.', color='darkturquoise')\r\n# plt.plot(list_values, insertion_count, marker='.', color='darkslateblue')\r\n# plt.plot(list_values, merge_count, marker='.', color='wheat')\r\n# plt.plot(list_values, shell_count, marker='.', color = 'orchid')\r\n\r\nplt.plot(list_values, selection_time, marker='.', color='darkturquoise')\r\nplt.plot(list_values, insertion_time, marker='.', color='darkslateblue')\r\nplt.plot(list_values, merge_time, marker='.', color='wheat')\r\nplt.plot(list_values, shell_time, marker='.', color = 'orchid')\r\n\r\n\r\nplt.show()\r\n\r\n","repo_name":"be-unkind/algorithm_lab1","sub_path":"lab_1.py","file_name":"lab_1.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4978699382","text":"#!/usr/bin/python3\n\nif __name__ == \"__main__\":\n from sys import argv, exit\n from calculator_1 import add, sub, mul, div\n\n if len(argv) != 4:\n print(\"Usage: ./100-my_calculator.py \")\n exit(1)\n if argv[2] not in \"+-*/\":\n print(\"Unknown operator. 
Available operators: +, -, * and /\")\n exit(1)\n\n o = {\n \"+\": add,\n \"-\": sub,\n \"*\": mul,\n \"/\": div\n }\n a = int(argv[1])\n b = int(argv[3])\n op = argv[2]\n\n print(\"{} {} {} = {}\".format(a, op, b, o[op](a, b)))\n","repo_name":"espoupou/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20548451662","text":"import logging\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport pyro\nimport torch\nfrom pyro import distributions as pdist\n\nfrom sbibm.tasks.simulator import Simulator\nfrom sbibm.tasks.task import Task\n\n\nclass GaussianLinearUniform(Task):\n def __init__(\n self, dim: int = 10, prior_bound: float = 1.0, simulator_scale: float = 0.1\n ):\n \"\"\"Gaussian Linear Uniform\n\n Inference of mean under uniform prior.\n\n Args:\n dim: Dimensionality of parameters and data.\n prior_bound: Prior is uniform in [-prior_bound, +prior_bound].\n simulator_scale: Standard deviation of noise in simulator.\n \"\"\"\n super().__init__(\n dim_parameters=dim,\n dim_data=dim,\n name=Path(__file__).parent.name,\n name_display=\"Gaussian Linear Uniform\",\n num_observations=10,\n num_posterior_samples=10000,\n num_reference_posterior_samples=10000,\n num_simulations=[100, 1000, 10000, 100000, 1000000],\n path=Path(__file__).parent.absolute(),\n )\n\n self.prior_params = {\n \"low\": -prior_bound * torch.ones((self.dim_parameters,)),\n \"high\": +prior_bound * torch.ones((self.dim_parameters,)),\n }\n\n self.prior_dist = pdist.Uniform(**self.prior_params).to_event(1)\n self.prior_dist.set_default_validate_args(False)\n\n self.simulator_params = {\n \"precision_matrix\": torch.inverse(\n simulator_scale * torch.eye(self.dim_parameters)\n )\n }\n\n def get_prior(self) -> Callable:\n def prior(num_samples=1):\n return pyro.sample(\"parameters\", self.prior_dist.expand_by([num_samples]))\n\n return prior\n\n def get_simulator(self, max_calls: Optional[int] = None) -> Simulator:\n \"\"\"Get function returning samples from simulator given parameters\n\n Args:\n max_calls: Maximum number of function calls. Additional calls will\n result in SimulationBudgetExceeded exceptions. 
Defaults to None\n for infinite budget\n\n Return:\n Simulator callable\n \"\"\"\n\n def simulator(parameters):\n return pyro.sample(\n \"data\",\n pdist.MultivariateNormal(\n loc=parameters,\n precision_matrix=self.simulator_params[\"precision_matrix\"],\n ),\n )\n\n return Simulator(task=self, simulator=simulator, max_calls=max_calls)\n\n def _sample_reference_posterior(\n self,\n num_samples: int,\n num_observation: Optional[int] = None,\n observation: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n \"\"\"Sample reference posterior for given observation\n\n Uses closed form solution with rejection sampling\n\n Args:\n num_samples: Number of samples to generate\n num_observation: Observation number\n observation: Instead of passing an observation number, an observation may be\n passed directly\n\n Returns:\n Samples from reference posterior\n \"\"\"\n assert not (num_observation is None and observation is None)\n assert not (num_observation is not None and observation is not None)\n\n if num_observation is not None:\n observation = self.get_observation(num_observation=num_observation)\n\n log = logging.getLogger(__name__)\n\n reference_posterior_samples = []\n\n sampling_dist = pdist.MultivariateNormal(\n loc=observation, precision_matrix=self.simulator_params[\"precision_matrix\"]\n )\n\n # Reject samples outside of prior bounds\n counter = 0\n while len(reference_posterior_samples) < num_samples:\n counter += 1\n sample = sampling_dist.sample()\n if not torch.isinf(self.prior_dist.log_prob(sample).sum()):\n reference_posterior_samples.append(sample)\n\n reference_posterior_samples = torch.cat(reference_posterior_samples)\n acceptance_rate = float(num_samples / counter)\n\n log.info(\n f\"Acceptance rate for observation {num_observation}: {acceptance_rate}\"\n )\n\n return reference_posterior_samples\n\n\nif __name__ == \"__main__\":\n task = GaussianLinearUniform()\n task._setup()\n","repo_name":"amortizedgbi/amortizedgbi","sub_path":"packages/sbibm/sbibm/tasks/gaussian_linear_uniform/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25514987781","text":"import re\n\n\ndef get_manhattan_distance(x1, y1, x2, y2):\n return abs(x1 - x2) + abs(y1 - y2)\n\n\ndef solve_1(input):\n dir_to_co = {\n \"L\": (0, -1),\n \"R\": (0, +1),\n \"U\": (1, 0),\n \"D\": (-1, 0)\n }\n\n xh, yh = (0, 0)\n xt, yt = (0, 0)\n tail_coordinates = set()\n\n command = input.pop(0)\n while command is not None:\n direction, am = re.match(r\"^(.) 
([0-9]+)$\", command).groups()\n\n for _ in range(int(am)):\n delta = dir_to_co[direction]\n xh, yh = (xh + delta[0], yh + delta[1])\n if xh == xt and get_manhattan_distance(xh, yh, xt, yt) > 1:\n if direction == \"R\":\n yt += 1\n else:\n yt -= 1\n elif yh == yt and get_manhattan_distance(xh, yh, xt, yt) > 1:\n if direction == \"U\":\n xt += 1\n else:\n xt -= 1\n elif get_manhattan_distance(xh, yh, xt, yt) > 2:\n # need to make diagonal movement\n for opt in [(1, 1), (-1, 1), (-1, -1), (1, -1)]:\n x_temp, y_temp = (xt + opt[0], yt + opt[1])\n if get_manhattan_distance(xh, yh, x_temp, y_temp) <= 2:\n xt, yt = x_temp, y_temp\n break\n tail_coordinates.add((xt, yt))\n if len(input):\n command = input.pop(0)\n else:\n command = None\n\n return len(list(tail_coordinates))\n\n\ndef solve_2(input):\n pass\n\n\nif __name__ == \"__main__\":\n day = \"d9\"\n with open(f\"../inputs/{day}/input_simple.txt\") as fh:\n lines = [line.strip(\"\\n\") for line in fh.readlines()]\n print(f\"solution to puzzle 1 {solve_1(lines[:])}\")\n print(f\"solution to puzzle 2 {solve_2(lines[:])}\")\n with open(f\"../inputs/{day}/input.txt\") as fh:\n lines = [line.strip(\"\\n\") for line in fh.readlines()]\n print(f\"solution to puzzle 1 {solve_1(lines[:])}\")\n print(f\"solution to puzzle 2 {solve_2(lines[:])}\")\n","repo_name":"ManuDeBuck/advent-of-code-2022","sub_path":"solutions/d9.py","file_name":"d9.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"30419619048","text":"\"\"\"Base class and in-built dataset types.\"\"\"\nimport os\nimport pickle\nfrom types import SimpleNamespace\n\nfrom packaging.utils import canonicalize_name\nfrom packaging.version import parse as parse_version\n\nfrom textflow.utils import PluginManager\nfrom textflow.utils.text import Tokenizer\n\n__all__ = [\n 'Dataset',\n 'datasets',\n 'MultiLabelDataset',\n 'SequenceLabelingDataset',\n]\n\ndatasets = PluginManager()\n\nIB_TAGS = ['I', 'B']\n\nSYS_MAJORITY = 'sys.majority'\n\n\nclass Dataset:\n def __init__(self, annotation_sets=None, tokenizer=None, validator='sys.majority'):\n if annotation_sets is not None:\n self.records = self.build_dataset(annotation_sets, tokenizer=tokenizer)\n self.validator = validator\n else:\n self.records = []\n self.validator = None\n\n def build_dataset(self, annotation_sets, tokenizer):\n \"\"\"Builds dataset from provided annotation sets\n\n :param annotation_sets: an iterable of annotation sets\n :param tokenizer: tokenizer function that returns (start, end, token string) of\n :return: records in dataset with labels by each annotator\n \"\"\"\n raise NotImplementedError\n\n def build_item_tuples(self):\n \"\"\"Make item tuples for annotation agreement\n\n :return: label item tuples\n \"\"\"\n raise NotImplementedError\n\n @property\n def groups_(self):\n group_set = set()\n for d in self.records.values():\n for user, _ in d.labels.items():\n group_set.add(user)\n return group_set\n\n @property\n def classes_(self):\n \"\"\"List all classes of dataset if defined else return None\n\n :return: list of unique classes\n \"\"\"\n label_set = set()\n for d in self.records.values():\n for _, labels in d.labels.items():\n label_set.update(labels)\n return label_set\n\n @property\n def X(self):\n \"\"\"Gets feature/independent variable of dataset\n\n :return: an iterable of feature/independent variable\n \"\"\"\n raise NotImplementedError\n\n @property\n def y(self):\n \"\"\"Gets target/dependent variable of 
dataset\n\n        :return: an iterable of target/dependent variable\n        \"\"\"\n        raise NotImplementedError\n\n    @staticmethod\n    def _latest_version(root_path, filename):\n        max_version = parse_version('0.0.1')\n        for fn in os.listdir(root_path):\n            filename_name = '{filename}-'.format(filename=filename)\n            if fn.startswith(filename_name):\n                fv = fn[len(filename_name):]\n                fvp = parse_version(fv)\n                if max_version < fvp:\n                    max_version = fvp\n        return max_version\n\n    def save(self, path, version=None):\n        root_path, filename = os.path.splitext(path)\n        filename = canonicalize_name(filename)\n        if version is None:\n            version = Dataset._latest_version(root_path, filename)\n        path = os.path.join(root_path, '{}-{}'.format(filename, version))\n        with open(path, 'wb') as fp:\n            pickle.dump(self, fp)\n\n    @classmethod\n    def load(cls, path, version=None):\n        root_path, filename = os.path.splitext(path)\n        filename = canonicalize_name(filename)\n        if version is None:\n            version = Dataset._latest_version(root_path, filename)\n        path = os.path.join(root_path, '{}-{}'.format(filename, version))\n        with open(path, 'rb') as fp:\n            result = pickle.load(fp)\n        if not isinstance(result, cls):\n            raise TypeError('Expected type \\'{}\\', got \\'{}\\' instead'.format(cls.__name__, type(result).__name__))\n        return result\n\n\n@datasets.register('sequence_labeling')\nclass SequenceLabelingDataset(Dataset):\n    def build_dataset(self, annotation_sets, tokenizer=None):\n        \"\"\"Builds dataset from provided annotation sets\n\n        :param annotation_sets: an iterable of annotation sets\n        :param tokenizer: tokenizer function that returns (start, end, token string) for each token\n        :return: records in dataset with labels by each annotator\n        \"\"\"\n        if tokenizer is None:\n            tokenizer = Tokenizer()\n        records = dict()\n        for annotation_set in annotation_sets:\n            document = annotation_set.document\n            if document.id in records:\n                document = records[document.id]\n            else:\n                document = SimpleNamespace(\n                    id=document.id,\n                    id_str=document.id_str,\n                    text=document.text,\n                    tokens=tokenizer.tokenize(document.text),\n                    labels=dict(),\n                )\n            # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n            # set labels\n            token_index = {}\n            for tid, (s, e, _) in enumerate(document.tokens):\n                for i in range(s, e):\n                    token_index[i] = tid\n            user = annotation_set.user\n            if '__{}__'.format(user.username) in document.labels:\n                labels = document.labels['__{}__'.format(user.username)]\n            else:\n                labels = [('O', None) for _ in document.tokens]\n            for annotation in annotation_set.annotations:\n                label_value = annotation.label.value\n                annotation_span = annotation.span\n                for aix in range(annotation_span.start, annotation_span.start + annotation_span.length):\n                    bio_tag = IB_TAGS[aix == annotation_span.start]\n                    # update only tags marked as other\n                    if (aix in token_index) and (labels[token_index[aix]] == ('O', None)):\n                        labels[token_index[aix]] = (bio_tag, label_value)\n            document.labels['__{}__'.format(user.username)] = labels\n            # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n            records[document.id] = document\n        for i in records:\n            labels = records[i].labels.values()\n            majority_vote = []\n            prv_label = 'O'\n            for ll in zip(*labels):\n                # count lbl values\n                lbl_counts, lbl_val = [], list(map(lambda x: x[1], ll))\n                for lbl in set(lbl_val):\n                    lbl_counts.append((lbl, lbl_val.count(lbl)))\n                maj_lbl, maj_lbl_count = sorted(lbl_counts, key=lambda x: x[-1])[-1]\n                # count BIO tags\n                tag_counts, tag_val = [], list(map(lambda x: x[0], ll))\n                for tag in set(tag_val):\n                    tag_counts.append((tag, 
tag_val.count(tag)))\n maj_tag, _ = sorted(tag_counts, key=lambda x: x[-1])[-1]\n min_num = len(ll) // 2 + 1\n # Update word position tag (BIO)\n if maj_lbl_count >= min_num:\n if maj_lbl is not None:\n if prv_label[0] in ['O', '?']:\n maj_tag = 'B'\n else:\n # keep the majority tag\n pass\n else:\n maj_tag = 'O'\n else:\n # disagreement\n # -- unable to identify majority\n maj_tag, maj_lbl = '?', None\n prv_label = (maj_tag, maj_lbl)\n majority_vote.append(prv_label)\n records[i].labels[SYS_MAJORITY] = majority_vote\n return records\n\n def build_item_tuples(self):\n \"\"\"Make item tuples for annotation agreement\n\n :return: label item tuples\n \"\"\"\n result = []\n for d in self.records.values():\n for coder, labels in d.labels.items():\n if coder == SYS_MAJORITY:\n continue\n for index, (label, (_, _, token)) in enumerate(zip(labels, d.tokens)):\n result.append((coder, '{}_{}'.format(d.id, index), label[-1]))\n return result\n\n @property\n def classes_(self):\n \"\"\"List all classes of dataset if defined else return None\n\n :return: list of unique classes\n \"\"\"\n label_set = set()\n for d in self.records.values():\n for _, labels in d.labels.items():\n label_set.update([label[-1] for label in labels])\n return label_set\n\n @property\n def X(self):\n \"\"\"Gets tokens for each sentence\n\n :return: list of tokens for each sentence\n \"\"\"\n # select third position in tokens (i.e. index 2) to get string token\n # token tuple struct (start, end, token)\n X = [list(zip(*self.records[r].tokens))[2] for r in self.records]\n return X\n\n @staticmethod\n def _format_labels(tags):\n \"\"\"Format labels by converting None to 'O'.\n\n :param tags: list of tags\n :return: formatted list of tags\n \"\"\"\n return [t + ('' if l is None else '_{}'.format(l)) for t, l in tags]\n\n @property\n def y(self):\n \"\"\"Gets (multi-class) labels for each token of each sentence\n\n :return: list of labels for each token of each sentence\n \"\"\"\n # noinspection PyPep8Naming\n X = [self._format_labels(self.records[r].labels[self.validator]) for r in self.records if\n self.validator in self.records[r].labels]\n return X\n\n\n@datasets.register('document_classification')\nclass MultiLabelDataset(Dataset):\n def build_dataset(self, annotation_sets, tokenizer=None):\n \"\"\"Builds dataset from provided annotation sets\n\n :param annotation_sets: an iterable of annotation sets\n :param tokenizer: tokenizer function that returns (start, end, token string) of\n :return: records in dataset with labels by each annotator\n \"\"\"\n if tokenizer is None:\n tokenizer = Tokenizer()\n records = dict()\n for annotation_set in annotation_sets:\n document = annotation_set.document\n if document.id in records:\n document = records[document.id]\n else:\n document = SimpleNamespace(\n id=document.id,\n id_str=document.id_str,\n text=document.text,\n tokens=tokenizer.tokenize(document.text),\n labels=dict(),\n )\n # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n # set labels\n user = annotation_set.user\n if 'users.{}'.format(user.username) in document.labels:\n labels = document.labels['users.{}'.format(user.username)]\n else:\n labels = []\n for annotation in annotation_set.annotations:\n label_value = annotation.label.value\n labels.append(label_value)\n document.labels['users.{}'.format(user.username)] = labels\n # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\n records[document.id] = document\n for i in records:\n # labels by different coders\n labels = 
records[i].labels.values()\n label_counts = {}\n for ls in labels:\n for ll in set(ls):\n if ll not in label_counts:\n label_counts[ll] = 0\n label_counts[ll] += 1\n # minimum number of labels needed\n # to consider for annotation (half of the number of available annotations)\n min_num = len(labels) // 2 + 1\n majority_vote = [k for k, v in label_counts.items() if v >= min_num]\n records[i].labels[SYS_MAJORITY] = majority_vote\n return records\n\n def build_item_tuples(self):\n \"\"\"Make item tuples for annotation agreement\n\n :return: label item tuples\n \"\"\"\n result = []\n for d in self.records.values():\n for coder, labels in d.labels.items():\n if coder == SYS_MAJORITY:\n continue\n for label in labels:\n result.append((coder, str(d.id), str(label)))\n return result\n\n @property\n def X(self):\n \"\"\"Gets tokens for each sentence\n\n :return: list of tokens for each sentence\n \"\"\"\n X = [self.records[r].text for r in self.records]\n return X\n\n @property\n def y(self):\n \"\"\"Gets (multi-) label each sentence\n\n :return: list of labels of each document\n \"\"\"\n y = [self.records[r].labels[self.validator] for r in self.records]\n return y\n","repo_name":"citizen-helper/textflow","sub_path":"textflow/model/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"15369456408","text":"\"\"\"\n\nColumn Definition: \n\nstationName == name\nstation_id == station_id\nlat = lat\nlon = lon\n\"\"\"\n\nimport json\nimport requests\n\n\nstationsFile = open(\"Data/stationsJson.json\", \"r\")\nstationsRawData = stationsFile.read()\nstations = json.loads(stationsRawData)\n\nfor station in stations:\n\n newStation = {\"stationName\" : station[\"name\"], \"station_id\": station[\"station_id\"], \"lat\" : station[\"lat\"], \"lon\" : station[\"lon\"]}\n headers = {'Content-type': 'application/json'}\n\n\n r = requests.post(\"https://jacob-toomey.com/api/Stations\", data=json.dumps(newStation), headers=headers)\n\n print(r.status_code)","repo_name":"jcbtmy/jcbtmywebsite","sub_path":"scripts/addStations.py","file_name":"addStations.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43901971672","text":"import paho.mqtt.client as mqtt\nfrom time import *\nfrom adafruit_servokit import ServoKit\n\nkit = ServoKit(channels=16)\nkit.servo[8].angle = 0\n\ndef connect_mqtt():\n client = mqtt.Client(transport=\"websockets\")\n client.on_connect = on_connect\n client.on_message = on_message\n\n broker_address = \"192.168.1.220\"\n broker_port = 1884\n\n # Set the MQTT broker's WebSocket URI\n websocket_uri = f\"ws://{broker_address}:{broker_port}/mqtt\"\n\n # Connect to the MQTT broker over WebSocket\n client.ws_set_options(path=\"/mqtt\") # Set the WebSocket path\n client.connect(broker_address, broker_port, 60)\n\n # Start the MQTT client's network loop\n client.loop_forever()\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to MQTT broker with result code \" + str(rc))\n client.subscribe(\"test/servo\")\n\ndef on_message(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n # Perform desired action based on the received message\n if msg.payload.decode() == 'This is lock':\n # Trigger the lock action on the Raspberry Pi\n # Your lock action code goes here\n print(\"Lock action triggered\")\n kit.servo[8].angle = 180\n elif 
msg.payload.decode() == 'This is unlock':\n # Trigger the unlock action on the Raspberry Pi\n # Your unlock action code goes here\n print(\"Unlock action triggered\")\n kit.servo[8].angle = 0\n\n# Run the connect_mqtt coroutine\nconnect_mqtt()","repo_name":"sheevam2/Doorbell23","sub_path":"intial_testing/mqtt_servo.py","file_name":"mqtt_servo.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37659686860","text":"from scipy.optimize import root_scalar\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n return (x*np.sin(x))-1\n\n\nx = np.linspace(start=-1, stop=2)\nplt.plot(x, f(x))\nplt.grid()\nplt.savefig(\"graph\")\n\ntry:\n sol = root_scalar(f, method='brentq', bracket=[-1, 2])\n print(f\"Metodo de Brent: \\n\\\n - Raiz: {sol.root} \\n\\\n - Iteraciones: {sol.iterations}\")\nexcept:\n print(\"🔴 No converge\")","repo_name":"Camilo19g/analisis-numerico","sub_path":"Participaciones/Brent.py","file_name":"Brent.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20941600477","text":"from PyQt5 import QtWidgets\nfrom controller.build import BuildApp, add_part, clear_parts, search_parts, exit_app\nfrom controller.plate import PlateApp\nfrom model.content_filter import similar_plate\nfrom model.message import add_limit, confirm_restart, is_selected, open_file\nfrom model.read_csv import open_csv\nfrom view import pcb\n\n\n# The PCB widget controller\n# The table is populated with PCB data.\nclass PcbApp(pcb.Ui_Form, QtWidgets.QWidget):\n def __init__(self):\n super(PcbApp, self).__init__()\n self.setupUi(self)\n self.plate = PlateApp()\n self.build = BuildApp()\n self.pushButton.clicked.connect(self.add_part)\n self.pushButton_2.clicked.connect(self.show_build)\n self.pushButton_3.clicked.connect(self.start_over)\n self.pushButton_4.clicked.connect(exit_app)\n self.pushButton_6.clicked.connect(self.open_file)\n\n # Add selected part to build table\n def add_part(self):\n temp = []\n selected_pcb = []\n\n # If a row is selected, clear selection from other table\n if self.tableWidget.selectionModel().hasSelection():\n self.tableWidget_2.clearSelection()\n selected_pcb = self.tableWidget.selectedItems()\n if self.tableWidget_2.selectionModel().hasSelection():\n self.tableWidget.clearSelection()\n selected_pcb = self.tableWidget_2.selectedItems()\n\n # Alert user if a row has not been selected from either tables\n if not self.tableWidget.selectionModel().hasSelection() and not self.tableWidget_2.selectionModel().hasSelection():\n is_selected()\n return\n\n # Add PCB if no PCB exist in the build table\n if not search_parts('PCB'):\n for index, item in enumerate(selected_pcb):\n if index == 4:\n continue\n else:\n temp.append(item.text())\n add_part(temp)\n self.close()\n self.plate.load_table()\n self.plate.load_table2(similar_plate(selected_pcb[1].text()))\n self.plate.showMaximized()\n # Warning message if adding more than 1 item of the same type\n # Not needed if code is working properly\n else:\n add_limit()\n return\n\n # Show build window\n def show_build(self):\n self.build.load_table()\n self.build.showMaximized()\n\n # Close PCB window and clear the build table\n def start_over(self):\n if confirm_restart():\n self.close()\n clear_parts()\n else:\n return\n\n # Open CSV file of current window\n def open_file(self):\n if open_file():\n self.close()\n 
open_csv('board.csv')\n clear_parts()\n else:\n return\n\n # Populate PCB table\n def load_table(self, layout):\n boards = layout\n row = 0\n self.tableWidget.setRowCount(len(boards))\n\n for i in boards:\n self.tableWidget.setItem(row, 0, QtWidgets.QTableWidgetItem(i[0]))\n self.tableWidget.setItem(row, 1, QtWidgets.QTableWidgetItem(i[1]))\n self.tableWidget.setItem(row, 2, QtWidgets.QTableWidgetItem(i[2]))\n self.tableWidget.setItem(row, 3, QtWidgets.QTableWidgetItem(i[3]))\n self.tableWidget.setItem(row, 4, QtWidgets.QTableWidgetItem(i[4]))\n self.tableWidget.setItem(row, 5, QtWidgets.QTableWidgetItem(i[5]))\n row += 1\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.resizeRowsToContents()\n self.tableWidget.clearSelection()\n\n # Populate PCB recommendation table\n def load_table2(self, boards):\n row = 0\n self.tableWidget_2.setRowCount(len(boards))\n\n for n in boards:\n self.tableWidget_2.setItem(row, 0, QtWidgets.QTableWidgetItem(n[0]))\n self.tableWidget_2.setItem(row, 1, QtWidgets.QTableWidgetItem(n[1]))\n self.tableWidget_2.setItem(row, 2, QtWidgets.QTableWidgetItem(n[2]))\n self.tableWidget_2.setItem(row, 3, QtWidgets.QTableWidgetItem(n[3]))\n self.tableWidget_2.setItem(row, 4, QtWidgets.QTableWidgetItem(n[4]))\n self.tableWidget_2.setItem(row, 5, QtWidgets.QTableWidgetItem(n[5]))\n row += 1\n self.tableWidget_2.resizeColumnsToContents()\n self.tableWidget_2.resizeRowsToContents()\n self.tableWidget_2.clearSelection()\n","repo_name":"AudreyGH/Hybrid-Recommender-System","sub_path":"controller/pcb.py","file_name":"pcb.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26316287792","text":"__author__ = 'Rakesh Kumar'\n\n\nclass Intent:\n\n def __init__(self, intent_type, flow_match, in_port, out_port, apply_immediately=True,\n min_rate=None, max_rate=None):\n\n self.intent_type = intent_type\n self.flow_match = flow_match\n self.in_port = in_port\n self.out_port = out_port\n self.min_rate = min_rate\n self.max_rate = max_rate\n\n self.tree_id = None\n self.required_vlan_id = None\n self.hash_value = hash(str(self.in_port) + str(self.out_port) + str(self.flow_match))\n\n self.apply_immediately = apply_immediately\n\n self.src_host = None\n self.dst_host = None\n\n self.consolidated_in_a_failover_group = False\n\n def __hash__(self):\n return self.hash_value\n\n def __eq__(self, other):\n if other:\n return self.hash_value == other.hash_value\n else:\n return False\n\n def __str__(self):\n return \"Hash Value: \" + str(self.hash_value) + \" \" +\\\n \"Intent Type: \" + str(self.intent_type) + \" \" + \\\n \"Flow Match: \" + str(self.flow_match) + \" \" + \\\n \"In Port: \" + str(self.in_port) + \" \" + \\\n \"Out Port: \" + str(self.out_port)","repo_name":"Vignesh2208/Melody","sub_path":"srcs/cyber_network/synthesis/intent.py","file_name":"intent.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"1121586333","text":"# import library\nimport cv2\nfrom Moildev import Moildev\n\n# create object of Moildev\nmoildev = Moildev(\"../Raspi_Cam.json\") # select parameter file (.json)\n\n# create specified variable value\nalpha_max = 110\nalpha = 30\nbeta = 30\n\n# read image from directory using opencv\nimage = cv2.imread(\"../image.jpg\")\n\n# create reverse image\nreverse_image = moildev.reverseImage(image, alpha_max, alpha, beta) # fill the variable\n\n# save reverse 
image\ncv2.imwrite("reverse_image.jpg", reverse_image)\n","repo_name":"aji-ptn/moildev","sub_path":"examples/Reverse_view/reverse_view.py","file_name":"reverse_view.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28694460174","text":"def caesarCipher(s, k):\r\n temp = []\r\n \r\n # convert every character to its ASCII code \r\n for character in s:\r\n temp.append(ord(character))\r\n \r\n for i in range(len(temp)):\r\n # uppercase letters run from 65 to 90 in ASCII\r\n if 65 <= temp[i] <= 90:\r\n # -65 + k with % 26 handles the case where the value passes 90 and wraps it back around \r\n temp[i] = (65 + ((temp[i]) - 65 + k)% 26) \r\n # lowercase letters run from 97 to 122 in ASCII\r\n elif 97 <= temp[i] <= 122:\r\n # wraps around the same way as above\r\n temp[i] = (97 + ((temp[i]) - 97 + k)% 26) \r\n return ("".join(chr(x) for x in temp))","repo_name":"MinhThieu145/CodeExplainer","sub_path":"Caesar Cipher.py","file_name":"Caesar Cipher.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9062267204","text":"# main/browse/services.py _____________________________________________________\n# Author: Sun Lee, Mathias Sackey\n\n\nfrom main.adapters.abstract_repository import repo as repo\nfrom main.adapters.services import write_reviews\nfrom main.models.msac_models import Review\n\n\ndef get_alphanumeric(tag, i):\n alphanumeric = []\n if not i.isdigit():\n alphanumeric = [attr.name[0].lower() for attr in getattr(repo, f'get_{tag}s')()\n if 96 < ord(attr.name[0].lower()) < 123] + ['#']\n\n return sorted(set(alphanumeric))\n\ndef get_track(id):\n return repo.get_track(id)\n\n\ndef get_tag(tag, i):\n # return album/artist/genre name\n if i.isdigit():\n return getattr(repo, f'get_{tag}')(int(i)).name\n \n # return Album/Artist/Genre\n return tag\n\n\ndef get_tagged(tag, i):\n # return tracks\n if i.isdigit():\n return getattr(repo, f'get_tracks_by_{tag}')(int(i))\n \n # return albums/artists/genres\n else:\n\n # non-alphabetical\n if i == '#':\n return [attr for attr in getattr(repo, f'get_{tag}s')()\n if not (96 < ord(attr.name[0].lower()) < 123)]\n \n # alphabetical\n else:\n return [attr for attr in getattr(repo, f'get_{tag}s')()\n if attr.name[0].lower() == i]\n\n\ndef add_review(track_id, user_name, datetime, rating, review):\n track = repo.get_track(track_id)\n user = repo.get_user(user_name)\n r = repo.get_review(track, user)\n # remove old review\n if r:\n track.remove_review(r)\n user.remove_review(r)\n # add new review\n r = Review(datetime, rating, review, track, user)\n repo.add_review(r)\n write_reviews(repo)\n","repo_name":"CodeNClimb/Musify-Web-App","sub_path":"main/browse/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15922238023","text":"\nfrom __future__ import division\nfrom math import pi, cos, sin\n\nfrom mesh import Vector\n\n\n\ndef add_cylinder(mesh, radius, height,\n n_circle = 10, n_height = 1,\n v_axis = Vector(0,0,1),\n basepoint = Vector(0,0,0),\n twist_angle = 0,\n close_bot = True,\n close_top = True):\n \"\"\"Add a cylinder to an existing mesh.\n\n Arguments are as follows:\n - mesh is the mesh to add to;\n - radius is the radius;\n - height is the height (expressed in multiples of v_axis);\n - n_circle is the number of points to place around a circle;\n - 
n_height is the number of layers to subdivide the cylinder into;\n - v_axis is a unit vector in the direction of the cylinder;\n - basepoint is a vector in the centre of the base of the cylinder;\n - twist_angle is the phase with which points are distributed;\n - close_bot and close_top say whether to give the circular disk\n ends of the cylinder.\n \"\"\"\n\n v_axis = Vector(v_axis).normalise()\n (perp1, perp2) = v_axis.get_orthogonal_vectors()\n basepoint = Vector(basepoint)\n\n def make_row(i):\n for j in xrange(n_circle):\n t = twist_angle + (i+2*j)*pi/n_circle\n v = basepoint + perp1*(radius*cos(t)) + perp2*(radius*sin(t)) + v_axis*(height*i/n_height)\n yield mesh.add_vertex(v)\n\n r = list(make_row(0))\n\n if close_bot:\n o = mesh.add_vertex(basepoint)\n for j in xrange(n_circle):\n mesh.add_face(r[j],o,r[(j+1)%n_circle])\n\n for i in xrange(n_height):\n s = list(make_row(i+1))\n for j in xrange(n_circle):\n mesh.add_face(r[j],r[(j+1)%n_circle],s[j])\n mesh.add_face(s[(j-1)%n_circle],r[j],s[j])\n r = s\n\n if close_top:\n o = mesh.add_vertex(basepoint + v_axis*height)\n for j in xrange(n_circle):\n mesh.add_face(s[j],s[(j+1)%n_circle],o)\n","repo_name":"brachyprint/brachyprint","sub_path":"src/mesh/primitives/cylinder.py","file_name":"cylinder.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"17218169730","text":"\nimport numpy as np\nimport pandas as pd\nimport h5py \nimport os\nimport time\n\nfrom scipy.spatial import cKDTree\nfrom hydroflow.src_physics.utils import get_limits\n\n\n##### READ PARTICLE DATA\ndef read_subvol(path,ivol,nslice,nchunks=1e3):\n\n pdata_file=h5py.File(path,'r')\n boxsize=pdata_file['Header'].attrs['BoxSize']*1e-3\n hval=pdata_file['Header'].attrs['HubbleParam']\n masstable=pdata_file['Header'].attrs['MassTable']\n afac=1/(1+pdata_file['Header'].attrs['Redshift'])\n pdata_file.close()\n \n flist=sorted([path.split('snap_')[0]+fname for fname in os.listdir(path.split('snap_')[0]) if '.hdf5' in fname])\n if nchunks and len(flist)>nchunks:\n flist=flist[:nchunks]\n\n numfiles=len(flist)\n print(f'Loading from {numfiles} files')\n\n lims=get_limits(ivol,nslice,boxsize,buffer=0.1)\n ptype_fields={0:['InternalEnergy','ElectronAbundance','GFM_Metallicity','StarFormationRate'],\n 4:['GFM_Metallicity','GFM_StellarFormationTime'],\n 5:[]}\n\n pdata=[{ptype:[] for ptype in ptype_fields} for ifile in range(numfiles)]\n\n for ifile,ifname in enumerate(flist):\n pdata_ifile=h5py.File(ifname,'r')\n npart_ifile=pdata_ifile['Header'].attrs['NumPart_ThisFile']\n\n print(f'Loading data for ifile {ifile+1}/{numfiles}')\n for iptype,ptype in enumerate(ptype_fields):\n t0=time.time()\n\n if npart_ifile[ptype]:\n\n #mask for subvolume\n subvol_mask=np.ones(npart_ifile[ptype])\n coordinates=np.float32(pdata_ifile[f'PartType{ptype}']['Coordinates'][:]*1e-3)\n \n for idim,dim in enumerate('xyz'):\n lims_idim=lims[2*idim:(2*idim+2)]\n if lims_idim[0]<0 and nslice>1:#check for periodic\n otherside=coordinates[:,idim]>=boxsize+lims_idim[0]\n coordinates[:,idim][otherside]=coordinates[:,idim][otherside]-boxsize\n if lims_idim[1]>boxsize and nslice>1:#check for periodic\n otherside=coordinates[:,idim]<=(lims_idim[1]-boxsize)\n coordinates[:,idim][otherside]=coordinates[:,idim][otherside]+boxsize\n\n idim_mask=np.logical_and(coordinates[:,idim]>=lims_idim[0],coordinates[:,idim]<=lims_idim[1])\n subvol_mask=np.logical_and(subvol_mask,idim_mask)\n 
npart_ifile_invol=np.nansum(subvol_mask)\n\n if npart_ifile_invol:\n print(f'There are {npart_ifile_invol} ivol ptype {ptype} particles in this file')\n subvol_mask=np.where(subvol_mask)\n coordinates=coordinates[subvol_mask]\n \n # print('Loading IDs,ptypes')\n pdata[ifile][ptype]=pd.DataFrame(data=pdata_ifile[f'PartType{ptype}']['ParticleIDs'][:][subvol_mask],columns=['ParticleIDs'])\n pdata[ifile][ptype]['ParticleType']=np.uint16(np.ones(npart_ifile_invol)*ptype)\n\n # print('Loading')\n pdata[ifile][ptype].loc[:,[f'Coordinates_{dim}' for dim in 'xyz']]=coordinates;del coordinates\n if not ptype==1:\n pdata[ifile][ptype].loc[:,[f'Velocity_{dim}' for dim in 'xyz']]=pdata_ifile[f'PartType{ptype}']['Velocities'][:][subvol_mask]\n\n # print('Loading masses')\n if not ptype==1:\n pdata[ifile][ptype]['Mass']=np.float32(pdata_ifile[f'PartType{ptype}']['Masses'][:][subvol_mask]*1e10/hval)\n else:\n pdata[ifile][ptype]['Mass']=np.float32(np.ones(npart_ifile_invol)*masstable[ptype]*1e10/hval) \n\n # print('Loading rest')\n for field in ptype_fields[ptype]:\n if not 'GFM' in field:\n pdata[ifile][ptype][field]=np.float32(pdata_ifile[f'PartType{ptype}'][field][:][subvol_mask])\n else:\n field_out=field[4:]\n pdata[ifile][ptype][field_out]=np.float32(pdata_ifile[f'PartType{ptype}'][field][:][subvol_mask])\n\n #if gas, do temp clc\n if ptype==0:\n ne = pdata[ifile][ptype].ElectronAbundance; del pdata[ifile][ptype]['ElectronAbundance']\n energy = pdata[ifile][ptype].InternalEnergy; del pdata[ifile][ptype]['InternalEnergy']\n yhelium = 0.0789\n temp = energy*(1.0 + 4.0*yhelium)/(1.0 + yhelium + ne)*1e10*(2.0/3.0)\n temp *= (1.67262178e-24/ 1.38065e-16 )\n pdata[ifile][ptype]['Temperature']=np.float32(temp)\n \n else:\n print(f'No ivol ptype {ptype} particles in this file!')\n pdata[ifile][ptype]=pd.DataFrame([])\n else:\n print(f'No ptype {ptype} particles in this file!')\n pdata[ifile][ptype]=pd.DataFrame([])\n\n print(f'Loaded itype {ptype} for ifile {ifile+1}/{numfiles} in {time.time()-t0:.3f} sec')\n\n ########### match the tracers to the baryonic particles ###########\n ################################################################## \n\n numbar_thisvol=np.nansum([pdata[ifile][ptype].shape[0] for ptype in [0,4,5]])\n numtcr=pdata_ifile[f'PartType3']['ParentID'].shape[0]\n \n if numbar_thisvol and numtcr:\n pdata[ifile][0]=pd.concat([pdata[ifile][ptype] for ptype in [0,4,5] if not pdata[ifile][ptype].shape[0]==0])\n pdata[ifile][0].sort_values(by='ParticleIDs',inplace=True)\n pdata[ifile][0].reset_index(inplace=True,drop=True)\n pdata_ifile_baryons_IDs=pdata[ifile][0].ParticleIDs\n\n t0=time.time()\n pdata_tcr_parent_IDs=np.uint64(pdata_ifile[f'PartType3']['ParentID'][:])\n expected_idx_of_tracer_in_pdata=np.searchsorted(pdata_ifile_baryons_IDs,pdata_tcr_parent_IDs)\n tracer_match_1=pdata_tcr_parent_IDs==np.concatenate([pdata_ifile_baryons_IDs,[np.nan]])[(expected_idx_of_tracer_in_pdata,)]\n pdata_tcr_tracer_IDs_invol=np.uint64(pdata_ifile[f'PartType3']['TracerID'][:])[tracer_match_1]\n expected_idx_of_tracer_in_pdata=expected_idx_of_tracer_in_pdata[tracer_match_1];del tracer_match_1 \n\n pdata[ifile][3]=pdata[ifile][0].loc[expected_idx_of_tracer_in_pdata,:].copy()# reindexing to tracer based\n pdata[ifile][3]['ParticleIDs']=pdata_tcr_tracer_IDs_invol #set particle IDs as the tracer IDs\n pdata[ifile][3].reset_index(drop=True,inplace=True)\n pdata[ifile][3]['Flag_Tracer']=np.ones(pdata[ifile][3].shape[0],dtype=np.int8) #\n 
pdata[ifile][3]['Mass']=np.float32(np.ones(pdata[ifile][3].shape[0])*masstable[3]*10**10/hval) \n numtcr_thisvol=pdata[ifile][3].shape[0]\n\n pdata[ifile][0]['Flag_Tracer']=np.zeros(pdata[ifile][0].shape[0],dtype=np.int8) \n\n print(f'Matched tracers for ifile {ifile+1}/{numfiles} in {time.time()-t0:.3f} sec')\n pdata_ifile.close()#housekeeping\n\n else:\n numtcr_thisvol=0\n print('No baryons in ifile for desired volume, will not match tracers')\n if numtcr_thisvol or numbar_thisvol:\n try:\n pdata[ifile]=pd.concat(pdata[ifile][ptype] for ptype in [0,3] if not pdata[ifile][ptype].shape[0]==0)\n except:\n print('No particles in ifile for desired volume')\n pdata[ifile]=pd.DataFrame([])\n\n else:\n print('No particles in ifile for desired volume')\n pdata[ifile]=pd.DataFrame([])\n\n print('Successfully loaded')\n\n #concat all pdata into one df\n pdata=pd.concat(pdata)\n pdata.sort_values(by="ParticleIDs",inplace=True)\n pdata.reset_index(inplace=True,drop=True)\n\n pdata_tracers=pdata.loc[pdata.Flag_Tracer==1,:].copy()\n pdata_baryons=pdata.loc[pdata.Flag_Tracer==0,:].copy()\n\n pdata_tracers.reset_index(inplace=True,drop=True)\n pdata_baryons.reset_index(inplace=True,drop=True)\n\n #generate KDtree\n pdata_kdtree=cKDTree(pdata_tracers.loc[:,[f'Coordinates_{x}'for x in 'xyz']].values)\n pdata_kdtree_cells=cKDTree(pdata_baryons.loc[:,[f'Coordinates_{x}'for x in 'xyz']].values)\n \n return pdata_tracers, pdata_kdtree, pdata_baryons, pdata_kdtree_cells","repo_name":"RJWright25/hydroflow","sub_path":"src_sims/illustris/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72958167233","text":"\"\"\"\nReverse an integer, where the integer range is -2^31 <= x <= 2^31\n\"\"\"\n\n\nclass Solution:\n def reverse(self, x: int) -> int:\n if x == 0:\n return x\n a = x if x > 0 else -x\n s = ''\n while a >= 10:\n s += str(a % 10)\n a = a//10\n else:\n s += str(a)\n res = int(s)\n if x < 0:\n res = -res\n if res > 2 ** 31-1 or res < -2 ** 31:\n res = 0\n return res\n\n\nsolution = Solution()\nprint(solution.reverse(1563847412))\n","repo_name":"SsuperL/leetcode-practice","sub_path":"medium/exercise_7.py","file_name":"exercise_7.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40414677201","text":"import torch.nn as nn\r\nimport torch\r\n\r\n\r\nclass DADSH_Loss(nn.Module):\r\n \"\"\"\r\n Loss function of ADSH\r\n\r\n Args:\r\n code_length(int): Hashing code length.\r\n gamma(float): Hyper-parameter.\r\n \"\"\"\r\n def __init__(self, code_length, eta, mu, gamma, varphi, delta, device):\r\n super(DADSH_Loss, self).__init__()\r\n self.device = device\r\n self.code_length = code_length\r\n self.eta = eta\r\n self.mu = mu\r\n self.gamma = gamma\r\n self.varphi = varphi\r\n self.delta = delta\r\n\r\n\r\n def forward(self, Y, U, V, S, W1, W2, index):\r\n m = index.shape[0] # m is the number of samples\r\n discrete_loss1 = self.eta*torch.norm(U @ torch.eye(m, device=self.device))**2\r\n discrete_loss2 = self.mu*torch.norm((U@U.t() - 2*m/3*torch.eye(self.code_length, device=self.device)))**2\r\n discrete_loss3 = 2*self.gamma*(abs(U).mul((1-U**2))).sum()\r\n\r\n quantization_loss1 = torch.norm(Y.t() - W1.t() @ U)**2\r\n quantization_loss2 = self.varphi*torch.norm(V[:, index] - U) ** 2\r\n hash_loss = self.delta*torch.norm(self.code_length*S - (W1.t()@U).t() @ (W2.t()@V))**2\r\n\r\n loss = (discrete_loss1 + discrete_loss2 + 
discrete_loss3 + hash_loss\r\n + quantization_loss1 + quantization_loss2) / (V.shape[1] * U.shape[1])\r\n\r\n return loss\r\n","repo_name":"Sue-syx/L2H","sub_path":"DADSH/models/dadsh_loss.py","file_name":"dadsh_loss.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"34442858491","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom src.lappy.models.bound import Bound\n\n\nclass PolygonMaker(object):\n \"\"\"\n class definition\n \"\"\"\n\n def make(self, bound: Bound):\n \"\"\"\n make points and segments from bound\n\n \"\"\"\n [pts, seg] = self.__polygon(bound)\n\n return [pts, seg]\n\n def __polygon(self, bound: Bound):\n \"\"\"\n make outer bound as polygon from\n\n \"\"\"\n\n point_count = len(bound.points)\n seg = []\n pts = []\n\n for i in range(point_count-1):\n seg.append([i, i + 1])\n seg.append([point_count - 1, 0])\n\n for pt in bound.points:\n pts.append([pt.x, pt.y])\n\n return pts, seg\n","repo_name":"erythrocyte/qtlappy","sub_path":"old_var/src/lappy/services/polygon_maker.py","file_name":"polygon_maker.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4002864160","text":"import os\nimport shutil\n\nfrom panoptes.dataset.evaluator import Evaluator\n\n\nclass HtmlEvaluator(Evaluator):\n def dump_overview_head(self, out):\n out.write(\"\"\"\n\n\n \n\n\n
\n
\n
Overview
\n\"\"\".strip())\n\n def dump_overview_foot(self, out):\n out.write(\"\"\"\n
\n
\n\n\n\"\"\".strip())\n\n def dump_task_head(self, out):\n out.write(\"\"\"\n\n\n \n\n\n
\n\"\"\".strip())\n\n def dump_episode_head(self, episode_index, correct, total, out):\n out.write(\"\"\"\n
\n
Episode %d (%d / %d)
\n\"\"\" % (episode_index + 1, correct, total))\n\n def dump_episode_foot(self, out):\n out.write(\"\"\"\n
\n\"\"\")\n\n def dump_task_foot(self, out):\n out.write(\"\"\"\n
\n\n\n\"\"\")\n\n def evaluate_episode(self, agent, episode_index, episode, out):\n correct = 0\n total = 0\n agent.reset()\n uid = agent.new_user()\n lines = []\n for in_s, want_out in episode.pairs:\n delib = agent.put(uid, in_s)\n \"\"\"\n line = '%d %d %d %s ' % (\n len(delib.recognized.parses), len(delib.recognized.ssens),\n len(delib.recognized.dsens), in_s.encode('utf-8'))\n \"\"\"\n line = in_s.encode('utf-8')\n if want_out or delib.out:\n if want_out == delib.out:\n line += ' %s' % want_out\n line = '%s' % line\n correct += 1\n else:\n line += ' want %s got %s' % (want_out, delib.out)\n line = '%s' % line\n total += 1\n line += '
\\n'\n lines.append(line)\n\n self.dump_episode_head(episode_index, correct, total, out)\n\n for line in lines:\n out.write(line)\n\n self.dump_episode_foot(out)\n\n return correct, total\n\n def evaluate_task(self, agent, task, episodes_per_task, out, die_on_error):\n try:\n self.dump_task_head(out)\n if episodes_per_task is None:\n episodes = task.episodes\n else:\n episodes = task.episodes[:episodes_per_task]\n correct = 0\n total = 0\n for i, episode in enumerate(episodes):\n sub_correct, sub_total = \\\n self.evaluate_episode(agent, i, episode, out)\n correct += sub_correct\n total += sub_total\n self.dump_task_foot(out)\n return correct, total\n except:\n if die_on_error:\n raise\n return 0, 1\n\n def evaluate(self, agent, dataset, episodes_per_task=None,\n die_on_error=False):\n root = 'data/evaluation/%s/' % dataset.name\n if os.path.exists(root):\n shutil.rmtree(root)\n os.makedirs(root)\n dataset.overview()\n results = []\n for i, task in enumerate(dataset.tasks):\n fn = root + '%02d_%s.html' % (i + 1, task.name)\n with open(fn, 'wb') as out:\n correct, total = \\\n self.evaluate_task(\n agent, task, episodes_per_task, out, die_on_error)\n results.append((correct, total))\n\n fn = root + 'overview.html'\n with open(fn, 'wb') as out:\n self.dump_overview_head(out)\n out.write('')\n for i, ((correct, total), task) in \\\n enumerate(zip(results, dataset.tasks)):\n pct = 100.0 * correct / total\n task_url = '%02d_%s.html' % (i + 1, task.name)\n task_name = task.name.replace('-', ' ')\n line = \"\"\"\n \n \n \n \n \n \n \n \"\"\" % (i + 1, pct, correct, total, task_url, task_name)\n out.write(line)\n out.write('
%d.%.2f%%%d%d%s
')\n self.dump_overview_foot(out)\n","repo_name":"knighton/babi","sub_path":"panoptes/dataset/html_evaluator.py","file_name":"html_evaluator.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38547811819","text":"\"\"\"\n @Time : 4/30/21 19:35\n @Author : TaylorMei\n @Email : mhy666@mail.dlut.edu.cn\n \n @Project : iccv\n @File : rename.py\n @Function:\n \n\"\"\"\nimport os\nimport shutil\n\ninput_path = '/media/iccd/disk1/mirror/data/ylt/test/image'\noutput_path = '/home/iccd/Desktop/more_mirror_image'\n\nimages = os.listdir(input_path)\n\nfor i, image in enumerate(images):\n src = os.path.join(input_path, image)\n dst = os.path.join(output_path, image[:-4] + '_a' + image[-4:])\n\n shutil.copy(src, dst)\n\nprint('ok')","repo_name":"Mhaiyang/iccv","sub_path":"utils/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"26259456033","text":"import requests\nfrom kuangjia.data.MXshuju import shuju\nclass MX(object):\n def mingxi(self,num):\n url= \"https://mobileqa.dms.saic-gm.com/api/sit/pol4s/pol4sPartOrder/rest/pol4s/partOrder/electricInvoiceDetail\"\n payload = \"{\\r\\n \\\"pageNum\\\": 1,\\r\\n \\\"pageSize\\\": 10,\\r\\n \\\"queryTerms\\\":\\r\\n {\\r\\n \\t\\\"billingNo\\\":\\\"%s\\\"\\r\\n }\\r\\n}\"%(num)\n headers = {\n 'Content-Type': \"application/json\",\n 'x-dealer-code': \"2100001\",\n 'x-position-code': \"D_PO_1028\",\n 'x-resource-code': \"pol4s_partOrder_electricInvoiceDetail\",\n 'x-track-code': \"4320e7d0-cf0c-7ba2-b3fe-1ecb1f15e394\",\n 'x-user-code': \"dhxc1u\",\n 'x-access-token': \"d6a5abdb98fd2ee203a4ddcd7ae47d07\",\n 'User-Agent': \"PostmanRuntime/7.15.0\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"47ad19cc-6ce9-42ec-b30d-bdc22069bc77,1f7162a0-740d-44f9-9342-a17bb024e25a\",\n 'Host': \"mobileqa.dms.saic-gm.com\",\n 'cookie': \"dapp.sgm.com:qa:=d6a5abdb98fd2ee203a4ddcd7ae47d07; fdaa0f2d854071f7f82d1c80a99830bb=2d45a497bf053a6a9a23955ddef3f0bd; dapp.sgm.com:qa:=d6a5abdb98fd2ee203a4ddcd7ae47d07; a689baa2b7060531c4d0be5b10aa7055=b1100f0adf89b706031ddd7ab44c593f\",\n 'accept-encoding': \"gzip, deflate\",\n 'content-length': \"96\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n res = requests.post(url=url, headers=headers, data=payload)\n print(res.text)\n return res.json()\n# if __name__==\"__main__\":\n# shuju()\n# print(shuju())\n# for i in shuju():\n# MX().mingxi(int(i[0]))","repo_name":"Weitao1378116505/bowen1","sub_path":"python/test/kuangjia/config/mingxi.py","file_name":"mingxi.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16933505926","text":"from app.config import config\nfrom app.utils import jsonparser\nimport time, os, json, pprint, ast, csv\nimport requests as r\nimport logging\nfrom flask import current_app as app\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException, ElementClickInterceptedException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import WebDriverException\n\n\nfrom flask_restplus import 
Namespace, Resource, fields\nworker = Namespace('workers', description='busy busy beee beee')\n\n'''\nThis file contains the selenium worker for starting a session with the hub, and a library for actions that can be used\nMore functionality added every day\n'''\nclass SeleniumWorkerSession():\n # Connection to the Selenium Hub, which runs in a Docker container\n def __init__(self):\n self.driver = webdriver.Remote(\n command_executor=config['selenium']['hub'],\n desired_capabilities={\n "browserName": "chrome",\n "applicationName": "debug",\n 'loggingPrefs': {'performance': 'ALL'}\n }\n )\n\n def __enter__(self):\n return self.driver\n\n def __call__(self):\n print("why is it not doing this")\n self.wait(5)\n\n def __exit__(self, type, value, traceback):\n self.driver.quit()\n app.logger.info('chrome session released')\n\nclass SeleniumTaskLibrary():\n def __init__(self, driver):\n self.driver = driver\n\n def url(self, data):\n result = self.driver.get(data['input'])\n response = {"requested_url": data['input']}\n response.update(self.info(data))\n return response\n\n def info(self, data):\n return {"info":{"title": self.driver.title, "current_url":self.driver.current_url}}\n\n def source(self, data):\n return {"source": self.driver.page_source}\n\n def xpath(self, data):\n try:\n if data['type'] == 'text':\n elements = self.driver.find_elements_by_xpath(data['input'])\n return {data['name']: [element.text for element in elements]}\n elif data['type'] == 'urls':\n self.wait(2)\n elements = self.driver.find_elements_by_xpath(data['input'])\n response = {"urls": [self.grab_attribute(element, 'href') for element in elements]}\n elif data['type'] == 'click':\n button = self.driver.find_elements_by_xpath(data['input'])\n button[0].click()\n self.wait(3)\n response = {"next_page": self.driver.current_url}\n else:\n response = {"exception": "xpath type not defined"}\n\n except WebDriverException as err:\n app.logger.error(err, exc_info=True)\n response = {"exception":str(err)}\n except Exception as err:\n app.logger.error(err, exc_info=True)\n response = {"exception":str(err)}\n\n return response\n\n\n def grab_attribute(self, element, attribute):\n try:\n return element.get_attribute(attribute)\n except Exception as ex:\n return {"exception": str(ex)}\n\n\n def elements(self, atrribute):\n #elements_by\n pass\n\n def wait(self, data):\n if isinstance(data, int) == False:\n print("uoo")\n sec = data['input']\n else:\n sec = data\n time.sleep(sec) ## hacky for demo\n self.driver.implicitly_wait(sec)\n return {'wait': f'Waited {sec} seconds'}\n\n def pagination(self, data):\n for _ in range(35):\n button = self.driver.find_elements_by_xpath(data['input'])\n app.logger.info(button[0])\n actions = ActionChains(self.driver)\n actions.move_to_element(button[0]).perform()\n button[0].click()\n time.sleep(5)\n return {"response": f"current url: {self.driver.current_url}"}\n\n def click(self, data):\n button = self.driver.find_elements_by_xpath(data['input'])\n button[0].click()\n time.sleep(2)\n return {"response": f"current url: {self.driver.current_url}"}\n\n def cookie(self, cookie):\n self.driver.add_cookie(cookie)\n\n def scroll_to(self):\n self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")\n\n def driver_performance_log(self):\n ## move the file write out of here?\n write_jsonfile(self.driver.get_log('performance'))\n print("written performance log to json file")\n return self.driver.get_log('performance')\n\nclass 
SeleniumGroupTasksLibrary:\n def __init__(self, driver):\n self.driver = driver\n self.Task = SeleniumTaskLibrary(self.driver)\n\n def vacancy_pages(self, recipe):\n '''\n WORK IN PROGRESS\n links = [{"id": int, "table_title": "", "location": "", "stuff" }] ### Dynamic for every useful element in tablerow?\n '''\n pages = 40\n all_links = []\n next_page = None #default if something goes wrong\n page_counter = 1\n\n try:\n for ingredient in recipe:\n if ingredient['fieldname'] == 'start_url':\n starting_page = ingredient\n\n elif ingredient['fieldname'] == 'vacancy_xc_path':\n links_path = ingredient\n\n elif ingredient['fieldname'] == 'next_page_xc_path':\n next_page = ingredient\n\n\n self.Task.url(starting_page)\n for page in range(pages):\n app.logger.info(links_path)\n get_links = self.Task.xpath(links_path)\n app.logger.info(get_links)\n if any(print(elem) in all_links for elem in get_links['urls']):\n #print(f"all_links: {all_links}") # testing\n #print(f"get_links: {get_links}") # testing\n break\n\n if 'urls' in get_links: all_links.extend(get_links['urls'])\n if next_page['input'] is not None:\n try:\n button = self.driver.find_element_by_xpath(next_page['input'])\n actions = ActionChains(self.driver)\n actions.move_to_element(button).perform()\n #body.send_keys(webdriver.common.keys.Keys.END) #hacky solution to go to bottom of the page\n #button = WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, next_page['input'])))\n #time.sleep(7)\n to_next_page = self.Task.pagination(next_page)\n page_counter += 1\n except TimeoutException as err:\n print('timeout!, last page?')\n break\n\n except ElementClickInterceptedException as err:\n app.logger.error(err, exc_info=True)\n break\n else:\n break\n\n except Exception as err:\n app.logger.error(err, exc_info=True)\n\n print(len(all_links))\n response = {'pagination': 'success', 'data': {'links': all_links, 'number_of_links': len(all_links), 'number_of_pages': page_counter, 'root_url': starting_page['input']}}\n print(f"name: {starting_page['input']} #urls: {response['data']['number_of_links']} pages: {page_counter}")\n return response\n\n def table_row_info(self):\n pass\n\n\ndef SeleniumTaskHandler(message):\n type = message['event']['type']\n name = message['event']['name']\n recipes = message['recipes']\n try:\n with SeleniumWorkerSession() as browser:\n Group = SeleniumGroupTasksLibrary(browser)\n browser.implicitly_wait(5)\n if type == 'actionchain':\n results = [getattr(Group, recipe['group_id'])(recipe['ingredients']) for recipe in recipes]\n results[0]['data'].update({'name':name})\n results = results[0]['data']\n\n elif type == 'singleton':\n Task = SeleniumTaskLibrary(browser)\n results = [getattr(Task, ingredient['method'])(ingredient) for ingredient in recipes]\n ## TODO: think of a better result format; these are now loose dicts in a list. Maybe a dict comprehension.\n except Exception as err:\n app.logger.error(err, exc_info=True)\n results = str(err)\n finally:\n app.logger.info('shutdown')\n return {'response_selenium': {'results':results}}\n\n\n### for quick dev tests\n@worker.route('/test')\nclass SeleniumAPI(Resource):\n\n def get(self):\n message = {\n 'event': {'method': 'requesting_workers', 'workers':['selenium'], 'name':'google', 'module': 'singleton'}, ### can be different\n 'recipes': [\n {'method': 'url', 'name': 'url', 'data': {'input': 'https://google.nl'}},\n {'method': 'wait', 'data': 100}\n ]}\n\n response = SeleniumTaskHandler(message)\n return response\n\n\n\n'''\n\nFinding Elements:\n\n# find_element_by_id\n# find_element_by_name\n# find_element_by_xpath\n# find_element_by_link_text\n# find_element_by_partial_link_text\n# find_element_by_tag_name\n# find_element_by_class_name\n# find_element_by_css_selector”\n\n\nLooking if elements are present:\n\nelement_located_selection_state_to_be(locator, is_selected): checks whether an element is located matching a locator (see explanation below) and its selection state matches is_selected (True or False).”\nelement_located_to_be_selected(locator): checks whether an element (a WebElement object) is located matching a locator (see explanation below) and is selected.\nelement_selection_state_to_be(element, is_selected): checks whether the selection state of an element (a WebElement object) matches is_selected (True or False).\nelement_to_be_selected(element): checks whether an element (a WebElement object) is selected.\nelement_to_be_clickable(locator): checks whether an element is located matching a locator (see explanation below) and can be clicked (i.e., is enabled).\nframe_to_be_available_and_switch_to_it(locator): checks whether a frame matching a locator (see explanation below) is located and can be switched to, once found, the condition switches to this frame.\ninvisibility_of_element_located(locator): checks whether an element matching a locator (see explanation below) is invisible or not present on the page (visibility means that the element is not only displayed or has a height and width that is greater than 0).\npresence_of_all_elements_located(locator): checks whether there is at least one element present on the page matching a locator (see explanation below). If found, the condition returns a list of matching elements .\npresence_of_element_located(locator): checks whether there is at least one element present[…]\nvisibility_of(element): checks whether a present element (a WebElement object) is visible (visibility means that the element is not only displayed but also has a height and width that is greater than 0).\nvisibility_of_all_elements_located(locator): checks whether all elements matching a locator (see explanation below) are also visible. If this is the case, returns a list of matching elements.\nvisibility_of_any_elements_located(locator): checks whether any element matching a locator (see explanation below) is visible. If this is the case, returns the first visible element.\nvisibility_of_element_located(locator): checks whether the first element matching a locator (see explanation below) is also visible. 
If this is the case, return the element.\n\n\nWindows & url changes:\nalert_is_present: checks whether an alert is present.\nnew_window_is_opened(current_handles): checks whether a new window has opened.\nnumber_of_windows_to_be(num_windows): checks whether a specific number of windows have opened.\ntitle_contains(title): checks whether the title of the page contains the given string.\ntitle_is(title): checks whether the title of the page is equal to the given string.\nurl_changes(url): checks whether the URL is different from a given one.\nurl_contains(url): checks whether the URL contains the given one.\nurl_matches(pattern): checks whether the URL matches a given regular expression pattern.\nurl_to_be(url): checks whether the URL matches the given one .\n\n\nDropdown menus:\nselect_by_index(index): select the option at the given index.\nselect_by_value(value): select all options that have a value matching the argument.\nselect_by_visible_text(text): select all options that display text matching the argument.\nThe methods above all come with deselect_* variants as well to deselect options. The deselect_all method clears all selected entries (note that the select tag can support multiple selections).\nall_selected_options: returns a list of all selected options belonging to this select tag.\nfirst_selected_option: the first selected option in this select tag (or the currently selected option in a normal select that only allows for a single selection).\noptions: returns a list of all options belonging to this select tag.”\n\nSimulate Clicks:\nclick(on_element=None): clicks an element. If None is given, uses the current mouse position.\nclick_and_hold(on_element=None): holds down the left mouse button on an element or the current mouse position.\nrelease(on_element=None): releasing a held mouse button on an element or the current mouse position.\ncontext_click(on_element=None): performs a context click (right-click) on an element or the current mouse position.\ndouble_click(on_element=None): double-clicks an element or the current mouse position.\n\nMoving on the screen:\nmove_by_offset(xoffset, yoffset): move the mouse to an offset from current mouse position.\nmove_to_element(to_element): move the mouse to the middle of an element.\nmove_to_element_with_offset(to_element, xoffset, yoffset): move the mouse by an offset of the specified element. Offsets are relative to the top-left corner of the element.\ndrag_and_drop(source, target): holds down the left mouse button on the source element, then moves to the target element and releases the mouse button .\ndrag_and_drop_by_offset(source, xoffset, yoffset): holds down the left mouse button on the source element, then moves to the target offset and releases the mouse button.\nkey_down(value, element=None): sends a keypress only, without releasing it. Should only be used with modifier keys (i.e., Control, Alt, and Shift).\nkey_up[…]\nkey_up(value, element=None): releases a modifier key.\nsend_keys(*keys_to_send): sends keys to current focused element.\nsend_keys_to_element(element, *keys_to_send): sends keys to an element.\npause(seconds): wait for a given amount of seconds.\n\nAction chaining:\nperform(): performs all stored actions defined on the action chain. 
This is normally the last command you’ll give to a chain.\nreset_actions(): clears actions that are already stored on the remote end.”\n\n\n\n'''\n\n### temp THE FRANKENCODE\n#### Implement GROUP OF TASKS\ndef vacancy_parser(message):\n recipes = message['event']['recipes']\n name = message['event']['name']\n page_counter = 0\n end_of_pages = True\n all_links = []\n with SeleniumWorkerSession() as browser:\n Task = SeleniumTaskLibrary(browser)\n for recipe in recipes:\n\n if recipe['group_id'] == 'start':\n for ingredient in recipe['ingredients']:\n starter = Task.url(ingredient['data'])\n print(\"begonnen op:\", starter)\n if recipe['group_id'] == 'all_links':\n while end_of_pages:\n print(page_counter + 1)\n if 'next_page' in recipe:\n print(name)\n end_of_pages = False\n links = [getattr(Task, ingredient['method'])(ingredient['data']) for ingredient in recipe['ingredients'] if recipe['group_id'] == 'all_links']\n page_counter += 1\n if links != []:\n if any(elem in all_links for elem in links[0]['urls']):\n end_of_pages = False\n\n #print(links)\n if any('EXCEPTION' in x for x in links):\n print('EXCEPTION')\n end_of_pages = False\n\n if page_counter == 50:\n end_of_pages = False\n #print('ree', links)\n else:\n all_links.extend(links[0]['urls'])\n number_of_links = len(all_links)\n response = {'number_of_pages': page_counter, 'number_of_links': number_of_links,'links': all_links}\n response.update(starter)\n response.update({'name':name})\n return response\n","repo_name":"Yaleesa/project-Monarch","sub_path":"services/workers-app/app/worker_pool/wo_selenium.py","file_name":"wo_selenium.py","file_ext":"py","file_size_in_byte":17011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22454927170","text":"import argparse\nimport numpy as np\n\nimport core.common as common\nimport casecontrol.locusworkflow\nimport casecontrol.motifworkflow\n\n\ndef initialize_parser():\n parser = argparse.ArgumentParser(\n prog=\"casecontrol\", description=\"Case-control analysis of STR profiles\"\n )\n return parser\n\n\ndef initialize_subparsers(parser):\n subparsers = parser.add_subparsers(help=\"command help\")\n subparsers.required = True\n subparsers.dest = \"command\"\n return subparsers\n\n\ndef add_locus_command(subparsers):\n command_parser = subparsers.add_parser(\n \"locus\", help=\"Perform locus-based case-control analysis\"\n )\n\n # Add required arguments\n required_args = command_parser.add_argument_group(\"required arguments\")\n help = \"TSV file describing all STR profiles\"\n required_args.add_argument(\"--manifest\", help=help, required=True)\n\n help = \"JSON file with combined counts of anchored in-repeat reads\"\n required_args.add_argument(\"--multisample-profile\", help=help, required=True)\n\n help = \"TSV file with results of the case-control analysis\"\n required_args.add_argument(\"--output\", help=help, required=True)\n\n # Add optional arguments\n min_irr_count = 5\n help = \"Require regions to have at least this many in-repeat reads (default: {})\"\n help = help.format(min_irr_count)\n command_parser.add_argument(\n \"--min-inrepeat-reads\", help=help, default=min_irr_count, type=int\n )\n\n help = \"BED file with regions to which analysis should be restricted\"\n command_parser.add_argument(\"--target-regions\", help=help, default=None)\n\n wilcoxon_method = \"normal\"\n help = \"Method of calculating Wilcoxon Rank-Sum Test p-value (default: {})\"\n help = help.format(wilcoxon_method)\n 
command_parser.add_argument(\"--test-params\", help=help, default=wilcoxon_method)\n\n # TODO: Implement logic for \"resample(10000)\" parameter\n # num_resamples = 1000000\n # help = \"Number of iterations for the resampling test (default: {})\"\n # help = help.format(num_resamples)\n\n return command_parser\n\n\ndef add_motif_command(subparsers):\n command_parser = subparsers.add_parser(\n \"motif\", help=\"Perform motif-based case-control analysis\"\n )\n\n # Add required arguments\n required_args = command_parser.add_argument_group(\"required arguments\")\n help = \"TSV file describing all STR profiles\"\n required_args.add_argument(\"--manifest\", help=help, required=True)\n\n help = \"JSON file with combined counts of anchored in-repeat reads\"\n required_args.add_argument(\"--multisample-profile\", help=help, required=True)\n\n help = \"TSV file with results of the case-control analysis\"\n required_args.add_argument(\"--output\", help=help, required=True)\n\n # Add optional arguments\n min_irr_pair_count = 5\n help = \"Require at least this many in-repeat read pairs for a given motif (default: {})\"\n help = help.format(min_irr_pair_count)\n command_parser.add_argument(\n \"--min-inrepeat-read-pairs\", help=help, default=min_irr_pair_count, type=int\n )\n\n wilcoxon_method = \"normal\"\n help = \"Method of calculating Wilcoxon Rank-Sum Test p-value (default: {})\"\n help = help.format(wilcoxon_method)\n command_parser.add_argument(\"--test-params\", help=help, default=wilcoxon_method)\n\n return command_parser\n\n\ndef decode_test_params(params):\n if params == \"normal\":\n return (\"normal\",)\n elif \"permute_\" in params:\n num_perms = int(params.replace(\"permute_\", \"\"))\n return (\"permute\", num_perms)\n else:\n raise Exception(\"Unknown test parameters: {}\".format(params))\n\n\ndef run_locus_workflow(args):\n params = casecontrol.locusworkflow.Parameters(\n manifest_path=args.manifest,\n multisample_profile_path=args.multisample_profile,\n min_inrepeat_reads=args.min_inrepeat_reads,\n output_path=args.output,\n target_region_path=args.target_regions,\n test_params=decode_test_params(args.test_params),\n )\n\n casecontrol.locusworkflow.run(params)\n\n\ndef run_motif_workflow(args):\n params = casecontrol.motifworkflow.Parameters(\n manifest_path=args.manifest,\n multisample_profile_path=args.multisample_profile,\n min_inrepeat_read_pairs=args.min_inrepeat_read_pairs,\n output_path=args.output,\n test_params=decode_test_params(args.test_params),\n )\n\n casecontrol.motifworkflow.run(params)\n\n\ndef main():\n np.random.seed(42)\n common.init_logger()\n parser = initialize_parser()\n subparsers = initialize_subparsers(parser)\n locus_command_parser = add_locus_command(subparsers)\n locus_command_parser.set_defaults(run_workflow=run_locus_workflow)\n motif_command_parser = add_motif_command(subparsers)\n motif_command_parser.set_defaults(run_workflow=run_motif_workflow)\n\n args = parser.parse_args()\n args.run_workflow(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Illumina/ExpansionHunterDenovo","sub_path":"scripts/casecontrol.py","file_name":"casecontrol.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"61"} +{"seq_id":"16506546371","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport requests\nimport json\nimport datetime\nimport sys\nfrom ift6758.client import game_client, serving_client\n\n########################################\n# 
Client section\n# For local testing\n#pinger = game_client.GameClient("./predictions", "127.0.0.1", 5000)\n#serving = serving_client.ServingClient(ip="127.0.0.1", port=5000)\n\n# For Docker \npinger = game_client.GameClient("./predictions", "flask", 5000)\nserving = serving_client.ServingClient(ip="flask",port=5000)\n\n\n\nglobal memoryGamet1\nglobal memoryGamet2\nglobal memoryGameID\nglobal homeOrAway\nmemoryGamet1=0\nmemoryGamet2=0\nmemoryGameID=0\nhomeOrAway=[]\n\n\n\nst.title("Hockey visualization App")\n\nwith st.form(key='Form2'):\n \n with st.sidebar:\n # TODO: Add input for the sidebar\n workspace1 = st.selectbox('Workspace', ('ift6758-22-milestone-2',' '))\n Model1 = st.selectbox('Model', ('question-6-random-forest-classifier-base','xgboost-task5-model','question-6-decision-tree-classifer-base'))\n Version1 = st.selectbox('Version', ('3.0.0',' '))\n \n getmodel1 = st.form_submit_button(label='Get Models')\n pass\n\nwith st.container():\n form1 = st.form(key='GameID')\n gameid1 = form1.number_input('Enter GameID', step=None, value=2021020312)\n ping_game = form1.form_submit_button(label='Ping games')\n \n \npass\n\nif getmodel1:\n #serving.download_registry_model('ift6758-22-milestone-2', 'question-6-random-forest-classifier-base', '1.0.0')\n \n dic= serving.download_registry_model(workspace1,Model1,Version1)\n \n st.write(dic['message'])\n #st.write("Model ",Version1, " Version ",Model1," Workspace ",workspace1," has been uploaded")\n \n\n \n\n\n\n\n\nif ping_game:\n\n \n\n tag = workspace1+Model1+Version1\n data = requests.get('https://statsapi.web.nhl.com/api/v1/game/{}/feed/live/'.format(gameid1)).json()\n game=(pinger.ping(gameid1,tag))\n period1 = game["period"].iloc[-1]\n s = game.iloc[-1]["game_elapsed_time"]\n \n timet1 = str(datetime.timedelta(seconds=int(s)))\n \n #timet1 = str(game.iloc[-1]["game_period_seconds"]).split(":")\n\n \n # print(data["liveData"]["plays"]["allPlays"])\n # period_no = data["liveData"]["plays"]["allPlays"][-1]["about"]["period"]\n # periodTimeRemaining = data["liveData"]["plays"]["allPlays"][-1]["about"]["periodTimeRemaining"]\n # # td_home = data["liveData"]["boxscore"]["teams"]["home"]["teamStats"]["teamSkaterStats"]\n # home = data["gameData"]["teams"]["home"]["name"]\n # away = data["gameData"]["teams"]["away"]["name"]\n # st.subheader(f'Game {gameid1}: {away} @ {home}')\n # st.write(f'Period {period_no} - {periodTimeRemaining} left')\n # actual_goals = {\n # 'home': data["liveData"]["plays"]["allPlays"][-1]["about"]["goals"]["home"],\n # 'away': data["liveData"]["plays"]["allPlays"][-1]["about"]["goals"]["away"],\n # }\n \n timeLeftt1 = data["liveData"]["plays"]["allPlays"][-1]["about"]["periodTimeRemaining"]\n\n # get Team Names\n team1 = data["gameData"]["teams"]["home"]["name"]\n team2 = data["gameData"]["teams"]["away"]["name"]\n\n # Separate dataframes for each team\n gamet1= game[game['team_name']==team1]\n gamet2= game[game['team_name']==team2]\n \n #Set up memory game and ID\n if type(memoryGameID)==type(0):\n #if no memory yet, set memoryGameID\n memoryGameID=str(gameid1)\n if type(memoryGamet1)==type(0):\n #if no memory yet, set memoryGame\n memoryGamet1=gamet1\n memoryGamet2=gamet2\n else:\n if memoryGameID!=str(gameid1):\n #if the memoryGameID does not correspond to the input gameID, it means that\n #we are dealing with a new game. 
\n memoryGamet1=gamet1\n memoryGamet2=gamet2\n memoryGameID=str(gameid1)\n else:\n #This is if the new events belong to the same game\n memoryGamet1.append(gamet1,ignore_index=True)\n memoryGamet2.append(gamet2,ignore_index=True)\n\n\n \n \n st.subheader(f'Game {gameid1}: {team1} @ {team2}')\n st.write(f'Period {period1} - {timeLeftt1} left')\n actual_goals = {\n 'home': data["liveData"]["plays"]["allPlays"][-1]["about"]["goals"]["home"],\n 'away': data["liveData"]["plays"]["allPlays"][-1]["about"]["goals"]["away"],\n }\n \n # Corresponding xG for each team\n XGt1 = gamet1[['goal_probability']]\n \n \n #XGt1 = XGt1.drop_duplicates(subset='goal_probability', keep='first')\n\n XGt2 = gamet2[['goal_probability']]\n #XGt2 = XGt2.drop_duplicates(subset='goal_probability', keep='first')\n # Sum\n sumt1 = float(XGt1.sum())\n sumt2 = float(XGt2.sum())\n \n # Dataframe with only goals for each team\n Gt1 = gamet1[gamet1['event_type']=='GOAL']\n \n #Gt1 = Gt1.drop_duplicates(subset='event_id', keep='first')\n\n Gt2 = gamet2[gamet2['event_type']=='GOAL']\n \n #Gt2 = Gt2.drop_duplicates(subset='event_id', keep='first')\n # Sum of goals for each team\n goalst1 = int(len(Gt1.index))\n goalst2 = int(len(Gt2.index))\n\n delta1 = sumt1 - goalst1\n delta2 = sumt2 - goalst2\n \n\n # st.subheader('Features data and predictions')\n # col1, col2 = st.columns(2)\n # with col1:\n # st.metric(\n # label=f"{home} - Expected Goals (actual)",\n # value=f"{pred_goals['home']} ({actual_goals['home']})",\n # delta=pred_goals['home']-actual_goals['home']\n # )\n # with col2:\n # st.metric(\n # label=f"{away} - Expected Goals (actual)",\n # value=f"{pred_goals['away']} ({actual_goals['away']})",\n # delta=pred_goals['away']-actual_goals['away']\n # )\n\n # Problem: the stats do not reset after each ping. 
\n st.subheader('Features data and predictions')\n col1, col2 = st.columns(2)\n col1.metric(\n label=f\"{team1} - Expected Goals (actual)\",\n value=f\"{'%.2f' % sumt1} ({goalst1})\",\n delta='%.2f' % delta1\n )\n with col2:\n st.metric(\n label=f\"{team2} - Expected Goals (actual)\",\n value=f\"{'%.2f' % sumt2} ({goalst2})\",\n delta='%.2f' % delta2\n )\n #events = pd.DataFrame(\n # [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],\n # columns=['feature0', 'feature1', 'feature2', 'feature3', 'feature4', 'prediction']\n #)\n #pred_goals = {\n # 'home': sum([0.33, 0.45, 0.11, 0.89]),\n # 'away': sum([0.63, 0.95, 0.31, 0.79]),\n #}\n \n\n #st.table(data=events)\n \n #game = game.drop_duplicates(subset='event_id', keep='first')\n df1 = game[['coordinates_x', 'coordinates_y', 'period', 'game_elapsed_time', 'shot_distance', 'shot_angle', 'hand_based_shot_angle', 'empty_net', 'last_coordinates_x', 'last_coordinates_y', 'time_since_last_event', 'distance_from_last_event', 'rebond', 'speed_from_last_event', 'shot_angle_change', 'ShotType_Backhand', 'ShotType_Deflected', 'ShotType_Slap Shot', 'ShotType_Snap Shot', 'ShotType_Tip-In', 'ShotType_Wrap-around', 'ShotType_Wrist Shot','goal_probability']]\n st.dataframe(df1)\n\n\n\n\n#with st.container():\n# form = st.form(key='GameID')\n# gameid = form.number_input('Enter GameID', step=None, value=0)\n# submit_button = form.form_submit_button(label='Ping game')\n# # gameid = st.text_input('GameID')\n#\n# if submit_button:\n# if st.button('Ping game'):\n#\n# data = requests.get('https://statsapi.web.nhl.com/api/v1/game/{}/feed/live/'.format(gameid)).json()\n# print(data[\"liveData\"][\"plays\"][\"allPlays\"])\n# period_no = data[\"liveData\"][\"plays\"][\"allPlays\"][-1][\"about\"][\"period\"]\n# periodTimeRemaining = data[\"liveData\"][\"plays\"][\"allPlays\"][-1][\"about\"][\"periodTimeRemaining\"]\n# # td_home = data[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"teamStats\"][\"teamSkaterStats\"]\n# home = data[\"gameData\"][\"teams\"][\"home\"][\"name\"]\n# away = data[\"gameData\"][\"teams\"][\"away\"][\"name\"]\n# st.subheader(f'Game {gameid}: {away} @ {home}')\n# st.write(f'Period {period_no} - {periodTimeRemaining} left')\n# actual_goals = {\n# 'home': data[\"liveData\"][\"plays\"][\"allPlays\"][-1][\"about\"][\"goals\"][\"home\"],\n# 'away': data[\"liveData\"][\"plays\"][\"allPlays\"][-1][\"about\"][\"goals\"][\"away\"],\n# }\n#\n# # TODO get data from client API\n# # - Predictions\n# # - Features\n#\n","repo_name":"kaborewelisee/Project-6758","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":8985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73292312195","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QComboBox\n\nclass Scrollbar(QWidget):\n def __init__(self, filename):\n QWidget.__init__(self)\n self.file = open(filename, 'r')\n self.data = []\n\n def init(self):\n self.setGeometry(300, 300, 300, 250)\n self.setStyleSheet('background-color:white;')\n self.setWindowTitle('Scrollbar Text Reader')\n\n self.output = QLineEdit(self)\n self.output.setText('1')\n self.output.move(20, 120)\n self.output.setReadOnly(True)\n\n self.input = QComboBox(self)\n self.input.move(20, 20)\n for line in self.file:\n self.data.append(line.split())\n self.input.addItem(line.split()[0])\n \n self.input.currentIndexChanged.connect(self.select)\n\n self.show()\n\n def select(self):\n a = 
self.input.currentText()\n b = ''\n for i in self.data:\n if i[0] == a:\n b = i[1]\n break\n self.output.setText(b)\n\n\napp = QApplication(sys.argv)\n\nw = Scrollbar('WUP Task 3 Text.txt')\nw.init()\n\nsys.exit(app.exec_())\n","repo_name":"DarrenChiang/Python-2016","sub_path":"GUI Files/WUP Task 3.py","file_name":"WUP Task 3.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16316245075","text":"import os\nfrom pathlib import Path\n\n\ndef running_in_docker():\n path = '/proc/self/cgroup'\n return os.path.exists('/.dockerenv') or os.path.isfile(path) and any('docker' in line for line in open(path))\n\n\ntest_src_dir = Path(r'/cfs/home/s/t/stiebesi/code/tests/test_data')\ntorch_datasets = test_src_dir / "TestSaveDatasetsTorch" / "unix"\nif running_in_docker():\n test_out_dir = Path('/cache')\n# else:\n# test_out_dir = Path(f'/cfs/share/cache/output_johanmay/tests')\n\nif os.name == "nt":\n test_out_dir = Path(r'C:\Users\stiebesi\CACHE\test_output')\n test_src_dir = Path(r'X:\s\t\stiebesi\code\tests\test_data')\n torch_datasets = test_src_dir / "TestSaveDatasetsTorch" / "win"\n\ntest_training_src_dir = test_src_dir\ntest_pipeline_dir = test_src_dir\n\ntest_eval_dir = test_src_dir / '2019-12-12_20-27-20_eff_net_cleaned_data'\ntest_checkpoint = test_src_dir / '2019-12-12_20-27-20_eff_net_cleaned_data' / "checkpoint.pth"\n\ndata_loader_img_file = test_src_dir / '2019-06-05_15-30-52_0_RESULT.erfh5'\n\ntest_eval_output_path = test_out_dir / 'eval'\ntest_save_dataset_path = test_out_dir / 'dataset'\ntest_training_out_dir = test_out_dir / 'training'\ntest_training_datasets_dir = test_out_dir / 'datasets'\n\ntest_split_path = test_src_dir / '2019-09-06_15-44-58_63_sensors'\n\ntest_caching_dir = test_out_dir / 'erfh5_pipeline' / 'caching'\n\ntest_useless_file = test_src_dir / "2019-07-23_15-38-08_5000p_0/2019-07-23_15-38-08_0_RESULT.erfh5"\n\n\nif __name__ == "__main__":\n print(running_in_docker())\n","repo_name":"isse-augsburg/rtm-predictions","sub_path":"Resources/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"41095473155","text":"from sys import stdin\nfrom collections import defaultdict\nimport sys\nsys.setrecursionlimit(100000)\n\nN, Q = list(map(int,stdin.readline().split()))\n\nlst=[]\nfor _ in range(2**N):\n lst.append(list(map(int,stdin.readline().split())))\n\nL_list = list(map(int,stdin.readline().split()))\n\n\n# cast the firestorm\nfor L in L_list:\n lst2 = [[0] * (2 ** N) for _ in range(2 ** N)] # holds the ice board after rotation\n if L>0:\n # split into 2**L x 2**L subgrids and rotate each by 90 degrees\n part_len=2**L\n row, col = 0, 0\n\n while(1):\n if row+part_len>2**N:\n break\n\n for i in range(row,row+part_len):\n for j in range(col,col+part_len):\n #print(i,j,' / ',j+row-col,row+part_len+col-1-i)\n lst2[j+row-col][row+part_len+col-1-i] = lst[i][j]\n\n col+=part_len\n if col>=2**N:\n col=0\n row+=part_len\n\n\n else: # if L == 0, do not rotate (the subgrid is 1x1)\n for i in range(2**N):\n for j in range(2**N):\n lst2[i][j]=lst[i][j]\n\n lst3=[[0]*(2**N) for _ in range(2**N)]\n\n # cells not adjacent to 3 or more icy cells lose 1 unit of ice\n direction=[(1,0),(-1,0),(0,1),(0,-1)]\n\n for i in range(2**N):\n for j in range(2**N):\n if lst2[i][j]>0:\n cnt=0\n for d in direction:\n new_x=i+d[0]\n new_y=j+d[1]\n if new_x<0 or new_x>=2**N or new_y<0 or new_y>=2**N or lst2[new_x][new_y]<=0:\n continue\n cnt+=1\n\n if 
cnt<3:\n lst3[i][j]=lst2[i][j]-1\n else:\n lst3[i][j]=lst2[i][j]\n\n lst=[[0]*(2**N) for _ in range(2**N)]\n for i in range(2**N):\n for j in range(2**N):\n lst[i][j]=lst3[i][j]\n\n\n# 칸에서 남아있는 얼음의 합 구하기\nice_sum=0\nfor x in lst:\n ice_sum += sum(x)\n\nprint(ice_sum)\n\n# 남아있는 얼음 중 가장 큰 덩어리찾기 (dfs)\nd=defaultdict(int)\nvisit=[[0]*(2**N) for _ in range(2**N)]\n\n\ndef find_dungeori(i,j,d_num):\n if i<0 or i>=2**N or j<0 or j>=2**N or lst[i][j]<=0 or visit[i][j]==1:\n return\n\n visit[i][j]=1\n d[d_num]+=1 # 각 덩어리의 번호 : d_num\n find_dungeori(i + 1, j, d_num)\n find_dungeori(i - 1, j, d_num)\n find_dungeori(i, j+1, d_num)\n find_dungeori(i, j-1, d_num)\n\n\nd_num=0\nfor i in range(2**N):\n for j in range(2**N):\n if visit[i][j]==1 or lst[i][j]<=0:\n continue\n\n d[d_num]=0\n find_dungeori(i,j,d_num)\n d_num+=1\n\n\n# 가장 큰 덩어리의 칸개수 계산\nif d.values():\n biggest_kan = sorted(d.values(), reverse=True)[0] # value를 기준으로 sort\n print(biggest_kan)\n\nelse:\n print(0)\n","repo_name":"kimseojeong6533/CodingTest_python","sub_path":"백준_마법사상어와파이어스톰.py","file_name":"백준_마법사상어와파이어스톰.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28657408114","text":"\"\"\"Generalized Python logging for Pyodide.\"\"\"\n\nimport os, sys, threading, time, logging\nfrom functools import partial\nfrom typing import Any\nfrom js import console\n\n# fixme: This is a little strange, global objects only work inside lists or dicts (Pyodide-related)\nloggers = []\n\n\nclass FlareLogRecord(logging.LogRecord):\n \"\"\"A LogRecord instance represents an event being logged.\n\n LogRecord instances are created every time something is logged. They\n contain all the information pertinent to the event being logged. The\n main information passed in is in msg and args, which are combined\n using str(msg) % args to create the message field of the record. The\n record also includes information such as when the record was created,\n the source line where the logging call was made, and any exception\n information to be logged.\n\n NOTE: This is mostly the same as the original LogRecord. Differences:\n\n * Do not use a single dict as keyword args because pyodites' Proxy objects cannot be used\n with isinstance(proxy, collections.abc.Mapping). This will be discussed upstream.\n * User-supplied arguments to logging messages will not be replaced in message, but will be forwarded\n to js console via separate arguments.\n \"\"\"\n\n def __init__(\n self,\n name,\n level,\n pathname,\n lineno,\n msg,\n args,\n exc_info,\n func=None,\n sinfo=None,\n mergeArgs=False,\n **kwargs\n ):\n \"\"\"Initialize a logging record with interesting information.\"\"\"\n ct = time.time()\n self.name = name\n self.msg = msg\n #\n # The following statement allows passing of a dictionary as a sole\n # argument, so that you can do something like\n # logging.debug(\"a %(a)d b %(b)s\", {'a':1, 'b':2})\n # Suggested by Stefan Behnel.\n # Note that without the test for args[0], we get a problem because\n # during formatting, we test to see if the arg is present using\n # 'if self.args:'. If the event being logged is e.g. 'Value is %d'\n # and if the passed arg fails 'if self.args:' then no formatting\n # is done. 
For example, logger.warning('Value is %d', 0) would log\n # 'Value is %d' instead of 'Value is 0'.\n # For the use case of passing a dictionary, this should not be a\n # problem.\n # Issue #21172: a request was made to relax the isinstance check\n # to hasattr(args[0], '__getitem__'). However, the docs on string\n # formatting still seem to suggest a mapping object is required.\n # Thus, while not removing the isinstance check, it does now look\n # for collections.abc.Mapping rather than, as before, dict.\n self.args = args\n self.levelname = logging.getLevelName(level)\n self.levelno = level\n self.pathname = pathname\n try:\n self.filename = os.path.basename(pathname)\n self.module = os.path.splitext(self.filename)[0]\n except (TypeError, ValueError, AttributeError):\n self.filename = pathname\n self.module = \"Unknown module\"\n self.exc_info = exc_info\n self.exc_text = None # used to cache the traceback text\n self.stack_info = sinfo\n self.lineno = lineno\n self.funcName = func\n self.created = ct\n self.msecs = (ct - int(ct)) * 1000\n self.relativeCreated = (self.created - logging._startTime) * 1000\n self.mergeArgs = mergeArgs\n if logging.logThreads:\n self.thread = threading.get_ident()\n self.threadName = threading.current_thread().name\n else: # pragma: no cover\n self.thread = None\n self.threadName = None\n if not logging.logMultiprocessing: # pragma: no cover\n self.processName = None\n else:\n self.processName = \"MainProcess\"\n mp = sys.modules.get(\"multiprocessing\")\n if mp is not None:\n # Errors may occur if multiprocessing has not finished loading\n # yet - e.g. if a custom import hook causes third-party code\n # to run when multiprocessing calls import. See issue 8200\n # for an example\n try:\n self.processName = mp.current_process().name\n except Exception: # pragma: no cover\n pass\n if logging.logProcesses and hasattr(os, \"getpid\"):\n self.process = os.getpid()\n else:\n self.process = None\n\n def getMessage(self) -> str:\n \"\"\"Optionally merge args into message driven by mergeArgs flag in ctor, otherwise this will happen later in js console as objects.\n\n :return:\n \"\"\"\n if self.mergeArgs:\n return super().getMessage()\n return self.msg\n\n\nclass JSConsoleHandler(logging.StreamHandler):\n \"\"\"Brings our awesome log messages onto the js console.\"\"\"\n\n def emit(self, record: logging.LogRecord) -> None:\n msg = self.format(record)\n if record.levelno == logging.DEBUG:\n console.debug(msg, *record.args)\n elif record.levelno == logging.INFO:\n console.info(msg, *record.args)\n elif record.levelno == logging.WARNING:\n console.warn(msg, *record.args)\n elif record.levelno == logging.ERROR:\n if record.exc_info:\n console.error(msg, *record.args)\n else:\n myargs = [\"color: red; font-weight: bold;\"]\n myargs.extend(record.args)\n msg = \"%c{0}\".format(msg)\n console.log(msg, *myargs)\n elif record.levelno == logging.CRITICAL:\n console.error(msg, *record.args)\n else:\n console.log(\"dont know which level\", record.msg, *record.args)\n\n\ndef prepareLogger(level: str, mergeArgs: bool = False) -> None:\n \"\"\"Call this before first usage of logging or getLogger().\n\n :param level Log level as str as of all, info, debug, warning, error or critical\n :param mergeArgs: If True we're merging args into resulting message resulting in\n possible duplicated output or get the 'raw' message output if False.\n \"\"\"\n if loggers:\n return\n\n if level == \"all\":\n level = logging.NOTSET\n elif level == \"info\":\n level = logging.INFO\n elif level == 
\"debug\":\n level = logging.DEBUG\n elif level == \"warning\":\n level = logging.WARNING\n elif level == \"error\":\n level = logging.ERROR\n elif level == \"critical\":\n level = logging.CRITICAL\n else:\n level = logging.DEBUG\n\n logging.setLogRecordFactory(partial(FlareLogRecord, mergeArgs=mergeArgs))\n logger = logging.getLogger()\n logger.setLevel(level)\n ch = JSConsoleHandler()\n ch.setLevel(level)\n formatter = logging.Formatter(\n \"%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s\"\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n loggers.append(logger)\n\n\ndef getLogger(name: str) -> Any:\n \"\"\"Creates a child logger of our 'root' logger with a name.\n\n Usually it's the __name__ attribute of the module you want to use a logger for.\n\n :param name:\n :return:\n \"\"\"\n return loggers[0].getChild(name)\n","repo_name":"viur-framework/flare","sub_path":"flare/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"28327563072","text":"#!/usr/bin/python3\n\"\"\"\n Module that contain a sqlalchemy declarative\n\"\"\"\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass State(Base):\n \"\"\"\n class state inheriting from Base declarative\n \"\"\"\n __tablename__ = 'states'\n id = Column(Integer, primary_key=True, nullable=False,\n autoincrement=True)\n name = Column(String(128), nullable=False)\n","repo_name":"Thapelo-ST/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/model_state.py","file_name":"model_state.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16890627262","text":"# importing os library\nimport os\n# importing datetime library\nimport datetime\n# application module\ndef Application():\n os.system('clear')\n running = True\n today = datetime.datetime.now()\n while running != False:\n option = int(input('{}\\nEscolha uma opção em seguida pressione enter para continuar.\\nEditar (1)\\nCalcular idade (2)\\nSair (0)\\n:'.format(Date())))\n if option > 2 or option < 0:\n print('\\nErro: USB 04')\n running = False\n elif option == 0:\n running = False\n elif option == 2:\n running = False\n print('\\nVocê tem {} anos.'.format(int(today.strftime('%Y')) - year))\n else:\n os.system('clear')\n# get date function\ndef Date():\n global year\n monthdict = {\n 1:'Janeiro',\n 2:'Fevereiro',\n 3:'Março',\n 4:'Abril',\n 5:'Maio',\n 6:'Junho',\n 7:'Julho',\n 8:'Agosto',\n 9:'Setembro',\n 10:'Outubro',\n 11:'Novembro',\n 12:'Dezembro'\n }\n year = int(input('Digite o ano que você nasceu.\\nEx: 1996: '))\n if year > 9999 or year < 0:\n return \"Erro: USB 01\\n\"\n else:\n month = int(input('\\nDigite o mês que você nasceu.\\nEx: 10: '))\n if month > 12 or month < 1:\n return 'Erro: USB 02\\n'\n else:\n day = int(input('\\nDigite o dia que você nasceu.\\nEx: 21: '))\n if day > 31 or day < 1:\n return 'Erro: USB 03\\n'\n else:\n return \"\\nVocê nasceu no dia {} de {} de {}.\".format(day, monthdict[month], year)\n# application start\nApplication()\n","repo_name":"mtvlc/Python_CursoEmVideo","sub_path":"World1/Challenge002/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"32263409737","text":"from random import randint\nfrom sys import exit\n\nclass Intro:\n def __init__(self):\n print(\"ยินดีต้อนรับสู่เกมเดา!\\n\")\n self.username()\n\n def username(self):\n try:\n print(\"กรุณาบอกชื่อของคุณ!\")\n self.username = input('> ')\n print(\"\\n\")\n #return self.username\n except KeyboardInterrupt:\n print(\"\\nบายยย\")\n exit()\n except Exception as e:\n print(e)\n\nclass Start(Intro):\n def proceed(self):\n try:\n print(f\"คุณพร้อมหรือยัง, {self.username}?\")\n self.user_input = input(\"[Y/N]> \")\n if self.user_input == 'Y' or self.user_input == 'y':\n print(\"\\nเยี่ยม คุณพร้อมแล้ว!\\n\")\n return True\n elif self.user_input =='N' or self.user_input == 'n':\n print(\"ทำไมคุณถึงอยู่ที่นี่?\")\n exit()\n else:\n print(\"\\nเมื่อกี้คืออะไร?\")\n raise ValueError\n except KeyboardInterrupt:\n print(\"\\nไว้เจอกันใหม่ภายหลัง!\")\n exit()\n except ValueError:\n print(\"\\nโปรดเรียน��ู้ที่จะพิมพ์!\")\n self.proceed()\n \n\nclass Finish:\n def victory_message(self, number, guess):\n self.number = number\n self.guess = guess\n print(\"มันสุดยอดมาก!!!\\n\")\n print(f\"เดาของคุณคือ {self.guess}.\")\n print(f\"หมายเลขลับคือ {self.number}.\")\n exit()\n\n def defeat_message(self, number, guess):\n self.number = number\n self.guess = guess\n print(\"แย่มาก. เกือบได้แล้ว.\\n\")\n print(f\"เดาของคุณคือ {self.guess}.\")\n print(f\"หมายเลขลับคือ {self.number}.\")\n print(\"\\nลองอีกครั้งในภายหลัง!\")\n exit()\n\nclass In_game(Finish):\n def __init__(self):\n print(\"\\n\\\"กฎของเกม\\\"\\n\")\n print(\"- คุณมีโอกาส 5 ครั้งในการเดา\")\n print(\"- คุณจะได้รับคำใบ้สำหรับการเดาผิดทุกครั้ง\")\n print(\"- ถ้าเดาถูกก็ชนะ ถ้าไม่อย่างนั้นก็ขาดทุน\")\n print(\"- เกมจะจบลงเมื่อคุณเดาหมายเลขหรือหมดโอกาส\")\n \n self.chances = 5\n self.number = randint(1,10)\n self.play()\n\n def play(self):\n try:\n while self.chances !=0:\n self.guess = int(input(\"เดาตัวเลขสิ : \"))\n # print(self.number)\n self.chances -= 1\n if self.guess == self.number:\n self.victory_message(self.number, self.guess)\n elif self.guess < self.number:\n if self.number - self.guess >= 3:\n print(\"\\nตัวเลขน้อยเกินไป!\")\n else:\n print(\"เกือบแล้ว! ใหญ่กว่านี้!\") \n elif self.guess > self.number:\n if self.guess - self.number >= 3:\n print(\"\\nตัวเลขใหญ่เกินไป ลดลงๆ\")\n else:\n print(\"เฉียดฉิว! เล็กลงหน่อย!\")\n else:\n print(\"เมื่อกี้คืออะไร? เราก็นับว่าเป็นโอกาสนะ!\")\n print(f\"คุณยังมี {self.chances} โอกาส\\n\")\n self.defeat_message(self.number, self.guess)\n exit()\n except KeyboardInterrupt:\n print(\"\\nอืม อย่าเพิ่งยอมแพ้แบบนั้น!\")\n print(\"แล้วเจอกันนะ!\")\n exit()\n except ValueError:\n print(\"1-10 เท่านั้น! 
เราก็นับว่าเป็นโอกาส!\")\n self.chances -= 1\n print(f\"คุณยังมี {self.chances} โอกาส\\n\")\n self.play()\n\n\ndef main():\n # intro = Intro()\n try:\n start = Start()\n tmp = start.proceed()\n\n if tmp == True:\n finish = In_game()\n except KeyboardInterrupt:\n print(\"แล้วเจอกันนะ!!\")\n except Exception as e:\n print(e)\n finally :\n print(\"\\n\\nCreated By Super mari0\")\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"methirat/GuessGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"th","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"8951893942","text":"from cassandra.cluster import Cluster\nfrom cassandra.query import BatchStatement\nfrom cassandra.query import SimpleStatement\nimport json\n\ncluster = Cluster(['127.0.0.1'])\nsession = cluster.connect()\n\nsession.execute(\n \"\"\"\n CREATE KEYSPACE IF NOT EXISTS twitter_data WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1 };\n \"\"\"\n)\n\nsession.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS twitter_data.tweets ( user_id bigint, user_name text, tweet text, location text, PRIMARY KEY ((user_id), user_name));\n \"\"\"\n)\n\nclass ImportToCassandra:\n def process_tweet_list(self, list):\n \n batch = BatchStatement()\n \n for data in (list):\n try:\n data = json.loads(data)\n user_id = data['user']['id']\n user_name = data['user']['screen_name']\n tweet = data['text']\n location = data['user']['location']\n batch.add(SimpleStatement(\"INSERT INTO twitter_data.tweets (user_id, user_name, tweet, location) VALUES (%s, %s, %s, %s)\"), (user_id, user_name, tweet, location)) \n except:\n pass\n \n session.execute(batch) \n\n\n \n \n \n \n \n","repo_name":"joeljacobson/cassandra_tweet_stream","sub_path":"import_to_cassandra.py","file_name":"import_to_cassandra.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70568163394","text":"from django.db import models\r\nfrom django.db.models.signals import post_delete\r\nfrom django.conf import settings\r\nfrom django.dispatch import receiver\r\nfrom review.models import Review\r\nfrom django.db.models import Avg, Q\r\n\r\ndef upload_location(instance, filename, **kwargs):\r\n\tfile_path = 'comic/{id}'.format(id=str(instance.pk), filename=filename)\r\n\treturn file_path\r\n\r\nclass Comic(models.Model):\r\n\talt_id = models.CharField(max_length=100, null=True, blank=True)\r\n\tdiamond_id = models.CharField(max_length=100, null=False, blank=True)\r\n\ttitle = models.CharField(max_length=100, null=False, blank=False)\r\n\titem_number = models.CharField(max_length=50, blank=True, null=False)\r\n\trelease_date = models.DateTimeField(null=False, blank=False)\r\n\tcover_price = models.DecimalField(max_digits=10, decimal_places=2, blank=False)\r\n\tcover = models.ImageField(upload_to=upload_location, null=False, blank=True)\r\n\tdescription = models.TextField(null=False, blank=True, default=\"No Description\")\r\n\tpage_count = models.PositiveSmallIntegerField(null=True, blank=True)\r\n\tpublisher = models.ForeignKey(\"publisher.Publisher\", null=False, blank=False, on_delete=models.CASCADE)\r\n\tseries = models.ForeignKey(\"series.Series\", blank=False, null=False, on_delete=models.CASCADE)\r\n\timprint = models.ForeignKey(\"publisher.Imprint\", null=True, blank=True, on_delete=models.CASCADE)\r\n\tcreators = models.ManyToManyField(\"creator.Creator\", blank=True, 
through=\"creator.ComicCreator\")\r\n\tbarcode = models.CharField(max_length=200, null=False, blank=True)\r\n\tprinting = models.PositiveSmallIntegerField(default=1, null=False, blank=True)\r\n\tformat_type = models.CharField(max_length=50, null=False, blank=True)\r\n\tdate_added = models.DateTimeField(auto_now_add=True)\r\n\tdate_updated = models.DateTimeField(auto_now=True)\r\n\tsolicit_date = models.DateTimeField(null=True, blank=True)\r\n\tis_mature = models.BooleanField(default=False, blank=True, null=False)\r\n\tis_standard_issue = models.BooleanField(default=False, blank=True, null=False)\r\n\tversion_of = models.ForeignKey(\"self\", blank=True, null=True, on_delete=models.CASCADE)\r\n\tversions = models.PositiveSmallIntegerField(default=0, blank=True, null=False)\r\n\tvariant_code = models.CharField(max_length=100, null=False, blank=True)\r\n\ttotal_wanted = models.PositiveIntegerField(default=0, blank=True, null=False)\r\n\ttotal_favorited = models.PositiveIntegerField(default=0, blank=True, null=False)\r\n\ttotal_owned = models.PositiveIntegerField(default=0, blank=True, null=False)\r\n\ttotal_read = models.PositiveIntegerField(default=0, blank=True, null=False)\r\n\r\n\tdef get_avg_rating(self):\r\n\t\treviews = Review.objects.filter(comic__id=self.id)\r\n\t\tif reviews:\r\n\t\t\treturn reviews.aggregate(average=Avg(\"rating\"))[\"average\"]\r\n\t\telse:\r\n\t\t\treturn \"No Ratings\"\r\n\r\n\tdef get_number_of_reviews(self):\r\n\t\treturn Review.objects.filter(Q(comic__id=self.id) & ~Q(title=\"\")).count()\r\n\r\n\tdef save(self, *args, **kwargs):\r\n\t\tif self.pk is None:\r\n\t\t\tsaved_cover = self.cover\r\n\t\t\tself.cover = None\r\n\t\t\tsuper(Comic, self).save(*args, **kwargs)\r\n\t\t\tself.cover = saved_cover\r\n\r\n\t\tsuper(Comic, self).save(*args, **kwargs)\r\n\r\n@receiver(post_delete, sender=Comic)\r\ndef submission_delete(sender, instance, **kwargs):\r\n\tinstance.cover.delete(False)\r\n","repo_name":"gchartier/compendia-backend","sub_path":"comic/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72635811394","text":"import pandas as pd\nimport numpy as np\nimport argparse\nfrom tqdm.auto import tqdm\n\nfrom hcrot import layers, dataset, optim\n\nclass Model(layers.Module):\n def __init__(self, input_len=28*28, hidden=512, num_classes=10):\n super().__init__()\n self.net1 = layers.Sequential(\n layers.Linear(in_features=input_len, out_features=hidden),\n layers.Sigmoid()\n )\n self.dropout = layers.Dropout(p=0.3)\n self.net2 = layers.Sequential(\n layers.Linear(in_features=hidden, out_features=hidden),\n layers.Sigmoid()\n )\n self.fc = layers.Linear(in_features=hidden, out_features=num_classes)\n \n def forward(self, x):\n o = self.dropout(self.net1(x))\n return self.fc(self.net2(o))\n\ndef train(args):\n model = Model(input_len=28*28, hidden=args.hidden_size, num_classes=10)\n criterion = layers.CrossEntropyLoss()\n optimizer = optim.Adam(model, args.lr_rate)\n\n for epoch in range(args.epochs):\n loss_, correct = 0, 0\n \n # train\n model.train()\n for x, y in tqdm(dataloader):\n pred = model.forward(x)\n loss = criterion(pred,y)\n dz = criterion.backward()\n optimizer.update(dz)\n loss_ += loss\n \n # test\n model.eval()\n for x, y in tqdm(testloader):\n pred = model.forward(x)\n correct += np.sum(np.argmax(pred,axis=1)==y)\n\n print(f'epoch = [{epoch+1}] | loss = {loss_/len(dataloader)} | ACC = 
{correct/(len(testloader)*len(y))}')\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--lr_rate', default=1e-2, type=float, help='Learning Rate')\n    parser.add_argument('--hidden_size', default=28, type=int, help='Hidden Layer size')\n    parser.add_argument('--epochs', default=10, type=int, help='Epochs')\n\n    df = pd.read_csv('./datasets/mnist_test.csv')\n    label = df['7'].to_numpy()\n    df = df.drop('7',axis=1)\n    dat = df.to_numpy()\n\n    train_image, test_image = dat[:5000], dat[8001:9001]\n    train_label, test_label = label[:5000], label[8001:9001]\n    train_image = train_image.astype(np.float32)\n    test_image = test_image.astype(np.float32)\n\n    for i in range(len(train_image)): train_image[i] /= 255.0\n    for i in range(len(test_image)): test_image[i] /= 255.0\n\n    dataloader = dataset.Dataloader(train_image, train_label, batch_size=50, shuffle=True)\n    testloader = dataset.Dataloader(test_image, test_label, batch_size=10, shuffle=False)\n\n    args = parser.parse_args()\n    train(args)\n","repo_name":"emeraldgoose/hcrot","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26572889455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nimport platform\nimport subprocess\nimport time\n\n\ndef exec_command(cmd, log_path='out/build.log', **kwargs):\n    with open(log_path, 'at') as f:\n        process = subprocess.Popen(cmd,\n                                   stdout=subprocess.PIPE,\n                                   stderr=subprocess.PIPE,\n                                   universal_newlines=True,\n                                   **kwargs)\n        for line in iter(process.stdout.readline, ''):\n            sys.stdout.write(line)\n            f.write(line)\n\n    process.wait()\n    ret_code = process.returncode\n\n    if ret_code != 0:\n        with open(log_path, 'at') as f:\n            for line in iter(process.stderr.readline, ''):\n                sys.stdout.write(line)\n                f.write(line)\n        print('you can check build log in {}'.format(log_path))\n        raise Exception(\"{} failed, return code is {}\".format(cmd, ret_code))\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('action', help='action, build or clean path.',\n                        nargs='*')\n    parser.add_argument('-v', '--build_type', help='Release or debug version.',\n                        nargs='*')\n    args = parser.parse_args()\n\n    product_path = './out/'\n    log_path = os.path.join('./', 'build.log')\n\n    args.action = ['build'] if not args.action else args.action\n\n    if args.action[0] == 'build':\n        gn_cmd = ''\n        ninja_cmd = ''\n        print(\"\\n=== start build ===\\n\")\n        if platform.system().find('Windows') == 0:\n            gn_cmd = ['gn.exe', 'gen', product_path, '--root=.',\n                      '--dotfile=./.gn']\n\n            ninja_cmd = ['ninja.exe',\n                         '-C', product_path]\n        else:\n            gn_cmd = ['gn', 'gen', product_path, '--root=.',\n                      '--dotfile=./.gn']\n            # build_type comes from nargs='*', so it is a list (or None), never a bare string\n            if args.build_type and args.build_type[0] == 'debug':\n                gn_cmd += ['--args=build_type=\\\"debug\\\"']\n\n            ninja_cmd = ['ninja', '-C', product_path]\n        print(\"=== gn working ===\\n\")\n        exec_command(gn_cmd, log_path)\n        time.sleep(2)\n        print(\"\\n=== ninja working ===\")\n        exec_command(ninja_cmd, log_path)\n        print(\"build success!\")\n    elif args.action[0] == 'clean':\n        clean_cmd = ''\n        if not os.path.exists(product_path):\n            print('Nothing to clean! 
No build found.')\n return 0\n print(\"\\n=== start clean ===\\n\")\n if platform.system().find('Windows') == 0:\n clean_cmd = ['ninja.exe', '-C', product_path, '-t', 'clean']\n else:\n clean_cmd = ['ninja', '-C', product_path, '-t', 'clean']\n print(\"=== clean working ===\\n\")\n exec_command(clean_cmd, log_path)\n print(\"clean success!\")\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"Taogal/gn_build_system","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35156709109","text":"def path(wire):\n DIR = dict(zip(\"RLUD\", ((1,0), (-1,0), (0,1), (0,-1))))\n pos = (0,0)\n path = {}\n steps = 0\n for el in wire:\n dir = DIR[el[0]]\n length = int(el[1:])\n for _ in range(length):\n pos = (pos[0] + dir[0], pos[1] + dir[1])\n steps += 1\n if not pos in path:\n path[pos] = steps\n return path\n\ndef manhattan_dist(point):\n return abs(point[0]) + abs(point[1])\n\ndef main():\n w1, w2 = [line.rstrip('\\n').split(\",\") for line in open(\"input.txt\")]\n # lines = [\"R75,D30,R83,U83,L12,D49,R71,U7,L72\", \"U62,R66,U55,R34,D71,R55,D58,R83\"]\n # w1, w2 = [line.rstrip('\\n').split(\",\") for line in lines]\n p1 = path(w1)\n p2 = path(w2)\n print(min(manhattan_dist(p) for p in p1.keys()&p2.keys()))\n print(min(p1[p]+p2[p] for p in p1.keys()&p2.keys()))\n\nif __name__ == \"__main__\":\n main()","repo_name":"robquant/adventofcode2019","sub_path":"03/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29208850045","text":"# Imports\nimport json\nimport os\nimport logging\n\n# Maya imports\nfrom maya import cmds\n\n# Project imports\nfrom hiddenStrings.libs import skin_lib, blend_shape_lib\n\nlogging = logging.getLogger(__name__)\n\n\ndef export_selection(file_name, path):\n \"\"\"\n Export selection to json\n :param file_name: str\n :param path: str\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n selection_data = cmds.ls(selection=True)\n\n export_data_to_json(data=selection_data,\n file_name=file_name,\n file_path=path,\n relative_path=False)\n\n logging.info(selection_data)\n return selection_data\n\n\ndef import_selection(path):\n \"\"\"\n import selection to json\n :param path: str\n \"\"\"\n file_name = os.path.basename(path).split('.json')[0]\n path = os.path.dirname(path)\n\n selection_data = import_data_from_json(file_name=file_name,\n file_path=path,\n relative_path=False)\n cmds.select(selection_data)\n logging.info(selection_data)\n\n return selection_data\n\n\ndef export_nodes_and_connections(file_name, path, export_nodes=True, export_edges=False, export_connections=True):\n \"\"\"\n Export nodes and connections to .ma file\n :param file_name: str\n :param path: str\n :param export_nodes: bool\n :param export_edges: bool\n :param export_connections: bool\n \"\"\"\n node_list = cmds.ls(sl=True)\n cmds.file(r'{}/{}.ma'.format(path, file_name), type='mayaAscii', exportSelectedStrict=True, force=True)\n\n with open(r'{}/{}.ma'.format(path, file_name), 'r+') as connections_file:\n # Read and store all lines into list\n lines = connections_file.readlines()\n\n # Move file pointer to the beginning of a file\n connections_file.seek(0)\n\n # Empty the file\n connections_file.truncate()\n\n # Get connections and edge nodes\n connections_string = str()\n skip_node_list = list()\n for node in 
node_list:\n # Get inputs\n inputs_list = cmds.listConnections(node, destination=False, plugs=True, skipConversionNodes=True)\n if inputs_list:\n inputs_list = [x for x in inputs_list if x.split('.')[0] in node_list]\n\n for input_value in inputs_list:\n output_value = [x for x in cmds.listConnections(input_value, plugs=True, source=True,\n skipConversionNodes=True)\n if node in x][0]\n connections_string += '\\nconnectAttr \"{}\" \"{}\";'.format(input_value, output_value)\n\n # Get outputs\n outputs_list = cmds.listConnections(node, source=False, plugs=True, skipConversionNodes=True)\n if outputs_list:\n outputs_list = [x for x in outputs_list if x.split('.')[0] in node_list]\n outputs_list = [(cmds.listConnections(x,\n destination=False,\n plugs=True,\n skipConversionNodes=True)[0], x) for x in outputs_list]\n # Get edges\n if not export_edges:\n if not bool(inputs_list) or not bool(outputs_list):\n skip_node_list.append(node)\n\n # ----- Write file -----\n # write createNode lines\n if export_nodes:\n create_node_line_list = [index for index, value in enumerate(lines) if value.startswith('createNode')]\n for create_node_line_index in create_node_line_list:\n node_name = lines[create_node_line_index].split('\"')[1]\n if 'unitConversion' not in lines[create_node_line_index]:\n if node_name not in skip_node_list:\n connections_file.writelines(lines[create_node_line_index])\n for line_index, line_value in enumerate(lines[create_node_line_index + 1::]):\n if line_value.startswith('\\t'):\n if 'rename' not in line_value:\n connections_file.writelines(lines[create_node_line_index + line_index + 1])\n else:\n break\n\n # Write connections lines\n if export_connections:\n if len(connections_string) != 0:\n connections_file.writelines(connections_string)\n\n logging.info(r'{}/{}.ma has been exported.'.format(path, file_name))\n\n\ndef import_nodes_and_connections(path, import_nodes=True, import_connections=True, search_for=None, replace_with=None):\n \"\"\"\n Import connections from mel file\n :param path: str\n :param import_nodes: bool\n :param import_connections: bool\n :param search_for: str; use \",\" for more than once\n :param replace_with: str; use \",\" for more than once\n \"\"\"\n if search_for:\n search_and_replace_in_file(path, search_for=search_for, replace_with=replace_with)\n\n with open(path, 'r') as connections_file:\n lines = connections_file.readlines()\n\n if import_nodes:\n main_line_list = [index for index, value in enumerate(lines) if\n value.startswith('createNode') or value.startswith('connectAttr')]\n\n component_range_list = [(value, main_line_list[index + 1]) for index, value in enumerate(main_line_list) if\n main_line_list[index] != main_line_list[-1]]\n\n # Create a temporary mel file to import only the nodes that do not exist in the scene\n with open(r'{}_TEMP.mel'.format(path.split('.ma')[0]), 'w') as connections_file_temp:\n for file_component in component_range_list:\n if 'createNode' in lines[file_component[0]]:\n node_name = lines[file_component[0]].split('\"')[1]\n if not cmds.objExists(node_name):\n for index in range(file_component[0], file_component[1]):\n connections_file_temp.write(lines[index])\n # Import nodes\n cmds.file(r'{}_TEMP.mel'.format(path.split('.ma')[0]), i=True, force=True) # i = import\n os.remove(r'{}_TEMP.mel'.format(path.split('.ma')[0]))\n\n # Connect attributes from file, check if the connection exists and force it if false\n if import_connections:\n for line in lines:\n if line.startswith('connectAttr'):\n input_value = 
line.split('\"')[1]\n output_value = line.split('\"')[-2]\n if not cmds.isConnected(input_value, output_value):\n cmds.connectAttr(input_value, output_value, force=True)\n\n if search_for:\n search_and_replace_in_file(path, search_for=replace_with, replace_with=search_for)\n\n logging.info(r'{} has been imported.'.format(path))\n\n\ndef import_obj(path):\n return cmds.file(path, type='OBJ', i=True, force=True, returnNewNodes=True) # i = import\n\n\ndef export_blend_shape(node, path):\n \"\"\"\n Export blendShape of the node given\n :param node: str\n :param path: str\n \"\"\"\n if not cmds.objExists(node):\n cmds.error('{} does not exists in the scene'.format(node))\n blend_shape_name = blend_shape_lib.get_blend_shape(node)\n\n blend_shape_lib.check_blendshape(blend_shape=blend_shape_name)\n blend_shape_data = blend_shape_lib.get_blend_shape_data(blend_shape_name)\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n export_data_to_json(data=blend_shape_data, file_name=blend_shape_name, file_path=path, relative_path=False,\n compact=True)\n\n logging.info(r'{}/{}.json has been exported.'.format(path, blend_shape_name))\n\n\ndef export_blend_shapes(node_list, path):\n \"\"\"\n Export blendShapes of the nodes given\n :param node_list: list\n :param path: str\n \"\"\"\n for node in node_list:\n export_blend_shape(node=node, path=path)\n\n\ndef import_blend_shape(node, path):\n \"\"\"\n Import blendShape from path\n :param node: str\n :param path: str\n \"\"\"\n file_name = os.path.basename(path).split('.json')[0]\n path = os.path.dirname(path)\n\n blend_shape = blend_shape_lib.get_blend_shape(node=node)\n if blend_shape:\n blend_shape = blend_shape_lib.rename_blend_shape(blend_shape=blend_shape)\n else:\n blend_shape = blend_shape_lib.create_blend_shape(node=node)\n\n blend_shape_data = import_data_from_json(file_name=file_name, file_path=path, relative_path=False)\n\n blend_shape_lib.set_blendshape_data(blend_shape=blend_shape, blend_shape_data=blend_shape_data)\n\n logging.info(r'{}/{}.json has been imported.'.format(path, file_name))\n\n\ndef import_blend_shapes(path):\n \"\"\"\n Import all json blendShapes from folder\n :param path: string\n \"\"\"\n file_list = [x for x in os.listdir(path) if x.endswith('.json')]\n for blend_shape_file in file_list:\n # Get json file node\n blend_shape_data = import_data_from_json(file_name=blend_shape_file.split('.')[0],\n file_path=path,\n relative_path=False)\n\n import_blend_shape(node=blend_shape_data['node'], path=r'{}/{}'.format(path, blend_shape_file))\n pass\n\n\ndef export_skin_cluster(node, path, skin_index=1):\n \"\"\"\n Export skinCluster index of the node given if index == None then all index will be exported\n :param node: str\n :param path: str\n :param skin_index: int. -1 (last), 1, 2, 3\n \"\"\"\n if skin_index:\n skin_cluster = skin_lib.rename_skin_cluster(skin_lib.get_skin_cluster_index(node, skin_index))\n skin_path = r'{}/{}.json'.format(path, skin_cluster)\n # Check if the file exists and is writable\n if os.path.exists(skin_path) and not os.access(skin_path, os.W_OK):\n logging.info('{} is not writeable. 
Check Permissions.'.format(skin_path))\n else:\n if not os.path.exists(path):\n os.makedirs(path)\n # Export JSON\n cmds.deformerWeights('{}.json'.format(skin_cluster), deformer=skin_cluster, method='index',\n export=True, format='JSON', path=path)\n else:\n skin_cluster_list = skin_lib.get_skin_cluster_list(node)\n for skin_cluster in skin_cluster_list:\n skin_cluster = skin_lib.rename_skin_cluster(skin_cluster)\n skin_path = r'{}/{}.json'.format(path, skin_cluster)\n # Check if the file exists and is writable\n if os.path.exists(skin_path) and not os.access(skin_path, os.W_OK):\n logging.info('{} is not writeable. Check Permissions.'.format(skin_path))\n else:\n if not os.path.exists(path):\n os.makedirs(path)\n # Export skinCluster JSON\n cmds.deformerWeights('{}.json'.format(skin_cluster), deformer=skin_cluster, method='index',\n export=True, format='JSON', path=path)\n\n\ndef export_skin_clusters(node_list, path, skin_index=1):\n \"\"\"\n export all skinClusters\n :param node_list: list\n :param path: string\n :param skin_index: int. -1 (last), 1, 2, 3\n \"\"\"\n for node in node_list:\n export_skin_cluster(node=node, path=path, skin_index=skin_index)\n\n\ndef import_skin_cluster(node, path, skin_index=1, import_method='index', search_for=None, replace_with=None):\n \"\"\"\n Import skinCluster from path\n :param node: str\n :param path: str\n :param skin_index: int. -1 (last), 1, 2, 3\n :param import_method: index or nearest\n :param search_for: str; use \",\" for more than once\n :param replace_with: str; use \",\" for more than once\n :return:\n \"\"\"\n skin_cluster = skin_lib.get_skin_cluster_index(node, skin_index)\n file_name = os.path.basename(path)\n path = os.path.dirname(path)\n\n if search_for:\n search_and_replace_in_file(r'{}/{}'.format(path, file_name), search_for=search_for, replace_with=replace_with)\n\n # Get json file joints\n skin_data = import_data_from_json(file_name=file_name.split('.')[0],\n file_path=path,\n relative_path=False)\n\n file_skin_joints = list()\n for value in skin_data['deformerWeight']['weights']:\n file_skin_joints.append(value['source'])\n\n # Check if the joints exists in the scene\n joints_not_in_scene = list()\n for jnt in file_skin_joints:\n if not cmds.objExists(jnt):\n joints_not_in_scene.append(jnt)\n if len(joints_not_in_scene) > 0:\n cmds.error('missing in the scene: {}'.format(joints_not_in_scene))\n\n # If skinCluster exists get its joints and add the joints that are not in the skinCluster\n joints_to_lock = list()\n if skin_cluster:\n skin_cluster_joints = cmds.skinCluster(skin_cluster, query=True, influence=True)\n\n joints_to_add = [x for x in file_skin_joints if x not in skin_cluster_joints]\n joints_to_lock = [x for x in skin_cluster_joints if x not in file_skin_joints]\n\n cmds.skinCluster(skin_cluster, edit=True, addInfluence=joints_to_add, lockWeights=True)\n\n # If skinCluster does not exist create it\n else:\n skin_cluster = skin_lib.create_skin_cluster(joints=file_skin_joints, node=node, skin_index=skin_index)\n\n # Reading normalize weights\n skin_normalize = cmds.skinCluster(skin_cluster, query=True, normalizeWeights=True)\n # disable normalize weights\n cmds.skinCluster(skin_cluster, edit=True, normalizeWeights=0)\n\n # Empty skin cluster, if not it does not work as expected\n node_type = cmds.nodeType(node)\n if 'nurbs' in node_type:\n component_type = 'cv'\n elif 'lattice' in node_type:\n component_type = 'pt'\n else:\n component_type = 'vtx'\n\n shape_components = '{}.{}[:]'.format(cmds.listRelatives(node, 
shapes=True, noIntermediate=True)[0], component_type)\n\n cmds.skinPercent(skin_cluster, shape_components, normalize=False, pruneWeights=100)\n if joints_to_lock:\n for jnt in joints_to_lock:\n cmds.skinCluster(skin_cluster, edit=True, influence=jnt, lockWeights=True)\n\n # Import skinCluster\n cmds.deformerWeights(file_name, path=path, deformer=skin_cluster, im=True, method=import_method)\n\n # Restore normalize weights\n cmds.skinCluster(skin_cluster, edit=True, normalizeWeights=skin_normalize)\n cmds.skinCluster(skin_cluster, edit=True, forceNormalizeWeights=True)\n\n if search_for:\n search_and_replace_in_file(r'{}/{}'.format(path, file_name), search_for=replace_with, replace_with=search_for)\n\n logging.info('{} has been imported.'.format(file_name))\n\n\ndef import_skin_clusters(path, import_method='index'):\n \"\"\"\n Import all json skinClusters from folder\n :param path: string\n :param import_method: string, index or nearest\n \"\"\"\n file_list = [x for x in os.listdir(path) if x.endswith('.json')]\n for skin_file in file_list:\n # Get json file node\n skin_data = import_data_from_json(file_name=skin_file.split('.')[0],\n file_path=path,\n relative_path=False)\n\n node = cmds.listRelatives(skin_data['deformerWeight']['shapes'][0]['name'], parent=True)[0]\n skin_cluster = skin_data['deformerWeight']['weights'][0]['deformer']\n skin_index = int(skin_cluster.split('_')[0][-1:])\n\n import_skin_cluster(node=node, path=r'{}/{}'.format(path, skin_file),\n skin_index=skin_index, import_method=import_method)\n\n\ndef export_data_to_json(data, file_name, file_path, relative_path=True, use_indent=True, compact=False):\n \"\"\"\n export data to a json file\n :param data: str, list, dict\n :param file_name: str\n :param file_path: str\n :param relative_path: bool\n :param use_indent: bool\n :param compact: bool\n :return: file path name with \".json\"\n \"\"\"\n if relative_path:\n module_path = os.path.dirname(os.path.dirname(__file__))\n file_path_name_with_extension = '{}/{}/{}.json'.format(module_path, file_path, file_name)\n else:\n file_path_name_with_extension = '{}/{}.json'.format(file_path, file_name)\n\n if compact:\n with open(file_path_name_with_extension, 'w') as write_file:\n json.dump(data, write_file)\n else:\n with open(file_path_name_with_extension, 'w') as write_file:\n indent_value = 4 if use_indent else 0\n json.dump(data, write_file, indent=indent_value)\n\n return file_path_name_with_extension\n\n\ndef import_data_from_json(file_name, file_path, relative_path=True):\n \"\"\"\n Import data from a json file\n :param file_name: str\n :param file_path: str\n :param relative_path: bool\n :return: data\n \"\"\"\n if relative_path:\n script_path = os.path.dirname(os.path.dirname(__file__))\n file_path_name_with_extension = '{}/{}/{}.json'.format(script_path, file_path, file_name)\n else:\n file_path_name_with_extension = '{}/{}.json'.format(file_path, file_name)\n\n with open(file_path_name_with_extension, 'r') as read_file:\n data = json.load(read_file)\n\n return data\n\n\ndef search_and_replace_in_file(path, search_for, replace_with):\n \"\"\"\n Replace words in the file\n :param path: str\n :param search_for: str; use \",\" for more than once\n :param replace_with: str; use \",\" for more than once\n return path\n \"\"\"\n\n search_for = search_for.split(',')\n replace_with = replace_with.split(',')\n\n if len(search_for) != len(replace_with):\n cmds.error('Search for and replace with must have same number of words (split with commas)')\n\n with open(path, 'r+') as 
skin_file:\n lines = skin_file.readlines()\n skin_file.seek(0)\n skin_file.truncate()\n for line in lines:\n for index, value in enumerate(search_for):\n line = line.replace(value, replace_with[index])\n skin_file.writelines(line)\n\n return path\n","repo_name":"ivan-cuenca-rigging/hiddenStrings","sub_path":"libs/import_export_lib.py","file_name":"import_export_lib.py","file_ext":"py","file_size_in_byte":18283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17258062364","text":"import torch as torch\r\nfrom collections import OrderedDict\r\n\r\n\r\nclass UnFlatten(torch.nn.Module):\r\n def forward(self, x, size=1000):\r\n return x.view(x.size(0), size, 1, 1)\r\n\r\n\r\nclass Decoder(torch.nn.Module):\r\n def __init__(self, mode=None):\r\n super().__init__()\r\n\r\n if mode == \"up\":\r\n mode = \"bilinear\"\r\n self.decoder = torch.nn.Sequential(\r\n UnFlatten(),\r\n torch.nn.Upsample(scale_factor=7, mode=mode),\r\n torch.nn.Conv2d(1000, 512, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.ReLU(),\r\n torch.nn.Upsample(scale_factor=2, mode=mode),\r\n torch.nn.Conv2d(512, 256, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.ReLU(),\r\n torch.nn.Upsample(scale_factor=2, mode=mode),\r\n torch.nn.Conv2d(256, 128, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.ReLU(),\r\n torch.nn.Upsample(scale_factor=2, mode=mode),\r\n torch.nn.Conv2d(128, 64, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.ReLU(),\r\n torch.nn.Upsample(scale_factor=2, mode=mode),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 32, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.Upsample(scale_factor=2, mode=mode),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(32, 1, kernel_size=(3,3), stride=(1, 1), padding=(1, 1)),\r\n torch.nn.Sigmoid()\r\n )\r\n else:\r\n self.decoder = torch.nn.Sequential(\r\n UnFlatten(),\r\n torch.nn.ConvTranspose2d(1000, 512, kernel_size=(5, 5), stride=(2, 2)),\r\n torch.nn.ReLU(),\r\n torch.nn.ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2)),\r\n torch.nn.ReLU(),\r\n torch.nn.ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2)),\r\n torch.nn.ReLU(),\r\n torch.nn.ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2)),\r\n torch.nn.ReLU(),\r\n torch.nn.ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2)),\r\n torch.nn.ReLU(),\r\n torch.nn.ConvTranspose2d(32, 1, kernel_size=(6, 6), stride=(2, 2)),\r\n torch.nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n return self.decoder(x)\r\n\r\n\r\n","repo_name":"muruvvetb/-Explainable-DL-for-COVID-19-detection","sub_path":"architectures/Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7945365823","text":"import PySimpleGUI as sg\nimport process_query\nimport process_sql_query\n\ndef query_window(cnx):\n\n layout_query = [[sg.Text(\"Query your database:\")],\n [sg.Text(\"Enter table name:\"), sg.Input(key='-NAME-', do_not_clear=True, size=(20, 1))],\n [sg.Button('Show Table')],\n [sg.Text(\"SQL Query:\")],\n [sg.Multiline(size=(30, 5), key='textbox')],\n [sg.Button('SQL Query -> Output'), sg.Button('SQL Query -> Table'), sg.Exit()]]\n\n\n\n layout_error = [[sg.Text(\"Error messages:\")],\n [sg.Multiline(\"\", size=(100, 10), key='OUTPUT')],\n [sg.Text(\"Output:\")],\n [sg.Multiline(\"\", size=(100, 10), key='OUTPUT_GENERAL')]]\n\n layout = [\n 
[sg.Column(layout_query),\n        sg.VSeperator(),\n        sg.Column(layout_error)] ]\n\n\n    window = sg.Window(\"Jazz Database\", layout, resizable=True).Finalize()\n    window.Maximize()\n\n    \n\n    while True:\n        event, values = window.read()\n        if event in (sg.WIN_CLOSED, 'Exit'):\n            window.close()\n            break\n        try:\n            if event == 'SQL Query -> Table':\n                query = values['textbox']\n                process_sql_query.create(query, cnx)\n                window['OUTPUT_GENERAL'].update(value=\"Query was successful\")\n\n            elif event == 'SQL Query -> Output':\n\n                cursor = cnx.cursor()\n\n                query = values['textbox']\n                cursor.execute(query)\n                rows = cursor.fetchall()\n                window['OUTPUT_GENERAL'].update(value=rows)\n\n                cnx.commit()\n            \n            elif event == 'Show Table':\n                table_name = values['-NAME-'] \n                process_query.create(table_name, cnx)\n\n                window['OUTPUT_GENERAL'].update(value=\"Query was successful\")\n            \n        except Exception as err:\n            window['OUTPUT'].update(value=\"Error: \" + str(err))\n\n    # closing the connection also releases any cursor that was opened\n    cnx.close()\n\n","repo_name":"Seb125/MySQL","sub_path":"general_GUI.py","file_name":"general_GUI.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28238384126","text":"class Recursion:\n    def __init__(self):\n        pass\n\n    def factorial(self, n):\n        if n == 0:\n            return 1\n        else:\n            return n * self.factorial(n-1)\n\n    def sumupton(self, n):\n        if n == 1:\n            return 1\n        else:\n            return n+self.sumupton(n-1)\n    \n    def pow(self, x, n):\n        if n == 0:\n            return 1\n        elif n == 1:\n            return x\n        else:\n            return x * self.pow(x, n-1)\n\n    def fastpow(self, x, n):\n        if n == 0:\n            return 1\n        else:\n            if n % 2 == 0:\n                y = self.fastpow(x, n/2)\n                return y * y\n            else:\n                return x * self.fastpow(x, n-1)\n\n    def mod_exp(self, x, n, m):\n        if n == 0:\n            return 1\n        elif n % 2 == 0:\n            y = self.mod_exp(x, n/2, m)\n            return (y * y) % m\n        else:\n            # odd case: peel off one factor of x (recursing on (x, n, m) here would never terminate)\n            return (x * self.mod_exp(x, n-1, m)) % m\n\n    def fibonacci(self, n):\n        if n == 0 or n == 1:\n            return n\n        else:\n            return self.fibonacci(n-1) + self.fibonacci(n-2)\n\nr = Recursion()\nprint(r.factorial(3))\nprint(r.sumupton(3))\nprint(r.pow(2, 3))\nprint(r.fibonacci(4))","repo_name":"withinfinitedegreesoffreedom/datastructures-algorithms","sub_path":"misc/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44201236865","text":"# library to make the user interface\r\nfrom tkinter import *\r\n# library to connect to sockets\r\nimport socket\r\nimport threading\r\n\r\ndef buildconncetion():\r\n    port = 5050\r\n    serverip = socket.gethostbyname(socket.gethostname())\r\n    addr = (serverip,port)\r\n    client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n    client.connect(addr)\r\n\r\n\r\ndef sendPackets(packets):\r\n    pass\r\n\r\n\r\ndef userinterface():\r\n    # main user interface to send packets \r\n    window = Tk()\r\n    window.geometry(\"500x500\")\r\n    pakettext = Label(window,text = \"message to send\")\r\n    pakettext.place(relx = 0.2, rely= 0.2)\r\n    # to take the number of packets to send\r\n    paketnumber = Entry(window)\r\n    paketnumber.place(relx=0.5, rely=0.2)\r\n    window.mainloop()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    buildconncetion()\r\n    # 
userinterface()","repo_name":"pravinshinde215/server_scripts-","sub_path":"packetsender.py","file_name":"packetsender.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35368032821","text":"import csv\r\n\r\nnames = []\r\naddresses = []\r\n\r\nwith open('ads.csv', 'r', encoding='utf-8') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n names.append(row[2])\r\n addresses.append(row[5])\r\nset_names = (list(set(names)))\r\nset_names.remove('author')\r\nnames_dict = {}\r\nn = 1\r\nfor name in set_names:\r\n names_dict[name] = n\r\n n += 1\r\n\r\n\r\nwith open('name.csv', 'w', newline='', encoding='utf-8') as f:\r\n writer = csv.writer(f)\r\n for num, name in names_dict.items():\r\n nam = [num, name]\r\n writer.writerow(nam)\r\n\r\nset_addresses = (list(set(addresses)))\r\nset_addresses.remove('address')\r\naddresses_dict = {}\r\nn = 1\r\nfor address in set_addresses:\r\n addresses_dict[address] = n\r\n n += 1\r\n\r\n\r\nwith open('address.csv', 'w', newline='', encoding='utf-8') as f:\r\n writer = csv.writer(f)\r\n for num, address in addresses_dict.items():\r\n addr = [num, address]\r\n writer.writerow(addr)\r\n\r\n\r\nwith open('ads.csv', 'r', encoding='utf-8') as f:\r\n reader = csv.DictReader(f)\r\n for row in reader:\r\n id = row['Id']\r\n name = row['name']\r\n author = names_dict[row['author']]\r\n price = row['price']\r\n description = row['description']\r\n address = addresses_dict[row['address']]\r\n is_published = row['is_published']\r\n with open('new_ads.csv', 'a', newline='', encoding='utf-8') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(\r\n (\r\n id,\r\n name,\r\n author,\r\n price,\r\n description,\r\n address,\r\n is_published\r\n )\r\n )\r\n\r\n\r\n","repo_name":"getter65/SkyProProject","sub_path":"avito-main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39925624500","text":"__title__ = \"butterfly\"\n__version__ = \"3.2.5\"\n\n__summary__ = \"A sleek web based terminal emulator\"\n__uri__ = \"https://github.com/paradoxxxzero/butterfly\"\n__author__ = \"Florian Mounier\"\n__email__ = \"paradoxxx.zero@gmail.com\"\n\n__license__ = \"GPLv3\"\n__copyright__ = \"Copyright 2017 %s\" % __author__\n\n__all__ = [\n '__title__', '__version__', '__summary__', '__uri__', '__author__',\n '__email__', '__license__', '__copyright__'\n]\n","repo_name":"paradoxxxzero/butterfly","sub_path":"butterfly/__about__.py","file_name":"__about__.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":2884,"dataset":"github-code","pt":"61"} +{"seq_id":"23379558301","text":"f = open('A-small-attempt2.in')\r\na = f.readline()\r\nanswers = []\r\n\r\ndef check(inString, caseNum):\r\n if '.' 
in inString:\r\n return False\r\n elif 'X' in inString and 'O' not in inString:\r\n answers.append('Case #{}: X won'.format(str(caseNum)))\r\n return True\r\n elif 'O' in inString and 'X' not in inString:\r\n answers.append('Case #{}: O won'.format(str(caseNum)))\r\n return True\r\n \r\nfor i in range(int(a)):\r\n c = []\r\n hasWinner = False\r\n for b in range(4):\r\n c.append(f.readline().strip('\\n'))\r\n if not hasWinner and check(c[b], i + 1):\r\n hasWinner = True\r\n if b == 3 and not hasWinner:\r\n if check('{}{}{}{}'.format(c[0][0], c[1][1], c[2][2], c[3][3]), i + 1):\r\n hasWinner = True\r\n elif check('{}{}{}{}'.format(c[0][3], c[1][2], c[2][1], c[3][0]), i + 1):\r\n hasWinner = True\r\n if not hasWinner:\r\n for b in range(4):\r\n if not hasWinner:\r\n if check('{}{}{}{}'.format(c[0][b], c[1][b], c[2][b], c[3][b]), i + 1):\r\n hasWinner = True\r\n if not hasWinner:\r\n for v, b in enumerate(c):\r\n if '.' not in b and v == 3:\r\n answers.append('Case #{}: Draw'.format(i + 1))\r\n break\r\n elif v == 3:\r\n answers.append('Case #{}: Game has not completed'.format(i+1))\r\n break\r\n f.readline()\r\nf.close()\r\nf = open('A-small-attempt2.txt', 'w')\r\nfor i in answers:\r\n f.write(i)\r\n f.write('\\n')\r\nf.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1541.py","file_name":"1541.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26785195147","text":"#!/usr/bin/python3\n\"\"\" CityTest module \"\"\"\n\n\nfrom models.city import City\nimport unittest\n\n\nclass CityTest(unittest.TestCase):\n \"\"\" CityTest class \"\"\"\n\n def testClassDocumentation(self):\n \"\"\"\n Class have documentation\n \"\"\"\n self.assertGreater(len(City.__doc__), 0)\n\n def testConstructorDocumentation(self):\n \"\"\"\n Constructor have documentation\n \"\"\"\n self.assertGreater(len(City.__init__.__doc__), 0)\n\n def testConstructor(self):\n \"\"\"\n Constructor test\n \"\"\"\n c1 = City()\n c1.name = \"Lille\"\n c1.save()\n self.assertGreater(c1.updated_at, c1.created_at)\n self.assertDictEqual(\n c1.to_dict(),\n {\n 'id': c1.id,\n 'created_at': c1.created_at.strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n 'updated_at': c1.updated_at.strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n 'name': \"Lille\",\n '__class__': 'City'\n }\n )\n\n def testNameType(self):\n \"\"\"\n Check name attribute type\n \"\"\"\n self.assertIsInstance(City().name, str)\n\n def testStateIdType(self):\n \"\"\"\n Check state_id attribute type\n \"\"\"\n self.assertIsInstance(City().state_id, str)\n","repo_name":"Atemmuda/AirBnB_clone","sub_path":"tests/test_models/test_city.py","file_name":"test_city.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31374332451","text":"\n\nfile_test = open(\"2022/data/2022-11-test.txt\")\nlines_test = file_test.readlines()\nfile = open(\"2022/data/2022-11.txt\")\nlines = file.readlines()\n\n\nclass Monkey:\n def __init__(self, lines):\n self.number = lines[0].split(' ')[1].split(':')[0]\n self.items = [int(i) for i in lines[1].split(':')[1].split(',')]\n self.operation = lines[2].split(':')[1].split('=')[1].strip('\\n')\n self.divisible = int(lines[3].split('by')[1])\n self.if_true = int(lines[4].split('monkey')[1])\n self.if_false = int(lines[5].split('monkey')[1])\n self.items_inspected = 0\n #print(\"{} {} {} {} {} {}\".format(self.number, self.items,\n # 
self.operation, self.divisible, self.if_true, self.if_false))\n\n    def evaluate(self):\n        items_thrown = []\n        for item in self.items:\n            \n            old = item\n            self.items_inspected += 1\n            new_item = eval(self.operation)\n            new_item = int(new_item/3)\n            if (new_item % self.divisible) == 0:\n                items_thrown.append((self.if_true, new_item))\n            else:\n                items_thrown.append((self.if_false, new_item))\n        self.items = []\n\n        return items_thrown\n\n    def evaluate_2(self):\n        items_thrown = []\n        for item in self.items:\n            self.items_inspected += 1\n            \n            # if the operation is a multiplication\n            if \"*\" in self.operation:\n                old = item \n\n                # if the old value is divisible, divide it first so old stays smaller\n                if (old % self.divisible) == 0:\n                    old = old // self.divisible\n                    new_item = eval(self.operation)\n                    items_thrown.append((self.if_true, new_item)) \n                else:\n                    new_item = eval(self.operation)\n                    items_thrown.append((self.if_false, new_item)) \n            else:\n                old = item \n                new_item = eval(self.operation)\n                if (new_item % self.divisible) == 0:\n                    new_item = new_item//self.divisible\n                    items_thrown.append((self.if_true, new_item))\n                else:\n                    items_thrown.append((self.if_false, new_item))\n\n        self.items = []\n\n        return items_thrown\n\n    def prime_factors(self, n):\n        factors = []\n        c = 2\n        while(n > 1):\n            \n            if(n % c == 0):\n                factors.append(c)\n                n = n / c\n            else:\n                c = c + 1\n        \n        return factors\n\n    def evaluate_3(self,pgcm):\n        \n        items_thrown = []\n        for item in self.items:\n\n            self.items_inspected += 1 \n            old = item\n            new_item = eval(self.operation)\n            \n            if new_item%self.divisible == 0:\n                items_thrown.append((self.if_true, new_item%pgcm))\n            else:\n                items_thrown.append((self.if_false, new_item%pgcm))\n        \n        self.items = []\n        return items_thrown\n\n    def __str__(self) -> str:\n        return \"{} {} {} {} {} {}\".format(self.number, self.items, self.operation, self.divisible, self.if_true, self.if_false)\n\n\ndef monky_business(lines,turns):\n    monkeys = [Monkey(lines[i:i+7]) for i in range(0, len(lines), 7)]\n    for i in range(turns):\n        for monkey in monkeys:\n            items_to_move = monkey.evaluate()\n            for item in items_to_move:\n                monkeys[item[0]].items.append(item[1])\n\n    print(sorted([monkey.items_inspected for monkey in monkeys])[-2:])\n    return sorted([monkey.items_inspected for monkey in monkeys])[-2]*sorted([monkey.items_inspected for monkey in monkeys])[-1]\n\n\nprint(\"PART 1 TEST : 10605 =? {}\".format(monky_business(lines_test,20)))\nprint(\"PART 1 PUZZLE: {}\".format(monky_business(lines,20)))\n\n\ndef monky_business_2(lines,turn):\n    monkeys = [Monkey(lines[i:i+7]) for i in range(0, len(lines), 7)]\n    pgcm = 1\n    for m in monkeys:\n        pgcm = pgcm * m.divisible\n    for i in range(turn):\n        #print(i)\n\n        for monkey in monkeys:\n            items_to_move = monkey.evaluate_3(pgcm)\n            for item in items_to_move:\n                monkeys[item[0]].items.append(item[1])\n    for monkey in monkeys:\n        print(monkey.items_inspected)\n\n    return sorted([monkey.items_inspected for monkey in monkeys])[-2]*sorted([monkey.items_inspected for monkey in monkeys])[-1]\n\n\nprint(\"PART 2 TEST 20 (UNFINISHED) : 2713310158 =? {}\".format(monky_business_2(lines_test,20)))\nprint(\"PART 2 TEST 1000 (UNFINISHED) : 2713310158 =? {}\".format(monky_business_2(lines_test,1000)))\nprint(\"PART 2 TEST 10000 (UNFINISHED) : 2713310158 =? 
{}\".format(monky_business_2(lines_test,10000)))\n# en vrai, ça donne pas la bonne répone mais close enough\nprint(\"PART 2 PUZZLE: {}\".format(monky_business_2(lines,10000)))\n","repo_name":"clemshubs/adventOfCode","sub_path":"2022/2022-11.py","file_name":"2022-11.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73405754753","text":"from image_preprocessing import load_train, extract_labels, extract_features\nfrom model import train_model\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\n\nIMAGE_SIZE = 66\n\ndef plot_confusion_matrix(X, Y, cmap=plt.cm.Greens, figsize=(10, 6)):\n Y_pred = model.predict(X)\n Y_pred = np.argmax(Y_pred, axis=1)\n\n Y_true = np.argmax(Y, axis=1)\n cm = confusion_matrix(Y_true, Y_pred)\n plt.figure(figsize=figsize)\n ax = sns.heatmap(cm, cmap=cmap, annot=True, square=True)\n plt.show()\n \n\nif __name__ == '__main__':\n train = load_train()\n X_train = extract_features(train, IMAGE_SIZE)\n Y_train = extract_labels(train)\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,\n test_size=0.1, \n random_state=42)\n\n # train_model(X_train, X_val, Y_train, Y_val, IMAGE_SIZE)\n\n model = load_model('model.h5')\n\n # Change X_val and Y_val to evaluate different data\n loss, accuracy = model.evaluate(X_val, Y_val)\n print('Final Loss: {}, Final Accuracy: {}'.format(loss, accuracy))\n\n plot_confusion_matrix(X_val, Y_val)\n","repo_name":"AlphaHelix456/Plant-Seedlings-Classifier","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18614204475","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\nfrom inferbeddings.models import TranslatingModel, BilinearDiagonalModel, ComplexModel\nfrom inferbeddings.models import similarities\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClosedForm:\n def __init__(self, parser,\n predicate_embedding_layer,\n model_class, model_parameters,\n is_unit_cube):\n self.parser = parser\n self.predicate_embedding_layer = predicate_embedding_layer\n self.model_class, self.model_parameters = model_class, model_parameters\n self.is_unit_cube = is_unit_cube\n\n def _to_idx(self, predicate_name):\n return self.parser.predicate_to_index[predicate_name]\n\n def _translating_loss(self, clause):\n head, body = clause.head, clause.body\n\n # At the moment, only simple rules as in \"r(X, Y) :- b(X, Y)\" are supported\n assert len(body) == 1\n body_atom = body[0]\n\n variable_names = {arg.name for arg in head.arguments} | {arg.name for arg in body_atom.arguments}\n assert len(variable_names) == 2\n\n # Check if it is an inverse rule, as in r(X, Y) :- b(Y, X), or not, as in r(X, Y) :- b(X, Y).\n is_inverse = False\n if head.arguments[0].name == body_atom.arguments[1].name:\n if head.arguments[1].name == body_atom.arguments[0].name:\n is_inverse = True\n\n # We only support TransE in its L2 squared distance formulation\n assert self.model_parameters['similarity_function'] == similarities.l2_sqr\n\n # Indices of q and r, respectively\n r_idx, b_idx = self._to_idx(head.predicate.name), self._to_idx(body_atom.predicate.name)\n\n r = tf.nn.embedding_lookup(self.predicate_embedding_layer, r_idx)\n b = 
tf.nn.embedding_lookup(self.predicate_embedding_layer, b_idx)\n\n prefix = tf.reduce_sum(tf.square(r)) - tf.reduce_sum(tf.square(b))\n if is_inverse:\n if self.is_unit_cube:\n loss = tf.nn.relu(prefix + 2 * tf.reduce_sum(tf.abs(r + b)))\n else:\n loss = tf.nn.relu(prefix + 4 * tf.sqrt(tf.reduce_sum(tf.square(r + b))))\n else:\n if self.is_unit_cube:\n loss = tf.nn.relu(prefix + 2 * tf.reduce_sum(tf.abs(r - b)))\n else:\n loss = tf.nn.relu(prefix + 4 * tf.sqrt(tf.reduce_sum(tf.square(r - b))))\n return loss\n\n def _bilinear_diagonal_loss_one(self, clause):\n head, body = clause.head, clause.body\n\n # At the moment, only simple rules as in \"r(X, Y) :- b(X, Y)\" are supported\n assert len(body) == 1\n body_atom = body[0]\n\n variable_names = {arg.name for arg in head.arguments} | {arg.name for arg in body_atom.arguments}\n assert len(variable_names) == 2\n\n # Indices of q and r, respectively\n r_idx, b_idx = self._to_idx(head.predicate.name), self._to_idx(body_atom.predicate.name)\n\n r = tf.nn.embedding_lookup(self.predicate_embedding_layer, r_idx)\n b = tf.nn.embedding_lookup(self.predicate_embedding_layer, b_idx)\n\n if self.is_unit_cube:\n loss = tf.reduce_sum(tf.nn.relu(b - r))\n else:\n loss = tf.reduce_max(tf.abs(b - r))\n return loss\n\n def _bilinear_diagonal_loss_two(self, clause):\n head, body = clause.head, clause.body\n\n # At the moment, only simple rules as in \"r(X, Z) :- b1(X, Y), b2(Y, Z)\" are supported\n assert len(body) == 2\n body_atom_1 = body[0]\n body_atom_2 = body[1]\n\n variable_names = {arg.name for arg in head.arguments} | {arg.name for arg in body_atom_1.arguments} | {arg.name for arg in body_atom_2.arguments}\n assert len(variable_names) == 3\n\n assert body_atom_1.arguments[0].name == head.arguments[0].name\n assert body_atom_2.arguments[1].name == head.arguments[1].name\n\n # Indices of b1, b2 and r, respectively\n r_idx = self._to_idx(head.predicate.name)\n b1_idx = self._to_idx(body_atom_1.predicate.name)\n b2_idx = self._to_idx(body_atom_2.predicate.name)\n\n r = tf.nn.embedding_lookup(self.predicate_embedding_layer, r_idx)\n b1 = tf.nn.embedding_lookup(self.predicate_embedding_layer, b1_idx)\n b2 = tf.nn.embedding_lookup(self.predicate_embedding_layer, b2_idx)\n\n if self.is_unit_cube:\n case_0 = tf.zeros_like(r)\n case_1 = - r\n case_2 = - r + tf.minimum(b1, b2)\n case_3 = tf.minimum(tf.zeros_like(r), b1)\n case_4 = tf.minimum(tf.zeros_like(r), b2)\n\n # Creating a [k, 5]-dimensional tensor\n _cases = tf.transpose(tf.stack([case_0, case_1, case_2, case_3, case_4]))\n\n # Computing max(case_i)\n _losses = tf.reduce_max(_cases, axis=1)\n\n # Computing \\sum_i max(case_i)\n loss = tf.reduce_sum(_losses)\n else:\n case_0 = tf.zeros_like(r)\n case_1 = tf.minimum(b1, b2) - r\n case_2 = tf.minimum(- b1, - b2) - r\n case_3 = tf.minimum(b1, - b2) + r\n case_4 = tf.minimum(- b1, b2) + r\n\n # Creating a [k, 5]-dimensional tensor\n _cases = tf.transpose(tf.stack([case_0, case_1, case_2, case_3, case_4]))\n\n # Computing max(case_i)\n _losses = tf.reduce_max(_cases, axis=1)\n\n # Computing \\max_i max(case_i)\n loss = tf.reduce_max(_losses)\n return loss\n\n def _complex_loss(self, clause):\n head, body = clause.head, clause.body\n\n # At the moment, only simple rules as in \"r(X, Y) :- b(X, Y)\" are supported\n assert len(body) == 1\n body_atom = body[0]\n\n variable_names = {arg.name for arg in head.arguments} | {arg.name for arg in body_atom.arguments}\n assert len(variable_names) == 2\n\n # Check if it is an inverse rule, as in r(X, Y) :- b(Y, X), or not, as 
in r(X, Y) :- b(X, Y).\n        is_inverse = False\n        if head.arguments[0].name == body_atom.arguments[1].name:\n            if head.arguments[1].name == body_atom.arguments[0].name:\n                is_inverse = True\n\n        # Indices of q and r, respectively\n        r_idx, b_idx = self._to_idx(head.predicate.name), self._to_idx(body_atom.predicate.name)\n\n        r = tf.nn.embedding_lookup(self.predicate_embedding_layer, r_idx)\n        b = tf.nn.embedding_lookup(self.predicate_embedding_layer, b_idx)\n\n        n = r.get_shape()[-1].value\n        r_re, r_im = r[:n // 2], r[n // 2:]\n        b_re, b_im = b[:n // 2], b[n // 2:]\n\n        if is_inverse:\n            if self.is_unit_cube:\n                # This has the same form as the simple implications case,\n                # but with r replaced by \\compl{r} or, more specifically,\n                # r_im replaced by - r_im.\n                r_im = - r_im\n\n                delta_re, delta_im = b_re - r_re, b_im - r_im\n\n                # For each index, the loss will be the maximum across such values\n                case_1 = 2 * delta_re\n                case_2 = tf.abs(delta_im)\n                case_3 = delta_re + tf.abs(delta_im)\n\n                # The result will be a [k, 3]-dimensional tensor\n                _cases = tf.transpose(tf.stack([case_1, case_2, case_3]))\n                # Computing the maximum on dimension 1, leading to a [k]-dimensional tensor\n                _losses = tf.reduce_max(_cases, axis=1)\n\n                loss = tf.reduce_sum(_losses)\n            else:\n                loss = tf.reduce_max(tf.sqrt(tf.square(b_re - r_re) + tf.square(b_im + r_im)))\n        else:\n            if self.is_unit_cube:\n                delta_re, delta_im = b_re - r_re, b_im - r_im\n\n                # For each index, the loss will be the maximum across such values\n                case_1 = 2 * delta_re\n                case_2 = tf.abs(delta_im)\n                case_3 = delta_re + tf.abs(delta_im)\n\n                # The result will be a [k, 3]-dimensional tensor\n                _cases = tf.transpose(tf.stack([case_1, case_2, case_3]))\n                # Computing the maximum on dimension 1, leading to a [k]-dimensional tensor\n                _losses = tf.reduce_max(_cases, axis=1)\n\n                loss = tf.reduce_sum(_losses)\n            else:\n                loss = tf.reduce_max(tf.sqrt(tf.square(b_re - r_re) + tf.square(b_im - r_im)))\n        return loss\n\n    def __call__(self, clause):\n        clause_body = clause.body\n\n        loss = None\n        if self.model_class == BilinearDiagonalModel:\n            # We are using DistMult\n            if len(clause_body) == 1:\n                loss = self._bilinear_diagonal_loss_one(clause)\n            elif len(clause_body) == 2:\n                loss = self._bilinear_diagonal_loss_two(clause)\n        elif self.model_class == TranslatingModel:\n            # We are using TransE\n            loss = self._translating_loss(clause)\n        elif self.model_class == ComplexModel:\n            # We are using ComplEx\n            loss = self._complex_loss(clause)\n\n        assert loss is not None\n\n        return tf.nn.relu(loss)\n","repo_name":"uclnlp/inferbeddings","sub_path":"inferbeddings/adversarial/closedform/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"61"}
+{"seq_id":"38547526674","text":"from docx import Document\nfrom googletrans import Translator\n\ndef translate_text(text, source_language='en', target_language='ar'):\n    try:\n        # Initialize the translator\n        translator = Translator()\n\n        # Translate the text to the target language\n        translated_text = translator.translate(text, src=source_language, dest=target_language)\n\n        if translated_text.text:\n            return translated_text.text\n        else:\n            print(\"Translation failed for:\", text)\n            return text\n\n    except Exception as e:\n        print(\"An error occurred during translation:\", str(e))\n        return text\n\ndef translate_docx(input_file, output_file, source_language='en', target_language='ar'):\n    try:\n        # Load the input Word document\n        doc = Document(input_file)\n\n        # Translate 
the content of each paragraph in the document\n        for paragraph in doc.paragraphs:\n            translated_text = translate_text(paragraph.text, source_language, target_language)\n            paragraph.text = translated_text\n\n        # Translate the content of each table in the document\n        for table in doc.tables:\n            for row in table.rows:\n                for cell in row.cells:\n                    translated_text = translate_text(cell.text, source_language, target_language)\n                    cell.text = translated_text\n\n        # Save the translated content to the output file\n        doc.save(output_file)\n\n        print(\"Translation completed. The translated content is saved in:\", output_file)\n\n    except Exception as e:\n        print(\"An error occurred:\", str(e))\n\nif __name__ == \"__main__\":\n    # Replace 'input_file.docx' with the path to your input Word file.\n    # The translated content will be saved to 'output_file_arabic.docx'.\n    translate_docx('input_file.docx', 'output_file_arabic.docx')\n","repo_name":"AhmedAbdelbasetAli/Automation","sub_path":"Translate word file form English to Arabic.py","file_name":"Translate word file form English to Arabic.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31775077766","text":"from antlr4 import *\n\nfrom . import YAMLLexer\nfrom . import YAMLParser\nfrom . import yaml_input_stream\n\n\nclass Antelope(object):\n    def __init__(self, s):\n        istream = yaml_input_stream.StringInputStream(s)\n        self.parse(istream)\n\n    def parse(self, istream):\n        lexer = YAMLLexer(istream)\n        stream = CommonTokenStream(lexer)\n        parser = YAMLParser(stream)\n        self.tree = parser.document()\n        print(self.tree.toStringTree(recog=parser))\n","repo_name":"omry/antelope","sub_path":"python/antelope/antelope.py","file_name":"antelope.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73309375553","text":"# Implement a MapSum class that supports two methods, insert and sum:\n#\n# MapSum() initializes the MapSum object\n# void insert(String key, int val) inserts a key-val pair, where the string is the key and the integer is the value val. If the key already exists, the original key-value pair is replaced by the new one.\n# int sum(string prefix) returns the sum of the values of all keys that start with the given prefix.\n\n# For prefix problems like this, use recursion, but pay attention to the base case of the recursion\n\nclass TNode:\n    def __init__(self, val):\n        self.val = val\n        self.weight = 0\n        self.end = False\n        self.children = {}\n\nclass MapSum:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.root = TNode(\"\")\n\n    def insert(self, key, val):\n        currNode = self.root\n        for ch in key:\n            if ch not in currNode.children:\n                currNode.children[ch] = TNode(ch)\n            currNode = currNode.children[ch]\n        currNode.weight = val\n        currNode.end = True\n\n    # excludes startNode.weight itself\n    def sum_help(self, startNode):\n        if len(startNode.children) == 0:\n            return 0\n        res = 0\n        for ch in startNode.children:\n            nextNode = startNode.children[ch]\n            if nextNode.end:\n                res += nextNode.weight\n            res += self.sum_help(nextNode)\n        return res\n\n    def sum(self, prefix):\n        currNode = self.root\n        for ch in prefix:\n            if ch not in currNode.children:\n                return 0\n            currNode = currNode.children[ch]\n\n        return self.sum_help(currNode) + currNode.weight\n\n# Your MapSum object will be instantiated and called as such:\nobj = MapSum()\nobj.insert(\"apple\",3)\nparam_1 = obj.sum(\"ap\")\nobj.insert(\"app\",2)\nparam_2 = 
obj.sum(\"ap\")\nprint(param_1)\nprint(param_2)","repo_name":"fengjiaxin/prepare_work","sub_path":"leetcode_tag/Trie/677.键值映射.py","file_name":"677.键值映射.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408559380","text":"import numpy as np, logging\nimport h5py\nimport NLSA\nimport myio\nimport warnings\nwarnings.simplefilter(action='ignore',category=FutureWarning)\nimport os\nimport p\n\n'''\nCopyright (c) UWM, Ali Dashti 2016 (original matlab version)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nCCopyright (c) Columbia University Hstau Liao 2018 (python version) \nCopyright (c) Columbia University Evan Seitz 2019 (python version) \t\nCopyright (c) Columbia University Suvrajit Maji 2020 (python version) \n'''\n\ndef corr(a,b,n,m):\n A = a[:,n]\n B = b[:,m]\n A = A - np.mean(A)\n B = B - np.mean(B)\n try:\n co = np.dot(A,B) / (np.std(A)*np.std(B))\n except:\n print(\"flat image\")\n return 1\n return co\n\ndef diff_corr(a,b,max):\n\n return corr(a,b,0,0)+corr(a,b,max,max)- \\\n (corr(a,b,0,max)+corr(a,b,max,0))\n\ndef fileCheck(N):\n fin_PDs = np.zeros(shape=(N,p.num_psis), dtype=int) #zeros signify PD_psi entry not complete\n for root, dirs, files in os.walk(p.psi2_prog):\n for file in sorted(files):\n if not file.startswith('.'): #ignore hidden files\n fin_PD, fin_psi = file.split('_')\n fin_PDs[int(fin_PD),int(fin_psi)] = int(1)\n return fin_PDs\n\n\ndef op(input_data,conOrderRange,traj_name,isFull,psiTrunc,*argv):\n dist_file = input_data[0]\n psi_file = input_data[1] #15-dim diffusion map coordinates\n psi2_file = input_data[2] #output to be generated by Psi Analysis\n EL_file = input_data[3]\n psinums = input_data[4]\n senses = input_data[5]\n prD = input_data[6]\n if len(input_data) == 8:\n psi_list = input_data[7]\n else:\n psi_list = psinums\n data_IMG = myio.fin1(dist_file)\n data_psi = myio.fin1(psi_file)\n\n D = np.array(data_IMG['D']) #distance matrix\n imgAll = np.array(data_IMG['imgAll']) #every image in PD (and dimensions): e.g., shape=(numPDs,boxSize,boxSize)\n\n msk2 = np.array(data_IMG['msk2']) # April 2020, vol mask to be used after ctf has been applied\n\n CTF = np.array(data_IMG['CTF'])\n psi = data_psi['psi'] #coordinates of all images in 15-dim space from diffusion map: e.g., shape=(numPDs,15)\n posPath = data_psi['posPath'] #indices of every image in PD: e.g., shape=(numPDs,); [0,1,2,...(numPDs-1)]\n nS = len(posPath) #number of images in PD\n ConOrder = int(np.floor(float(nS) / conOrderRange))\n # if ConOrder is large, noise-free 2D frames expected w/ small range of conformations, \\\n # while losing snapshots at edges\n\n dim = int(np.sqrt(imgAll.size/D.shape[0]))\n CTF = CTF.reshape(D.shape[0],dim,dim) #needed only if read from matlab\n posPath = np.squeeze(posPath)\n D = D[posPath][:, posPath]\n\n ExtPar = dict(outDir='',prD=prD)\n\n for psinum in psi_list: #for each reaction coordinates do the following:\n if psinum == -1:\n continue\n psiSortedInd = np.argsort(psi[:,psinum]) #e.g., shape=(numPDs,): reordering image indices along each diff map coord\n PosPsi1 = psiSortedInd #duplicate of above...\n\n psi1 = psi[PosPsi1,:]\n CC = range(0,max(psi1.shape))\n\n DD = D[PosPsi1]\n DD = DD[:,PosPsi1] #distance matrix with indices of images re-arranged along current diffusion map coordinate\n num = DD.shape[1] #number of images in PD (duplicate of nS?)\n k = num - ConOrder\n\n NLSAPar = 
dict(num=num,ConOrder=ConOrder,k=k,tune=p.tune,nS=nS,save=False,psiTrunc=psiTrunc)\n #IMGT,Topo_mean,psirec,psiC1,sdiag,VX,mu,tau = NLSA.op(NLSAPar, DD, posPath,PosPsi1,imgAll,CTF, ExtPar)\n IMGT,Topo_mean,psirec,psiC1,sdiag,VX,mu,tau = NLSA.op(NLSAPar, DD, posPath,PosPsi1,imgAll,msk2,CTF, ExtPar) # April 2020, pass the msk2 var also\n kk = 10\n #if np.median(tau[:kk] > 0.8): this doesn't do it\n # tau = 1-tau\n #print 'yes'\n nSrecon = min(IMGT.shape)\n numclass = int(min(p.nClass, np.floor(nSrecon / 2.)))\n\n tau = (tau - min(tau))/ (max(tau) - min(tau))\n tauinds = []\n i1 = 0\n i2 = IMGT.shape[0]\n\n IMG1 = np.zeros((i2, numclass),dtype='float64')\n for i in range(numclass):\n ind1 = float(i) / numclass\n ind2 = ind1 + 1. / numclass;\n if (i == numclass-1):\n tauind = ((tau >= ind1) & (tau <= ind2)).nonzero()[0]\n else:\n tauind = ((tau >= ind1) & (tau < ind2)).nonzero()[0]\n while (tauind.size == 0):\n sc = 1. / (numclass * 2.)\n ind1 = ind1 - sc * ind1\n ind2 = ind2 + sc * ind2\n tauind = ((tau >= ind1) & (tau < ind2)).nonzero()[0]\n\n IMG1[i1:i2, i] = IMGT[:, tauind[0]]\n tauinds.append(tauind[0])\n\n if isFull: # second pass for EL1D\n\n # adjust tau by comparing the IMG1s\n psi2_file = '{}_psi_{}'.format(psi2_file, psinum)\n data = myio.fin1(psi2_file)\n IMG1a= data['IMG1']\n\n dc = diff_corr(IMG1,IMG1a,numclass-1)\n if (senses[0]==-1 and dc > 0) or senses[0]==1 and dc < 0:\n tau = 1-tau\n # output result\n\n outFile = '{}_{}_1'.format(EL_file,traj_name)\n myio.fout1(outFile, ['IMG1','IMGT','posPath','PosPsi1','psirec','tau','psiC1','mu',\n 'VX','sdiag','Topo_mean','tauinds'],\n [IMG1,IMGT,posPath,PosPsi1,psirec,tau,psiC1,mu,\n VX,sdiag,Topo_mean,tauinds])\n\n #######################################################\n # create empty PD files after each Pickle dump to...\n # ...be used to resume (avoiding corrupt Pickle files):\n progress_fname = os.path.join(p.EL_prog, '%s' % (prD))\n open(progress_fname, 'a').close() #create empty file to signify non-corrupted Pickle dump\n #######################################################\n\n else: # first pass\n outFile = '{}_psi_{}'.format(psi2_file,psinum)\n myio.fout1(outFile,['IMG1','psirec','tau','psiC1','mu','VX','sdiag',\n 'Topo_mean','tauinds'],[IMG1,psirec,tau,psiC1,mu,VX,sdiag,\n Topo_mean,tauinds])\n\n #######################################################\n # create empty PD files after each Pickle dump to...\n # ...be used to resume (avoiding corrupt Pickle files):\n progress_fname = os.path.join(p.psi2_prog, '%s_%s' % (prD, psinum))\n open(progress_fname, 'a').close() #create empty file to signify non-corrupted Pickle dump\n #######################################################\n if argv:\n progress3 = argv[0]\n fin_PDs = fileCheck(p.numberofJobs) #array of finished PDs (0's are unfinished, 1's are finished)\n offset = np.count_nonzero(fin_PDs==1)\n progress3.emit(int((offset / float((p.numberofJobs)*p.num_psis)) * 100))\n\n res = 'ok'\n return res\n\nif __name__ == '__main__':\n #align_param_file = 'data/refine_005.tls'\n op()\n","repo_name":"evanseitz/ManifoldEM_Python","sub_path":"modules/psiAnalysisParS2.py","file_name":"psiAnalysisParS2.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"31288634305","text":"import csv\nrssi = []\nphase = []\nchannel = []\nepc = []\nwith open('D:\\\\Atom\\\\Dependency\\\\tens.csv', newline='') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n 
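# each row from csv.DictReader is a dict keyed by the CSV header names, so fields can be collected by column name (these look like RFID tag reads: channel, EPC, phase, RSSI)\n        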
channel.append(row['CHANNEL'])\n        epc.append(row['EPC'])\n        phase.append(row['PHASE'])\n        rssi.append(row['RSSI'])\nprint(rssi)\n","repo_name":"Jennifer331/Scripts","sub_path":"read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"6364068092","text":"\"\"\"\n    Deduplicate requests by the SHA-1 hash of their URL\n\"\"\"\n\nimport hashlib\nfrom scrapy.dupefilters import RFPDupeFilter\nfrom scrapy.utils.url import canonicalize_url\n\n\nclass URLSha1Filter(RFPDupeFilter):\n\n    def __init__(self, path=None, debug=False):\n        self.url_seen = set()\n        RFPDupeFilter.__init__(self, path)\n\n    def request_seen(self, request):\n        fp = hashlib.sha1()\n        fp.update((canonicalize_url(request.url).encode('utf-8')))\n        url_sha1 = fp.hexdigest()\n        if url_sha1 in self.url_seen:\n            return True\n        else:\n            self.url_seen.add(url_sha1)","repo_name":"cshk/scrapy","sub_path":"dytt/dytt/util/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12098932889","text":"from floodsystem.geo import stations_within_radius\nfrom floodsystem.stationdata import build_station_list\n\ndef run():\n    \"\"\"Requirements for Task 1C\"\"\"\n\n    stations = build_station_list()\n    #uses function to calculate stations within 10 km of the given centre\n    within_10 = stations_within_radius(stations, (52.2053, 0.1218), 10)\n    stations_within_10 = []\n    #iterates through stations and appends all items to station_within_10\n    for station in within_10:\n        stations_within_10.append(station)\n    print(sorted(stations_within_10, key=lambda x: x.name))\n\nif __name__ == \"__main__\":\n    print(\"***Task 1C: CUED Part IA Flood Warning System***\")\n    run()\n","repo_name":"HSCam/Flood-Warning-System","sub_path":"Task1C.py","file_name":"Task1C.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24520147894","text":"import time\n\nimport requests\nfrom PIL import Image\n\n\ndef detect_color(rgb, filename):\n    img = Image.open(filename)\n    img = img.convert('RGBA')\n    data = img.getdata()\n\n    for item in data:\n        if item[0] == rgb[0] and item[1] == rgb[1] and item[2] == rgb[2]:\n            return True\n    return False\n\n\ndef solve_epic_guard(message):\n    apple = [237, 28, 36, 38, 198, 86, 201, 120, 31, \"apple\"]\n    life_potion = [217, 17, 27, 198, 252, 255, 109, 64, 1, \"life potion\"]\n    normie_fish = [0, 198, 255, 0, 0, 0, 0, 96, 124, \"normie fish\"]\n    coin = [255, 242, 0, 221, 210, 0, 149, 141, 0, \"coin\"]\n    zombie_eye = [194, 235, 71, 77, 98, 11, 40, 40, 40, \"zombie eye\"]\n    banana = [253, 215, 0, 225, 191, 0, 209, 135, 22, \"banana\"]\n    golden_fish = [255, 204, 0, 129, 104, 0, 0, 0, 0, \"golden fish\"]\n    unicorn_horn = [118, 23, 54, 241, 82, 134, 237, 116, 155, \"unicorn horn\"]\n    ruby = [230, 0, 0, 164, 0, 0, 196, 0, 0, \"ruby\"]\n    epic_coin = [184, 95, 184, 152, 77, 202, 106, 65, 214, \"epic coin\"]\n    tries = [apple, life_potion, normie_fish, coin, zombie_eye, banana, golden_fish, unicorn_horn, ruby, epic_coin]\n    global epic_guard_answer\n    epic_guard_answer = \"Tvoje mamka\"\n    img_data = requests.get(message.attachments[0]).content\n    name = str(time.time()).split(\".\", 1)[0] + \".png\"\n    with open(\"epic_guard_images/\" + name, 'wb') as handler:\n        handler.write(img_data)\n    for item in tries:\n        color_one = detect_color((item[0], item[1], item[2]), 
\"epic_guard_images/\" + name)\n color_two = detect_color((item[3], item[4], item[5]), \"epic_guard_images/\" + name)\n color_three = detect_color((item[6], item[7], item[8]), \"epic_guard_images/\" + name)\n if color_one is True and color_two is True and color_three is True:\n epic_guard_answer = item[9]\n return epic_guard_answer\n","repo_name":"hesmatt/epic-rpg-bot","sub_path":"modules/epic_guard_solver.py","file_name":"epic_guard_solver.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14354320344","text":"#====================== BEGIN GPL LICENSE BLOCK ======================\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n#======================= END GPL LICENSE BLOCK ========================\n\n# \n\nimport bpy\n\nfrom ...base_rig import BaseRig\n\nfrom ...utils.layers import DEF_LAYER\nfrom ...utils.naming import strip_org, make_deformer_name\nfrom ...utils.widgets_basic import create_bone_widget, create_circle_widget\n\nfrom itertools import repeat\n\n\nclass Rig(BaseRig):\n \"\"\" A \"copy\" rig. 
All it does is duplicate the original bone and\n        constrain it.\n        This is a control and deformation rig.\n\n    \"\"\"\n    def find_org_bones(self, pose_bone):\n        return pose_bone.name\n\n\n    def initialize(self):\n        \"\"\" Gather and validate data about the rig.\n        \"\"\"\n        self.org_name = strip_org(self.bones.org)\n\n        self.make_control = self.params.make_control\n        self.make_widget = self.params.make_widget\n\n        deform = self.params.make_deform\n        rename = self.params.rename_to_deform\n\n        self.make_deform = deform and not rename\n        self.rename_deform = deform and rename\n\n        self.relink = self.params.relink_constraints\n\n\n    def generate_bones(self):\n        bones = self.bones\n\n        # Make a control bone (copy of original).\n        if self.make_control:\n            bones.ctrl = self.copy_bone(bones.org, self.org_name, parent=True)\n\n        # Make a deformation bone (copy of original, child of original).\n        if self.make_deform:\n            bones.deform = self.copy_bone(bones.org, make_deformer_name(self.org_name), bbone=True)\n\n\n    def parent_bones(self):\n        bones = self.bones\n\n        if self.make_deform:\n            self.set_bone_parent(bones.deform, bones.org, use_connect=False)\n\n        if self.relink:\n            self.generator.disable_auto_parent(bones.org)\n\n            parent_spec = self.params.parent_bone\n            if parent_spec:\n                old_parent = self.get_bone_parent(bones.org)\n                new_parent = self.find_relink_target(parent_spec, old_parent or '') or None\n                self.set_bone_parent(bones.org, new_parent)\n\n                if self.make_control:\n                    self.set_bone_parent(bones.ctrl, new_parent)\n\n\n    def configure_bones(self):\n        bones = self.bones\n\n        if self.make_control:\n            self.copy_bone_properties(bones.org, bones.ctrl)\n\n\n    def rig_bones(self):\n        bones = self.bones\n\n        if self.relink:\n            for con in self.get_bone(bones.org).constraints:\n                parts = con.name.split('@')\n\n                if len(parts) > 1:\n                    self.relink_constraint(con, parts[1:])\n\n        if self.make_control:\n            # Constrain the original bone.\n            self.make_constraint(bones.org, 'COPY_TRANSFORMS', bones.ctrl, insert_index=0)\n\n    def relink_constraint(self, con, specs):\n        if con.type == 'ARMATURE':\n            if len(specs) == 1:\n                specs = repeat(specs[0])\n            elif len(specs) != len(con.targets):\n                self.report_error(\"Constraint {} actually has {} targets\", con.name, len(con.targets))\n\n            for tgt, spec in zip(con.targets, specs):\n                tgt.subtarget = self.find_relink_target(spec, tgt.subtarget)\n\n        else:\n            if len(specs) > 1:\n                self.report_error(\"Only the Armature constraint can have multiple '@' targets: {}\", con.name)\n\n            con.subtarget = self.find_relink_target(specs[0], con.subtarget)\n\n    def find_relink_target(self, spec, old_target):\n        if spec == '':\n            return old_target\n        elif spec in {'DEF', 'MCH'}:\n            spec = spec + '-' + strip_org(old_target)\n\n        if spec not in self.obj.pose.bones:\n            # Hack: allow referring to copy rigs using Rename To Deform as DEF\n            if old_target.startswith('ORG-') and spec == make_deformer_name(strip_org(old_target)):\n                from . 
import copy_chain\n\n owner = self.generator.bone_owners.get(old_target)\n\n if ((isinstance(owner, Rig) and owner.rename_deform) or\n (isinstance(owner, copy_chain.Rig) and owner.rename_deforms)):\n return old_target\n\n self.report_error(\"Cannot find bone '{}' for relinking\", spec)\n\n return spec\n\n\n def generate_widgets(self):\n bones = self.bones\n\n if self.make_control:\n # Create control widget\n if self.make_widget:\n create_circle_widget(self.obj, bones.ctrl, radius=0.5)\n else:\n create_bone_widget(self.obj, bones.ctrl)\n\n\n def finalize(self):\n if self.rename_deform:\n new_name = self.rename_bone(self.bones.org, make_deformer_name(self.org_name))\n\n bone = self.get_bone(new_name).bone\n bone.use_deform = True\n bone.layers = DEF_LAYER\n\n\n @classmethod\n def add_parameters(self, params):\n \"\"\" Add the parameters of this rig type to the\n RigifyParameters PropertyGroup\n \"\"\"\n params.make_control = bpy.props.BoolProperty(\n name = \"Control\",\n default = True,\n description = \"Create a control bone for the copy\"\n )\n\n params.make_widget = bpy.props.BoolProperty(\n name = \"Widget\",\n default = True,\n description = \"Choose a widget for the bone control\"\n )\n\n params.make_deform = bpy.props.BoolProperty(\n name = \"Deform\",\n default = True,\n description = \"Create a deform bone for the copy\"\n )\n\n params.rename_to_deform = bpy.props.BoolProperty(\n name = \"Rename To Deform\",\n default = False,\n description = \"Rename the original bone itself to use as deform bone (advanced feature)\"\n )\n\n params.relink_constraints = bpy.props.BoolProperty(\n name = \"Relink Constraints\",\n default = False,\n description = \"For constraints with names formed like 'base@bonename', use the part after '@' as the new subtarget after all bones are created. Use '@DEF' or '@MCH' to simply prepend the prefix\"\n )\n\n params.parent_bone = bpy.props.StringProperty(\n name = \"Parent\",\n default = \"\",\n description = \"Replace the parent with a different bone after all bones are created. 
Using simply DEF or MCH will prepend the prefix instead\"\n )\n\n\n @classmethod\n def parameters_ui(self, layout, params):\n \"\"\" Create the ui for the rig parameters.\n \"\"\"\n r = layout.row()\n r.prop(params, \"make_control\")\n r = layout.row()\n r.prop(params, \"make_widget\")\n r.enabled = params.make_control\n r = layout.row()\n r.prop(params, \"make_deform\")\n\n if params.make_deform:\n r = layout.row()\n r.prop(params, \"rename_to_deform\")\n\n r = layout.row()\n r.prop(params, \"relink_constraints\")\n\n if params.relink_constraints:\n r = layout.row()\n r.prop(params, \"parent_bone\")\n\n\ndef create_sample(obj):\n \"\"\" Create a sample metarig for this rig type.\n \"\"\"\n # generated by rigify.utils.write_metarig\n bpy.ops.object.mode_set(mode='EDIT')\n arm = obj.data\n\n bones = {}\n\n bone = arm.edit_bones.new('Bone')\n bone.head[:] = 0.0000, 0.0000, 0.0000\n bone.tail[:] = 0.0000, 0.0000, 0.2000\n bone.roll = 0.0000\n bone.use_connect = False\n bones['Bone'] = bone.name\n\n bpy.ops.object.mode_set(mode='OBJECT')\n pbone = obj.pose.bones[bones['Bone']]\n pbone.rigify_type = 'basic.super_copy'\n pbone.lock_location = (False, False, False)\n pbone.lock_rotation = (False, False, False)\n pbone.lock_rotation_w = False\n pbone.lock_scale = (False, False, False)\n pbone.rotation_mode = 'QUATERNION'\n\n bpy.ops.object.mode_set(mode='EDIT')\n for bone in arm.edit_bones:\n bone.select = False\n bone.select_head = False\n bone.select_tail = False\n for b in bones:\n bone = arm.edit_bones[bones[b]]\n bone.select = True\n bone.select_head = True\n bone.select_tail = True\n arm.edit_bones.active = bone\n\n return bones\n","repo_name":"sambler/myblenderaddons","sub_path":"rigify/rigs/basic/super_copy.py","file_name":"super_copy.py","file_ext":"py","file_size_in_byte":9013,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"35466664293","text":"from django.shortcuts import render, redirect, HttpResponse\r\nfrom django.contrib import messages\r\nfrom .models import Customer, Product, Cart, OrderPlaced\r\nfrom .forms import CustomerRegistrationForm, CustomerProfileForm\r\nfrom django.views import View\r\nfrom django.http import JsonResponse\r\nfrom django.db.models import Q\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.utils.decorators import method_decorator\r\n\r\n\r\nclass ProductView(View):\r\n def get(self, request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n furniture = Product.objects.filter(category='FU')\r\n fashion = Product.objects.filter(category='F')\r\n toys = Product.objects.filter(category='TY')\r\n electronics = Product.objects.filter(category='E')\r\n homeappliance = Product.objects.filter(category='HA')\r\n return render(request, 'app/home.html',\r\n {'furniture': furniture, 'fashion': fashion, 'electronics': electronics, 'toys': toys,\r\n 'homeappliance': homeappliance,'totalitem': totalitem})\r\n\r\n\r\nclass ProductDetailView(View):\r\n def get(self, request, pk):\r\n totalitem = 0\r\n product = Product.objects.get(pk=pk)\r\n item_already_in_cart = False\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n item_already_in_cart = Cart.objects.filter(Q(product=product.id) & Q(user=request.user)).exists()\r\n return render(request, 'app/productdetail.html',\r\n {'product': product, 'item_already_in_cart': item_already_in_cart, 'totalitem': 
totalitem})\r\n\r\n\r\n@login_required()\r\ndef add_to_cart(request):\r\n user = request.user\r\n item_already_in_cart1 = False\r\n product = request.GET.get('prod_id')\r\n item_already_in_cart1 = Cart.objects.filter(Q(product=product) & Q(user=request.user)).exists()\r\n if not item_already_in_cart1:\r\n product_title = Product.objects.get(id=product)\r\n Cart(user=user, product=product_title).save()\r\n messages.success(request, 'Product Added to Cart Successfully !!')\r\n return redirect('/cart')\r\n else:\r\n return redirect('/cart')\r\n\r\n\r\n@login_required\r\ndef show_cart(request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n user = request.user\r\n cart = Cart.objects.filter(user=user)\r\n amount = 0.0\r\n shipping_amount = 0\r\n totalamount = 0.0\r\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\r\n print(cart_product)\r\n if cart_product:\r\n for p in cart_product:\r\n tempamount = (p.quantity * p.product.discounted_price)\r\n amount += tempamount\r\n totalamount = amount + shipping_amount\r\n return render(request, 'app/addtocart.html',\r\n {'carts': cart, 'amount': amount, 'totalamount': totalamount, 'totalitem': totalitem})\r\n else:\r\n return render(request, 'app/emptycart.html', {'totalitem': totalitem})\r\n else:\r\n return render(request, 'app/emptycart.html', {'totalitem': totalitem})\r\n\r\n\r\ndef plus_cart(request):\r\n if request.method == 'GET':\r\n prod_id = request.GET['prod_id']\r\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\r\n c.quantity += 1\r\n c.save()\r\n amount = 0.0\r\n shipping_amount = 0\r\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\r\n for p in cart_product:\r\n tempamount = (p.quantity * p.product.discounted_price)\r\n amount += tempamount\r\n data = {\r\n 'quantity': c.quantity,\r\n 'amount': amount,\r\n 'totalamount': amount + shipping_amount\r\n }\r\n return JsonResponse(data)\r\n else:\r\n return HttpResponse(\"\")\r\n\r\n\r\ndef minus_cart(request):\r\n if request.method == 'GET':\r\n prod_id = request.GET['prod_id']\r\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\r\n c.quantity -= 1\r\n c.save()\r\n amount = 0.0\r\n shipping_amount = 0\r\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\r\n for p in cart_product:\r\n tempamount = (p.quantity * p.product.discounted_price)\r\n amount += tempamount\r\n data = {\r\n 'quantity': c.quantity,\r\n 'amount': amount,\r\n 'totalamount': amount + shipping_amount\r\n }\r\n return JsonResponse(data)\r\n else:\r\n return HttpResponse(\"\")\r\n\r\n\r\n@login_required\r\ndef checkout(request):\r\n user = request.user\r\n add = Customer.objects.filter(user=user)\r\n cart_items = Cart.objects.filter(user=request.user)\r\n amount = 0.0\r\n shipping_amount = 0\r\n totalamount = 0.0\r\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\r\n if cart_product:\r\n for p in cart_product:\r\n tempamount = (p.quantity * p.product.discounted_price)\r\n amount += tempamount\r\n totalamount = amount + shipping_amount\r\n return render(request, 'app/checkout.html', {'add': add, 'cart_items': cart_items, 'totalcost': totalamount})\r\n\r\n\r\n@login_required\r\ndef payment_done(request):\r\n custid = request.GET.get('custid')\r\n print(\"Customer ID\", custid)\r\n user = request.user\r\n cartid = Cart.objects.filter(user=user)\r\n customer = Customer.objects.get(id=custid)\r\n print(customer)\r\n for cid in 
cartid:\r\n OrderPlaced(user=user, customer=customer, product=cid.product, quantity=cid.quantity).save()\r\n print(\"Order Saved\")\r\n cid.delete()\r\n print(\"Cart Item Deleted\")\r\n return redirect(\"orders\")\r\n\r\n\r\ndef remove_cart(request):\r\n if request.method == 'GET':\r\n prod_id = request.GET['prod_id']\r\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\r\n c.delete()\r\n amount = 0.0\r\n shipping_amount = 0\r\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\r\n for p in cart_product:\r\n tempamount = (p.quantity * p.product.discounted_price)\r\n amount += tempamount\r\n data = {\r\n 'amount': amount,\r\n 'totalamount': amount + shipping_amount\r\n }\r\n return JsonResponse(data)\r\n else:\r\n return HttpResponse(\"\")\r\n\r\n\r\n@login_required\r\ndef address(request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n add = Customer.objects.filter(user=request.user)\r\n return render(request, 'app/address.html', {'add': add, 'active': 'btn-primary', 'totalitem': totalitem})\r\n\r\n\r\n@login_required\r\ndef orders(request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n op = OrderPlaced.objects.filter(user=request.user)\r\n return render(request, 'app/orders.html', {'order_placed': op, 'totalitem': totalitem})\r\n\r\n\r\ndef electronic(request, data=None):\r\n totalitem = 0\r\n electronics = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if data == None:\r\n electronics = Product.objects.filter(category='E')\r\n elif data == 'Apple' or data == 'Samsung' or data == 'LG' or data == 'Google':\r\n electronics = Product.objects.filter(category='E').filter(brand=data)\r\n elif data == 'below':\r\n electronics = Product.objects.filter(category='E').filter(discounted_price__lt=10000)\r\n elif data == 'above':\r\n electronics = Product.objects.filter(category='E').filter(discounted_price__gt=10000)\r\n return render(request, 'app/electronic.html', {'electronics': electronics, 'totalitem': totalitem})\r\n\r\n\r\ndef furnitures(request, data=None):\r\n totalitem = 0\r\n furniture = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if data == None:\r\n furniture = Product.objects.filter(category='FU')\r\n elif data == 'IKEA' or data == 'PELLOS':\r\n furniture = Product.objects.filter(category='FU').filter(brand=data)\r\n elif data == 'below':\r\n furniture = Product.objects.filter(category='FU').filter(discounted_price__lt=10000)\r\n elif data == 'above':\r\n furniture = Product.objects.filter(category='FU').filter(discounted_price__gt=10000)\r\n return render(request, 'app/furniture.html', {'furniture': furniture, 'totalitem': totalitem})\r\n\r\n\r\ndef toys(request, data=None):\r\n totalitem = 0\r\n toy = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if data == None:\r\n toy = Product.objects.filter(category='TY')\r\n elif data == 'LEGO' or data == 'WireScorts':\r\n toy = Product.objects.filter(category='TY').filter(brand=data)\r\n elif data == 'below':\r\n toy = Product.objects.filter(category='TY').filter(discounted_price__lt=10000)\r\n elif data == 'above':\r\n toy = Product.objects.filter(category='TY').filter(discounted_price__gt=10000)\r\n return render(request, 'app/toys.html', {'toy': toy, 'totalitem': 
totalitem})\r\n\r\n\r\ndef fashion(request, data=None):\r\n totalitem = 0\r\n cloth = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if data == None:\r\n cloth = Product.objects.filter(category='F')\r\n elif data == 'Jockey' or data == 'Puma' or data == 'Van_Heusen' or data == 'Levi':\r\n cloth = Product.objects.filter(category='F').filter(brand=data)\r\n elif data == 'below':\r\n cloth = Product.objects.filter(category='F').filter(discounted_price__lt=10000)\r\n elif data == 'above':\r\n cloth = Product.objects.filter(category='F').filter(discounted_price__gt=10000)\r\n return render(request, 'app/fashion.html', {'cloth': cloth, 'totalitem': totalitem})\r\n\r\n\r\ndef appliance(request, data=None):\r\n totalitem = 0\r\n appliances = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if data == None:\r\n appliances = Product.objects.filter(category='HA')\r\n elif data == 'Philips' or data == 'Samsung' or data == 'Prestige':\r\n appliances = Product.objects.filter(category='HA').filter(brand=data)\r\n elif data == 'below':\r\n appliances = Product.objects.filter(category='HA').filter(discounted_price__lt=10000)\r\n elif data == 'above':\r\n appliances = Product.objects.filter(category='HA').filter(discounted_price__gt=10000)\r\n return render(request, 'app/homeappliances.html', {'appliances': appliances, 'totalitem': totalitem})\r\n\r\n\r\ndef search(request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n if request.method == 'POST':\r\n sea = request.POST['search']\r\n final = Product.objects.filter(description__icontains=sea) | Product.objects.filter(title__icontains=sea)\r\n return render(request, 'app/search.html',{'final':final,'sea':sea, 'totalitem': totalitem})\r\n else:\r\n return render(request, 'app/search.html')\r\n\r\nclass CustomerRegistrationView(View):\r\n def get(self, request):\r\n form = CustomerRegistrationForm()\r\n return render(request, 'app/customerregistration.html', {'form': form})\r\n\r\n def post(self, request):\r\n form = CustomerRegistrationForm(request.POST)\r\n if form.is_valid():\r\n messages.success(request, 'Congratulations!! Registered Successfully.')\r\n form.save()\r\n return render(request, 'app/customerregistration.html', {'form': form})\r\n\r\n\r\n@method_decorator(login_required, name='dispatch')\r\nclass ProfileView(View):\r\n def get(self, request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n form = CustomerProfileForm()\r\n return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})\r\n\r\n def post(self, request):\r\n totalitem = 0\r\n if request.user.is_authenticated:\r\n totalitem = len(Cart.objects.filter(user=request.user))\r\n form = CustomerProfileForm(request.POST)\r\n if form.is_valid():\r\n usr = request.user\r\n name = form.cleaned_data['name']\r\n locality = form.cleaned_data['locality']\r\n city = form.cleaned_data['city']\r\n state = form.cleaned_data['state']\r\n zipcode = form.cleaned_data['zipcode']\r\n reg = Customer(user=usr, name=name, locality=locality, city=city, state=state, zipcode=zipcode)\r\n reg.save()\r\n messages.success(request, 'Congratulations!! 
Profile Updated Successfully.')\r\n        return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})\r\n\r\n\r\ndef contact(request):\r\n    return render(request,'app/contact.html')\r\n\r\ndef thankyou(request):\r\n    return render(request,'app/thankyou.html')\r\n\r\ndef aboutus(request):\r\n    return render(request,'app/aboutus.html')\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sanskarkakde14/SmartBuy_E-Commerce","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"13256840696","text":"import sys\nimport urllib.request\nimport csv\n\nif (len(sys.argv) < 2):\n\tprint('first arg is input file name')\n\texit()\n\ninputFile = sys.argv[1]\ncounter = 0\n\nwith open(inputFile, 'r') as csvfile:\n\tcsvReader = csv.reader(csvfile)\n\tfor row in csvReader:\n\t\tprint(row)\n\t\ttweet = row[0].lower()\n\t\tif '#' in tweet or '@' in tweet:\n\t\t\tcontinue\n\t\tgroup = 'yes' if 'yes' in tweet else 'no'\n\t\turllib.request.urlretrieve(row[1], 'data/'+group+'/'+group+'_'+str(counter)+'.jpg')\n\t\tcounter = counter + 1\n","repo_name":"alex9311/alex9311.github.io","sub_path":"code-projects/twitter-scraper/get-training-data.py","file_name":"get-training-data.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"71970270915","text":"import time\nfrom pyrep import VRep\nfrom Behaviour import *\nfrom Parameter import *\n\n# contextlib\n# simpy\n# multiprocessing cpu\n\n\nwith VRep.connect(\"127.0.0.1\", 19997) as vrep:\n\t# vrep.simulation.stop()\n\t# time.sleep(2)\n\t# vrep.simulation.start()\n\n\tsma1 = vrep.joint.spherical(\"sp_joint\")\n\tsma2 = vrep.joint.spherical(\"sp_joint2\")\n\tsma_list = [sma1, sma2]\n\t# improve later........\n\tsma_par1 = Parameter('sma')\n\tsma_par2 = Parameter('sma')\n\tsma_par_list = [sma_par1, sma_par2]\n\n\t#######################\n\t# flags and constants #\n\t#######################\n\tstate = -1\n\tidle_time = 4\n\tidle_gap = 5\n\tactive_gap = 1\n\n\tidle_event_start_time = time.time()\n\tactive_event_start_time = time.time()\n\n\tstate_start_time = time.time()\n\n\ttrigger_ls = []\n\tactive_ls = []\n\n\ttrigger_index = 0\n\n\tbehaviour = Behaviour()\n\tprint('Simulation starts')\n\tprint('*************')\n\twhile True:\n\t\t#########################################\n\t\t#  Get triggered locations and numbers  #\n\t\t#########################################\n\n\t\t# for sma_id in sma_patches:\n\t\t# \tir_out = sma_patches[sma_id].get_block_output('IRSensor_1', 'ir')\n\t\t# \tprint str(sma_id) + ':' + str(ir_out.value)\n\t\t# \tif ir_out.value == 1:\n\t\t# \t\ttrigger_key.append(sma_id)\n\t\t# \t\tcoordinate = find_location(location_map)\n\n\t\tif len(trigger_ls) == 0 and time.time() - state_start_time >= idle_time:\n\t\t\t# Idle state\n\t\t\t# Update start time only at state transitions\n\t\t\tif state != 0:\n\t\t\t\tstate = 0\n\t\t\t\tidle_event_start_time = time.time()\n\t\t\t\tprint('state = ' + str(state))\n\t\telif len(trigger_ls) > 0:\n\t\t\t# Active state\n\t\t\t# activate blocks\n\t\t\tif state != 1:\n\t\t\t\tstate = 1\n\t\t\t\tactive_event_start_time = time.time()\n\t\t\t\tprint('state = ' + str(state))\n\n\t\tif state == 0:\n\t\t\t# random delay\n\t\t\tidle_event_start_time, active_ls = idle_random_event(sma_list, sma_par_list, idle_event_start_time, active_ls)\n\n\t\tif state == 1:\n\t\t\t# 
propagate the action\n\t\t\t# assume ONLY ONE sensor is triggered each time\n\t\t\tactive_event_start_time, active_ls = active_event(trigger_index, sma_list, sma_par_list, idle_event_start_time,)\n\n# vrep.simulation.stop()\n\n","repo_name":"daiweiLin/LAS_Project_TestField","sub_path":"simulator/V_REP_test.py","file_name":"V_REP_test.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23620813641","text":"\r\nimport itertools\r\n\r\ndef sum(a):\r\n sum = 0\r\n for b in a:\r\n sum += b\r\n return sum\r\n\r\ndef brokenSum(a):\r\n sum = 0\r\n for b in a:\r\n sum ^= b\r\n return sum\r\n\r\nif __name__ == '__main__':\r\n inFile = open('C-small-attempt1.in', 'r')\r\n outFile = open('C-small-attempt1.out', 'w')\r\n numCases = int(inFile.readline())\r\n print(\"Cases:\", numCases)\r\n for case in range(numCases):\r\n outFile.write('Case #{0}: '.format(case + 1))\r\n\r\n inFile.readline() # discard\r\n \r\n candy = inFile.readline().rsplit()\r\n for c in range(len(candy)):\r\n candy[c] = int(candy[c])\r\n print(candy)\r\n a = brokenSum(candy)\r\n if a != 0:\r\n result = 'NO'\r\n else:\r\n m = 0\r\n for bb in range(len(candy) // 2):\r\n for i in itertools.combinations(candy, bb + 1):\r\n ii = itertools.filterfalse(lambda x: x in i, candy)\r\n if brokenSum(i) ^ a == brokenSum(i):\r\n m = max((m, sum(i), sum(ii)))\r\n if m == 0:\r\n result = 'NO'\r\n else:\r\n result = m\r\n \r\n print('result:', result)\r\n outFile.write(str(result) + '\\n')\r\n outFile.close()\r\n inFile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_76/546.py","file_name":"546.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23402127401","text":"import sys\n\ndef draw(r):\n\treturn (2*r) + 1\n\t\nout_file = open('output.out', 'w+')\nin_file = open('A-small-attempt0.in', 'r+')\nnum_cases = int(in_file.readline())\n\nfor c in range(1, num_cases+1):\n\tline = in_file.readline().strip('\\n').split()\n\n\tradio = int(line[0])\n\tpaint = int(line[1])\n\tcircles = 0\n\t\n\tpainting = draw(radio)\n\tpaint -= painting\n\t\n\twhile paint >= 0:\n\t\tpainting += 4\n\t\tcircles += 1\t\t\n\t\tpaint -= painting\n\t\t\n\tcase = 'Case #'+str(c)+': ' + str(circles)\n\tout_file.write(case+'\\n')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_120/972.py","file_name":"972.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36611832266","text":"import json\nimport os\nimport shutil\n\nimport pandas as pd\n\nfrom src.exception import *\n\n\ndef load_json(filepath, filename, should_raise=False):\n \"\"\"\n Loads JSON file as a dictionary.\n \"\"\"\n fn = filename.replace(\".json\", \"\")\n try:\n with open(f\"{filepath}/{fn}.json\", \"r\") as jsf:\n data = json.load(jsf)\n except FileNotFoundError as e:\n if not should_raise:\n data = {}\n else:\n raise e\n\n return data\n\n\ndef save_json(filepath, filename, data, **kwargs):\n \"\"\"\n Saves dictionary as a JSON file.\n \"\"\"\n with open(f\"{filepath}/{filename}.json\", \"w\") as jsf:\n json.dump(data, jsf, **kwargs)\n\n\ndef update_json(filepath, filename, data, **kwargs):\n \"\"\"\n Updates the existing json with the provided data. 
If the\n    specified json does not exist, creates it.\n    \"\"\"\n    previous_data = load_json(filepath, filename)\n    updated_data = {**previous_data, **data}\n    save_json(filepath, filename, updated_data, **kwargs)\n\n\ndef show_json(object, **kwargs):\n    return json.dumps(object, **kwargs)\n\n\ndef listdir(path, filter_files=False):\n    \"\"\"\n    Lists files and directories in the given path.\n    \"\"\"\n    try:\n        folders = os.listdir(path)\n        if filter_files:\n            folders = _filter_files(folders)\n        folders.sort()\n\n    except FileNotFoundError:\n        folders = []\n\n    return folders\n\n\ndef check_file(fname, path):\n    \"\"\"\n    Checks if a file exists in the given path.\n    \"\"\"\n    return fname in os.listdir(path)\n\n\ndef create_folder(path, folder_name):\n    \"\"\"\n    Creates a folder in that path with the given name.\n    If the folder already exists, does nothing.\n    \"\"\"\n\n    files = listdir(path)\n    if folder_name not in files:\n        pw = path[:-1] if path.endswith(\"/\") else path\n        try:\n            os.mkdir(f\"{pw}/{folder_name}/\")\n        except FileExistsError:\n            pass\n\n\ndef delete_folder(path, folder_name):\n    \"\"\"\n    Deletes the folder in the given path.\n    \"\"\"\n    shutil.rmtree(f\"{path}/{folder_name}\")\n\n\ndef remove_file(filepath):\n    \"\"\"\n    Deletes a file. If the file is not found or the given\n    path corresponds to a directory, does nothing.\n    \"\"\"\n\n    try:\n        os.remove(filepath)\n    except FileNotFoundError:\n        pass\n    except IsADirectoryError:\n        pass\n\n\ndef read_ratings_as_list(filepath, filename):\n    \"\"\"\n    Read ratings file as a list.\n    \"\"\"\n    path = f\"{filepath}/{filename}\"\n    ratingList = []\n\n    with open(path, \"r\") as f:\n        line = f.readline()\n\n        while line != None and line != \"\":\n            arr = line.split(\"\\t\")\n            user, item = arr[0], arr[1]\n            ratingList.append([user, item])\n            line = f.readline()\n\n    return ratingList\n\n\ndef read_negative_file(filepath, filename):\n    path = f\"{filepath}/{filename}\"\n    negativeList = []\n\n    with open(path, \"r\") as f:\n        line = f.readline()\n\n        while line != None and line != \"\":\n            arr = line[:-1].split(\"\\t\")\n            negatives = arr[1:]\n            negativeList.append(negatives)\n            line = f.readline()\n\n    return negativeList\n\n\ndef save_triples(path, filename, triples):\n    \"\"\"\n    Saves triple records as a tab-separated file without header or index.\n    \"\"\"\n    df = pd.DataFrame.from_records(triples)\n    df.to_csv(f\"{path}/{filename}\", sep=\"\\t\", header=None, index=False)\n\n\ndef save_negatives(path, filename, negatives):\n    \"\"\"\n    Saves each negative sample list as one tab-separated line.\n    \"\"\"\n    with open(f\"{path}/{filename}\", \"w\") as f_out:\n        for negative in negatives:\n            f_out.write(\"\\t\".join(negative))\n            f_out.write(\"\\n\")\n\n\ndef _filter_files(files):\n    \"\"\"\n    Discards files when listing folders.\n    \"\"\"\n    filtered = []\n    for f in files:\n        if f == \".gitkeep\" or f.endswith(\".zip\") or f.endswith(\".json\"):\n            continue\n\n        filtered.append(f)\n\n    return filtered\n","repo_name":"datamining-ufcg/pretraining-recommenders","sub_path":"src/io_util.py","file_name":"io_util.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43151701216","text":"from tempfile import NamedTemporaryFile\nimport pandas as pd\nimport streamlit as st\nimport pysrt\nimport re\nimport pickle\nimport nltk\n\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom pymystem3 import Mystem\n\nst.title('Determining the English language level of films')\nst.header('Upload a film subtitle file and the algorithm will determine the level:')\n\n\ndef load(file):\n    
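# a minimal unpickling helper; 'model.pcl' and 'vectorizer.pcl' below are assumed to have been fitted and saved offline\n    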
with open(file, 'rb') as fid:\n return pickle.load(fid)\n\n\nlogreg = load('model.pcl')\n\nvectorizer = load('vectorizer.pcl')\n\nuploaded_file = st.file_uploader(\"Choose a .srt file\", type=\"srt\")\n\nif uploaded_file is not None:\n with NamedTemporaryFile(suffix='.srt', delete=False) as tempfile:\n tempfile.write(uploaded_file.getbuffer())\n sub = pysrt.open(tempfile.name, encoding='utf-8')\n texts = sub.text\n\n\n def prepare_text(text):\n del_n = re.compile('\\n')\n del_tags = re.compile('<[^>]*>')\n del_brackets = re.compile('\\([^)]*\\)')\n clean_text = re.compile('[^а-яa-z\\s]')\n del_spaces = re.compile('\\s{2,}')\n text = del_n.sub(' ', str(text).lower())\n text = del_tags.sub('', text)\n text = del_brackets.sub('', text)\n res_text = clean_text.sub('', text)\n return del_spaces.sub(' ', res_text)\n\n\n stop_words = stopwords.words('english')\n\n\n def del_stopwords(text):\n clean_tokens = tuple(\n map(lambda x: x if x not in stop_words else '', word_tokenize(text))\n )\n res_text = ' '.join(clean_tokens)\n return res_text\n\n\n m = Mystem()\n\n\n def lemmatize(text):\n lemmatized_text = ''.join(m.lemmatize(text))\n return lemmatized_text.split('|')\n\n\n def transformation(subs):\n clean_sub_string = prepare_text(subs)\n clean_sub_string = del_stopwords(clean_sub_string)\n clean_sub_string = lemmatize(clean_sub_string)\n\n return clean_sub_string[0]\n\n\n words_a1 = ['through', 'than', 'nine', 'bar', 'internet', 'chicken', 'kilometre', 'door', 'interest', 'quarter',\n 'like', 'email', 'high', 'guess', 'nose', 'upstairs', 'best', 'capital', 'cut', 'east', 'half', 'below',\n 'eighteen', 'onion', 'park', 'favorite', 'people', 'positive', 'sit', 'smart', 'project', 'December',\n 'never', 'science', 'usually', 'could', 'supermarket', 'piano', 'please', 'his', 'own', 'programme',\n 'player', 'office', 'hotel', 'pig', 'work', 'change', 'see', 'culture', 'find', 'Friday', 'live',\n 'bike', 'quickly', 'sea', 'tourist', 'son', 'bedroom', 'aunt', 'job', 'umbrella', 'bored', 'hand',\n 'send', 'as', 'miss', 'write', 'bag', 'open', 'egg', 'club', 'common', 'thank', 'cup', 'evening',\n 'fifth', 'banana', 'week', 'before', 'believe', 'market', 'Tuesday', 'room', 'yellow', 'without',\n 'thanks', 'all', 'list', 'explain', 'front', 'dog', 'April', 'nothing', 'check', 'useful', 'meaning',\n 'still', 'easy', 'bird', 'plan', 'hard', 'hungry', 'any', 'build', 'far', 'pay', 'relax', 'August',\n 'bus', 'complete', 'sure', 'expensive', 'us', 'pants', 'much', 'quick', 'nurse', 'every', 'terrible',\n 'farmer', 'study', 'teenager', 'come', 'milk', 'coat', 'nobody', 'take', 'nice', 'weather', 'ago',\n 'love', 'afternoon', 'great', 'eat', 'partner', 'sweater', 'sick', 'black', 'goodbye', 'theatre',\n 'Thursday', 'me', 'rain', 'bath', 'together', 'million', 'dad', 'away', 'sister', 'near', 'OK', 'shirt',\n 'two', 'basketball', 'jacket', 'painting', 'ready', 'dress', 'Sunday', 'month', 'to', 'else', 'body',\n 'dirty', 'description', 'false', 'choose', 'possible', 'reading', 'building', 'poor', 'beer', 'head',\n 'course', 'grandfather', 'wrong', 'girl', 'kind', 'friendly', 'wake', 'tonight', 'understand',\n 'delicious', 'future', 'use', 'title', 'forget', 'baby', 'minute', 'hospital', 'food', 'excited',\n 'opinion', 'color', 'perfect', 'sing', 'student', 'worker', 'shoe', 'teacher', 'fill', 'trousers',\n 'grow', 'married', 'dark', 'above', 'Monday', 'return', 'phrase', 'fruit', 'waiter', 'play', 'strong',\n 'vegetable', 'rice', 'bank', 'career', 'we', 'eleven', 'meal', 'period', 'metre', 'it', 'anyone',\n 
'history', 'again', 'taxi', 'warm', 'pretty', 'early', 'feel', 'doctor', 'snake', 'favourite', 'free',\n 'everybody', 'October', 'would', 'house', 'water', 'well', 'angry', 'space', 'time', 'when', 'do',\n 'man', 'music', 'TV', 'flat', 'walk', 'face', 'or', 'difficult', 'always', 'online', 'reader', 'who',\n 'paint', 'sugar', 'buy', 'driver', 'want', 'morning', 'life', 'slow', 'prepare', 'euro', 'bread',\n 'back', 'pepper', 'juice', 'brother', 'autumn', 'stand', 'dear', 'June', 'I', 'journey', 'begin',\n 'tree', 'art', 'healthy', 'key', 'across', 'turn', 'blog', 'oh', 'long', 'big', 'ocean', 'not', 'film',\n 'passport', 'festival', 'program', 'say', 'bring', 'actor', 'blond', 'compare', 'car', 'mom', 'famous',\n 'cheese', 'end', 'soon', 'my', 'day', 'no one', 'radio', 'large', 'south', 'few', 'city', 'airport',\n 'cream', 'topic', 'leave', 'lesson', 'natural', 'red', 'salad', 'midnight', 'them', 'writing', 'listen',\n 'way', 'modern', 'vacation', 'blonde', 'cousin', 'trip', 'their', 'put', 'grey', 'tomorrow', 'street',\n 'amazing', 'practise', 'chair', 'gym', 'paragraph', 'idea', 'spend', 'can', 'enough', 'land', 'her',\n 'lion', 'tell', 'language', 'baseball', 'swimming', 'foot', 'sound', 'fast', 'she', 'video', 'left',\n 'bad', 'boyfriend', 'will', 'funny', 'the', 'no', 'object', 'four', 'meeting', 'make', 'popular',\n 'learn', 'holiday', 'glass', 'place', 'diet', 'twice', 'drink', 'three', 'beginning', 'everyone',\n 'some', 'product', 'shower', 'dictionary', 'husband', 'movie', 'statement', 'maybe', 'picture',\n 'design', 'think', 'policeman', 'visit', 'college', 'so', 'remember', 'road', 'table', 'improve', 'ice',\n 'wear', 'break', 'menu', 'actress', 'interview', 'scientist', 'fact', 'light', 'moment', 'cafe', 'here',\n 'artist', 'quiet', 'adult', 'country', 'have', 'lunch', 'awesome', 'carry', 'imagine', 'traffic', 'off',\n 'line', 'September', 'feeling', 'quite', 'sport', 'family', 'February', 'probably', 'restaurant',\n 'chart', 'action', 'between', 'hot', 'for', 'join', 'green', 'fish', 'bye', 'kitchen', 'soup', 'they',\n 'ask', 'pink', 'each', 'once', 'T-shirt', 'test', 'mouse', 'river', 'travel', 'note', 'ball', 'create',\n 'answer', 'thousand', 'another', 'plane', 'price', 'keep', 'magazine', 'match', 'its', 'spelling',\n 'theater', 'sun', 'into', 'area', 'shop', 'island', 'call', 'coffee', 'telephone', 'spring', 'farm',\n 'text', 'tooth', 'decide', 'fire', 'shopping', 'by', 'at', 'business', 'dish', 'name', 'phone', 'even',\n 'your', 'garden', 'report', 'sixty', 'ever', 'fly', 'rule', 'show', 'skill', 'wonderful', 'important',\n 'newspaper', 'brown', 'short', 'arrive', 'date', 'hair', 'pair', 'just', 'interested', 'mother',\n 'negative', 'second', 'style', 'article', 'cook', 'father', 'if', 'seven', 'type', 'mountain', 'enjoy',\n 'up', 'boot', 'very', 'bottle', 'cost', 'help', 'tea', 'forty', 'dinner', 'song', 'winter', 'down',\n 'fantastic', 'follow', 'move', 'nineteen', 'sorry', 'person', 'something', 'prefer', 'hat', 'behind',\n 'cold', 'draw', 'dollar', 'lot', 'sell', 'more', 'party', 'after', 'interesting', 'ten', 'gray',\n 'customer', 'wife', 'guitar', 'one', 'because', 'he', 'hundred', 'little', 'book', 'cannot', 'member',\n 'mouth', 'university', 'someone', 'reason', 'CD', 'health', 'police', 'purple', 'must', 'truck',\n 'chocolate', 'world', 'cent', 'real', 'concert', 'cow', 'also', 'this', 'become', 'grandparent',\n 'photo', 'hi', 'personal', 'exciting', 'practice', 'young', 'where', 'but', 'yesterday', 'point',\n 'drive', 'camera', 'yard', 'thing', 'eye', 'often', 
'machine', 'age', 'card', 'success', 'thirty',\n 'writer', 'computer', 'good', 'full', 'library', 'section', 'six', 'station', 'centre', 'museum',\n 'thirsty', 'everything', 'happy', 'most', 'cat', 'really', 'somebody', 'outside', 'store', 'thirteen',\n 'neighbor', 'west', 'hate', 'first', 'final', 'center', 'wall', 'describe', 'information', 'repeat',\n 'eight', 'pen', 'there', 'introduce', 'seventy', 'same', 'clothes', 'game', 'late', 'mile', 'flight',\n 'letter', 'mall', 'special', 'that', 'order', 'o’clock', 'football', 'then', 'colour', 'DVD', 'piece',\n 'let', 'around', 'village', 'fifteen', 'woman', 'situation', 'team', 'cool', 'fourteen', 'fine',\n 'butter', 'hello', 'add', 'beautiful', 'post', 'similar', 'map', 'year', 'discuss', 'seventeen',\n 'exercise', 'eighty', 'sentence', 'summer', 'fourth', 'leg', 'opposite', 'should', 'sixteen', 'win',\n 'air', 'our', 'pound', 'snow', 'stop', 'ticket', 'blue', 'what', 'home', 'problem', 'teach', 'both',\n 'downstairs', 'animal', 'group', 'correct', 'right', 'tired', 'today', 'cinema', 'start', 'Wednesday',\n 'on', 'example', 'elephant', 'grandmother', 'ear', 'him', 'new', 'small', 'visitor', 'sometimes',\n 'with', 'however', 'horse', 'true', 'need', 'January', 'wait', 'singer', 'sleep', 'finish', 'message',\n 'clock', 'how', 'band', 'try', 'word', 'century', 'hear', 'story', 'dangerous', 'jeans', 'go', 'cake',\n 'hour', 'star', 'under', 'neighborhood', 'ride', 'meet', 'last', 'you', 'activity', 'laugh', 'stay',\n 'news', 'old', 'part', 'boy', 'arm', 'homework', 'white', 'fifty', 'over', 'get', 'March', 'afraid',\n 'present', 'only', 'tomato', 'form', 'difference', 'weekend', 'north', 'past', 'bicycle', 'salt',\n 'yourself', 'from', 'watch', 'desk', 'plant', 'detail', 'school', 'different', 'dance', 'swim',\n 'toilet', 'cooking', 'company', 'and', 'third', 'climb', 'include', 'die', 'exam', 'fall', 'lie',\n 'routine', 'boring', 'during', 'pencil', 'skirt', 'orange', 'happen', 'hope', 'pool', 'anything', 'too',\n 'sheep', 'train', 'know', 'extra', 'carrot', 'clean', 'fun', 'main', 'which', 'dancing', 'lose', 'yes',\n 'give', 'mum', 'May', 'child', 'dancer', 'mean', 'model', 'spell', 'subject', 'look', 'television',\n 'cheap', 'sad', 'be', 'read', 'page', 'photograph', 'advice', 'November', 'bed', 'tall', 'busy',\n 'money', 'now', 'number', 'of', 'hobby', 'floor', 'flower', 'night', 'birthday', 'town', 'address',\n 'ninety', 'uncle', 'local', 'other', 'window', 'agree', 'dialogue', 'speak', 'hey', 'beach', 'meat',\n 'geography', 'share', 'mistake', 'until', 'close', 'parent', 'about', 'neighbour', 'paper', 'classroom',\n 'fat', 'talk', 'box', 'later', 'Saturday', 'yeah', 'sandwich', 'conversation', 'run', 'twenty',\n 'welcome', 'question', 'in', 'July', 'class', 'apartment', 'twelve', 'event', 'tennis', 'bathroom',\n 'better', 'breakfast', 'website', 'wine', 'out', 'result', 'five', 'next', 'girlfriend', 'potato',\n 'bill', 'many', 'born', 'friend', 'rich', 'wash', 'daughter', 'meter', 'apple', 'boat', 'why']\n\n words_a2 = ['hill', 'oil', 'petrol', 'refuse', 'bar', 'cause', 'luck', 'pattern', 'equipment', 'argue', 'lost',\n 'neck', 'appearance', 'realize', 'vehicle', 'cookie', 'tower', 'badly', 'complain', 'biology', 'almost',\n 'medical', 'ground', 'smoke', 'award', 'distance', 'already', 'discussion', 'high', 'lab', 'material',\n 'movement', 'oven', 'best', 'gate', 'appear', 'survey', 'upstairs', 'bone', 'jewelry', 'produce',\n 'half', 'recent', 'media', 'while', 'informal', 'sign', 'printer', 'offer', 'injury', 'nowhere',\n 
'unfortunately', 'track', 'suggest', 'cartoon', 'comedy', 'thief', 'straight', 'polite', 'bottom',\n 'please', 'his', 'own', 'average', 'bit', 'except', 'missing', 'platform', 'penny', 'mathematics',\n 'loud', 'lamp', 'hall', 'avoid', 'definitely', 'foreign', 'desert', 'frog', 'figure', 'successful',\n 'sea', 'perhaps', 'series', 'billion', 'essay', 'predict', 'condition', 'perform', 'as', 'stair',\n 'prison', 'round', 'leader', 'pain', 'less', 'brain', 'before', 'normally', 'fan', 'single', 'soap',\n 'habit', 'danger', 'exactly', 'site', 'ski', 'soldier', 'grass', 'energy', 'heavy', 'all', 'typical',\n 'shut', 'underground', 'compete', 'usual', 'check', 'professor', 'abroad', 'arrange', 'soft', 'illness',\n 'reach', 'poster', 'any', 'pay', 'sadly', 'empty', 'wooden', 'yet', 'character', 'quality', 'sure',\n 'pants', 'industry', 'heat', 'researcher', 'alone', 'manner', 'bin', 'greet', 'pass', 'succeed',\n 'broken', 'available', 'along', 'driving', 'weak', 'degree', 'themselves', 'according', 'ancient',\n 'used', 'president', 'understanding', 'cross', 'somewhere', 'simple', 'transport', 'ship', 'ability',\n 'decision', 'furniture', 'jump', 'musician', 'crowd', 'itself', 'knowledge', 'basketball', 'clerk',\n 'queen', 'react', 'van', 'touch', 'noisy', 'electric', 'sir', 'although', 'chance', 'error', 'guide',\n 'male', 'stamp', 'lovely', 'replace', 'percent', 'blood', 'attend', 'professional', 'purpose', 'scary',\n 'smartphone', 'step', 'speaker', 'ourselves', 'button', 'future', 'use', 'taste', 'downtown', 'receive',\n 'passenger', 'suggestion', 'plate', 'refrigerator', 'region', 'tourism', 'since', 'finally', 'mobile',\n 'intelligent', 'lifestyle', 'dark', 'bridge', 'fever', 'god', 'herself', 'pet', 'pick', 'spider',\n 'promise', 'permission', 'national', 'variety', 'direct', 'teaching', 'strange', 'alive', 'garbage',\n 'fail', 'daily', 'hers', 'sailing', 'goal', 'stone', 'criminal', 'target', 'connect', 'free',\n 'individual', 'zero', 'railway', 'record', 'tablet', 'certain', 'involve', 'tradition', 'feature',\n 'extremely', 'general', 'everyday', 'comment', 'race', 'cloud', 'safe', 'publish', 'flat', 'candy',\n 'suddenly', 'allow', 'per', 'express', 'awful', 'back', 'birth', 'continue', 'mark', 'state', 'notice',\n 'finger', 'dear', 'experiment', 'king', 'climate', 'athlete', 'respond', 'side', 'salary', 'dead',\n 'assistant', 'case', 'count', 'search', 'ocean', 'toy', 'lemon', 'factor', 'power', 'film', 'chat',\n 'seem', 'employer', 'thinking', 'program', 'ill', 'onto', 'crime', 'competition', 'brilliant', 'chef',\n 'dessert', 'colleague', 'enter', 'sneaker', 'accept', 'cycle', 'fashion', 'huge', 'rock', 'term', 'top',\n 'ideal', 'affect', 'repair', 'shall', 'behavior', 'cry', 'mirror', 'fridge', 'wave', 'engine', 'score',\n 'instruction', 'solution', 'whole', 'brush', 'schedule', 'lady', 'clothing', 'moon', 'journalist',\n 'singing', 'runner', 'can', 'identify', 'print', 'impossible', 'land', 'architecture', 'learning',\n 'baseball', 'anyway', 'church', 'tour', 'review', 'digital', 'divorced', 'request', 'experience',\n 'route', 'several', 'worst', 'personality', 'himself', 'continent', 'size', 'uniform', 'holiday',\n 'quietly', 'secondly', 'height', 'suppose', 'toward', 'knife', 'carpet', 'method', 'training', 'unit',\n 'borrow', 'closet', 'pronounce', 'shout', 'farming', 'light', 'narrow', 'response', 'traveler', 'metal',\n 'disease', 'adult', 'have', 'drug', 'burn', 'clever', 'musical', 'deep', 'thought', 'stress', 'lend',\n 'blow', 'sheet', 'instead', 'symbol', 'between', 
'advantage', 'kill', 'fish', 'further', 'device',\n 'medicine', 'audience', 'gap', 'normal', 'camping', 'chemistry', 'rise', 'cupboard', 'golf', 'shape',\n 'disagree', 'weight', 'reduce', 'active', 'inside', 'deal', 'electricity', 'hurt', 'screen',\n 'explanation', 'novel', 'closed', 'surprised', 'matter', 'maths', 'network', 'motorcycle', 'organize',\n 'specific', 'text', 'farm', 'fire', 'pleased', 'context', 'image', 'quantity', 'businessman', 'social',\n 'trouble', 'worried', 'belt', 'save', 'expect', 'gallery', 'report', 'reporter', 'prize', 'fit',\n 'airline', 'fly', 'sense', 'support', 'wild', 'stomach', 'bright', 'cell', 'secretary',\n 'transportation', 'ending', 'technology', 'board', 'none', 'particular', 'second', 'plastic', 'care',\n 'cook', 'castle', 'rest', 'extreme', 'profile', 'engineer', 'detective', 'fork', 'arrangement',\n 'reception', 'square', 'teenage', 'ankle', 'cash', 'copy', 'effect', 'joke', 'least', 'rubbish',\n 'campus', 'user', 'expression', 'math', 'electronic', 'unhappy', 'biscuit', 'invite', 'towel', 'chip',\n 'recipe', 'field', 'soccer', 'drop', 'asleep', 'bear', 'alternative', 'after', 'mostly', 'temperature',\n 'war', 'tie', 'owner', 'coach', 'steal', 'level', 'nature', 'little', 'book', 'overseas', 'dry',\n 'anymore', 'anywhere', 'original', 'push', 'advertise', 'twin', 'argument', 'neither', 'wedding',\n 'wood', 'worse', 'view', 'tidy', 'background', 'seat', 'attack', 'direction', 'lazy', 'smoking',\n 'population', 'recycle', 'truck', 'hole', 'secret', 'recognize', 'position', 'cooker', 'bean', 'corner',\n 'washing', 'analyze', 'adventure', 'employee', 'clear', 'data', 'immediately', 'sweet', 'happily',\n 'charity', 'develop', 'guy', 'drive', 'gift', 'contain', 'progress', 'rude', 'such', 'director',\n 'couple', 'hide', 'advertising', 'boss', 'document', 'likely', 'lucky', 'accident', 'good', 'surprise',\n 'download', 'act', 'camp', 'helpful', 'forest', 'physics', 'sort', 'fix', 'gas', 'thick', 'outside',\n 'heart', 'mine', 'store', 'invitation', 'conference', 'web', 'invention', 'scene', 'role', 'beef',\n 'guest', 'regular', 'laptop', 'wow', 'first', 'jazz', 'attention', 'fight', 'final', 'manager',\n 'collect', 'column', 'knock', 'patient', 'wish', 'exact', 'protect', 'physical', 'kid', 'ordinary',\n 'myself', 'gun', 'against', 'process', 'crazy', 'nervous', 'wide', 'village', 'service', 'nearly',\n 'actually', 'smell', 'rate', 'ah', 'easily', 'independent', 'instructor', 'insect', 'worry', 'earn',\n 'bowl', 'hero', 'option', 'headache', 'catch', 'exist', 'invent', 'skin', 'amount', 'correctly',\n 'pollution', 'recommend', 'stove', 'listener', 'shoulder', 'may', 'anybody', 'expert', 'international',\n 'fresh', 'depend', 'thin', 'fear', 'nut', 'society', 'whose', 'diary', 'subway', 'surprising', 'low',\n 'home', 'grocery', 'death', 'running', 'earth', 'clearly', 'downstairs', 'major', 'tip', 'raise',\n 'memory', 'following', 'refer', 'strategy', 'traditional', 'lorry', 'valley', 'railroad', 'either',\n 'start', 'mind', 'female', 'pub', 'ring', 'especially', 'attractive', 'circle', 'discover', 'focus',\n 'mail', 'classical', 'possibility', 'sky', 'fishing', 'drama', 'virus', 'need', 'speed', 'jam', 'pop',\n 'sleep', 'completely', 'environment', 'finish', 'incredible', 'wait', 'consider', 'winner', 'double',\n 'comfortable', 'century', 'achieve', 'smile', 'scared', 'apply', 'sock', 'pocket', 'fair', 'possession',\n 'star', 'voice', 'introduction', 'provide', 'wet', 'ride', 'wheel', 'source', 'advertisement', 'stage',\n 'factory', 'last', 'drawing', 
'stay', 'destroy', 'beat', 'fortunately', 'united', 'necessary',\n 'careful', 'choice', 'electrical', 'separate', 'behave', 'based', 'belong', 'present', 'serious',\n 'past', 'skiing', 'stupid', 'plant', 'disaster', 'behaviour', 'parking', 'fiction', 'pilot', 'designer',\n 'yours', 'working', 'increase', 'eader', 'slowly', 'gold', 'third', 'jewellery', 'fall', 'coast',\n 'manage', 'mention', 'department', 'instrument', 'app', 'pack', 'recording', 'system', 'hope', 'trash',\n 'education', 'knee', 'palace', 'benefit', 'blank', 'included', 'train', 'certainly', 'fun', 'sail',\n 'lift', 'trainer', 'cover', 'human', 'noise', 'rather', 'pull', 'employ', 'connected', 'discovery',\n 'shake', 'look', 'dream', 'hold', 'cigarette', 'firstly', 'feed', 'credit', 'hockey', 'photograph',\n 'recently', 'army', 'evidence', 'number', 'suit', 'hit', 'planet', 'sale', 'crowded', 'unusual',\n 'officer', 'lead', 'laughter', 'middle', 'mayor', 'roof', 'painter', 'serve', 'might', 'elevator',\n 'author', 'including', 'celebrity', 'opportunity', 'carefully', 'prevent', 'throw', 'excellent',\n 'disappear', 'marry', 'lawyer', 'celebrate', 'community', 'close', 'kilometer', 'communicate', 'reply',\n 'sauce', 'lock', 'flu', 'storm', 'fat', 'remove', 'talk', 'lecture', 'later', 'architect', 'creative',\n 'forward', 'towards', 'run', 'speech', 'spoon', 'link', 'government', 'dentist', 'welcome', 'enormous',\n 'flying', 'monkey', 'question', 'loudly', 'structure', 'law', 'able', 'relationship', 'solve', 'task',\n 'traveller', 'item', 'code', 'organization', 'research', 'better', 'differently', 'everywhere', 'wind',\n 'curly', 'lake', 'public', 'peace', 'silver', 'boil', 'tool', 'wash', 'control', 'among', 'block',\n 'formal', 'season']\n\n words_b1 = ['theory', 'campaign', 'impact', 'force', 'kiss', 'impressive', 'favour', 'familiar', 'weigh', 'fasten',\n 'layer', 'hurry', 'specifically', 'coloured', 'educate', 'award', 'like', 'weapon', 'ingredient',\n 'statue', 'photography', 'cut', 'determined', 'while', 'balance', 'grain', 'talent', 'mood', 'smart',\n 'lip', 'academic', 'lay', 'poetry', 'donate', 'location', 'practical', 'automatically', 'bury',\n 'marketing', 'private', 'gentleman', 'relate', 'press', 'average', 'profession', 'except', 'category',\n 'upset', 'celebration', 'drum', 'lonely', 'immediate', 'regularly', 'generally', 'valuable', 'supply',\n 'survive', 'brand', 'freeze', 'live', 'pressure', 'percentage', 'signal', 'embarrassed', 'portrait',\n 'export', 'effective', 'hand', 'appointment', 'previous', 'trade', 'secondary', 'limit', 'earthquake',\n 'confirm', 'sensible', 'frighten', 'poem', 'wool', 'injure', 'assignment', 'discount', 'remain',\n 'safety', 'account', 'directly', 'spirit', 'frightening', 'market', 'scan', 'measure', 'horrible',\n 'historic', 'consist', 'costume', 'living', 'curtain', 'coal', 'ad', 'bend', 'receipt', 'qualify',\n 'episode', 'keyboard', 'occasion', 'aim', 'escape', 'servant', 'bell', 'guilty', 'competitor', 'bomb',\n 'amazed', 'exit', 'happiness', 'necessarily', 'supporter', 'still', 'container', 'path', 'leaf',\n 'issue', 'string', 'impression', 'far', 'automatic', 'empty', 'frame', 'volunteer', 'version', 'remind',\n 'ought', 'laboratory', 'southern', 'surface', 'suffer', 'performance', 'custom', 'authority',\n 'currency', 'pass', 'magic', 'dirt', 'historical', 'smooth', 'truth', 'confident', 'property', 'tongue',\n 'used', 'folk', 'belief', 'content', 'sand', 'poisonous', 'transport', 'identity', 'dust', 'shell',\n 'addition', 'embarrassing', 'fry', 'covered', 'prediction', 
'represent', 'touch', 'wing', 'sex',\n 'successfully', 'net', 'friendship', 'photographer', 'annoying', 'slightly', 'standard', 'contrast',\n 'expand', 'organized', 'decade', 'announce', 'documentary', 'resource', 'robot', 'head', 'wrong',\n 'kind', 'departure', 'bride', 'mixture', 'iron', 'upon', 'import', 'base', 'hunt', 'cottage', 'cotton',\n 'breathe', 'meanwhile', 'destination', 'participate', 'since', 'apart', 'politician', 'flow', 'leather',\n 'port', 'unable', 'written', 'expedition', 'production', 'script', 'fixed', 'mental', 'clue', 'highly',\n 'similarity', 'define', 'ghost', 'annoyed', 'guard', 'incredibly', 'bank', 'direct', 'relaxing',\n 'consequence', 'pin', 'shine', 'tyre', 'ugly', 'require', 'tight', 'confused', 'warm', 'daily',\n 'criminal', 'growth', 'risk', 'row', 'corn', 'gentle', 'planning', 'water', 'feature', 'eventually',\n 'edge', 'comment', 'invest', 'race', 'reaction', 'viewer', 'face', 'flour', 'employment', 'treat',\n 'backward', 'reference', 'slow', 'intend', 'expected', 'punish', 'ton', 'tiny', 'typically', 'clause',\n 'unlike', 'state', 'unless', 'repeated', 'entry', 'proud', 'statistic', 'experiment', 'software',\n 'colored', 'journey', 'nail', 'hire', 'battery', 'key', 'reflect', 'unemployed', 'count', 'surely',\n 'queue', 'disappointing', 'fur', 'originally', 'painful', 'grade', 'heavily', 'dressed', 'program',\n 'management', 'responsible', 'stuff', 'fashionable', 'ambition', 'pale', 'economy', 'poverty',\n 'technical', 'function', 'client', 'fitness', 'equally', 'improvement', 'breath', 'enemy', 'contact',\n 'development', 'garage', 'rent', 'imaginary', 'printing', 'cream', 'central', 'district',\n 'unemployment', 'throat', 'marriage', 'entrance', 'total', 'mix', 'repair', 'bite', 'complaint',\n 'protest', 'cap', 'intelligence', 'commit', 'vote', 'wave', 'talented', 'sharp', 'equal', 'countryside',\n 'whole', 'mild', 'particularly', 'confuse', 'calm', 'silent', 'exhibition', 'bee', 'peaceful',\n 'backwards', 'principal', 'worldwide', 'responsibility', 'plus', 'studio', 'tour', 'editor', 'advise',\n 'request', 'politics', 'innocent', 'experience', 'entertainment', 'will', 'tail', 'related', 'medium',\n 'glad', 'rare', 'worst', 'substance', 'federal', 'favor', 'IT', 'border', 'attraction', 'therefore',\n 'nuclear', 'toe', 'entertain', 'engaged', 'place', 'royal', 'continuous', 'trend', 'appreciate', 'warn',\n 'alcohol', 'communication', 'annoy', 'shiny', 'pan', 'slave', 'unfair', 'pretend', 'pleasure', 'gather',\n 'brave', 'achievement', 'basis', 'coin', 'leading', 'revise', 'political', 'clever', 'personally',\n 'musical', 'deep', 'effectively', 'organizer', 'symptom', 'combine', 'obviously', 'ambitious',\n 'various', 'rarely', 'spread', 'stranger', 'further', 'presentation', 'indeed', 'remote', 'grateful',\n 'liquid', 'prepared', 'recommendation', 'muscle', 'normal', 'respect', 'operation', 'cupboard', 'rise',\n 'heating', 'once', 'sink', 'deal', 'northern', 'theme', 'profit', 'note', 'application', 'disadvantage',\n 'investigate', 'promote', 'sample', 'tend', 'breathing', 'goods', 'unnecessary', 'reject', 'spring',\n 'western', 'decorate', 'fire', 'involved', 'by', 'experienced', 'perfectly', 'influence', 'encourage',\n 'throughout', 'murder', 'pot', 'fairly', 'click', 'commercial', 'waste', 'election', 'suitable', 'rule',\n 'hurricane', 'located', 'agreement', 'advanced', 'highlight', 'powerful', 'row1', 'diamond', 'rough',\n 'board', 'old-fashioned', 'uncomfortable', 'host', 'element', 'type', 'exchange', 'locate',\n 'concentrate', 'currently', 
'consumer', 'campus', 'move', 'soil', 'set', 'emotion', 'flood', 'racing',\n 'convince', 'unlikely', 'drop', 'alternative', 'definite', 'alarm', 'needle', 'prove', 'candidate',\n 'mystery', 'coach', 'content1', 'level', 'indoors', 'agent', 'overseas', 'doubt', 'prisoner', 'illegal',\n 'horror', 'switch', 'original', 'push', 'frightened', 'range', 'scientific', 'captain', 'beauty',\n 'difficulty', 'neither', 'worse', 'graduate', 'quotation', 'view', 'rope', 'stadium', 'reality',\n 'whether', 'neighbourhood', 'pour', 'indirect', 'this', 'attitude', 'underwear', 'clear', 'retire',\n 'disappointed', 'religion', 'headline', 'young', 'journal', 'point', 'basic', 'religious', 'yard',\n 'seriously', 'whenever', 'ceiling', 'tin', 'age', 'centre', 'act', 'union', 'sort', 'technique',\n 'staff', 'obvious', 'essential', 'competitive', 'mainly', 'cheerful', 'store', 'mine', 'passion',\n 'prince', 'qualification', 'mad', 'eastern', 'helicopter', 'childhood', 'assist', 'hate', 'mud',\n 'center', 'shelf', 'cloth', 'update', 'complex', 'fold', 'repeat', 'knock', 'package', 'hang',\n 'retired', 'worth', 'security', 'intention', 'mall', 'that', 'rugby', 'channel', 'explore', 'honest',\n 'excitement', 'analyse', 'judge', 'release', 'emergency', 'immigrant', 'reliable', 'loss', 'injured',\n 'spicy', 'cool', 'primary', 'tire', 'arrival', 'administration', 'cultural', 'worry', 'fascinating',\n 'producer', 'possibly', 'convenient', 'conclude', 'bake', 'connection', 'comparison', 'explode',\n 'ignore', 'keen', 'till', 'win', 'highway', 'label', 'nor', 'cruel', 'fear', 'plot', 'bubble',\n 'forever', 'tape', 'priest', 'tube', 'diagram', 'examine', 'engineering', 'admire', 'frequently',\n 'chain', 'persuade', 'previously', 'outdoors', 'careless', 'tip', 'admit', 'silly', 'raise',\n 'following', 'payment', 'file', 'tax', 'value', 'atmosphere', 'ring', 'summarize', 'punishment',\n 'indicate', 'relative', 'lack', 'champion', 'occur', 'hardly', 'leisure', 'neat', 'relaxed', 'spending',\n 'chapter', 'policy', 'need', 'alcoholic', 'killing', 'global', 'analysis', 'shift', 'double',\n 'pleasant', 'fence', 'educated', 'frozen', 'luxury', 'fancy', 'chest', 'consume', 'go', 'chemical',\n 'financial', 'qualified', 'poison', 'relation', 'ceremony', 'battle', 'generous', 'unpleasant',\n 'literature', 'naturally', 'definition', 'quit', 'spoken', 'determine', 'afford', 'separate',\n 'strength', 'kick', 'nation', 'attract', 'despite', 'collection', 'swim', 'accommodation', 'official',\n 'whatever', 'apologize', 'reservation', 'climb', 'though', 'damage', 'trick', 'lie', 'announcement',\n 'violent', 'warning', 'shoot', 'absolutely', 'mention', 'sculpture', 'fuel', 'duty', 'pack',\n 'challenge', 'dislike', 'tent', 'benefit', 'aged', 'branch', 'extra', 'theirs', 'summary', 'simply',\n 'pipe', 'sail', 'trainer', 'cover', 'wonder', 'importance', 'approximately', 'pull', 'strongly',\n 'charge', 'claim', 'conclusion', 'latest', 'shake', 'environmental', 'victim', 'cheat', 'cheap',\n 'effort', 'flag', 'spot', 'similarly', 'shy', 'totally', 'sudden', 'solid', 'now', 'due', 'native',\n 'poet', 'sexual', 'suit', 'translate', 'setting', 'treatment', 'sailor', 'court', 'cable', 'lead',\n 'local', 'mess', 'aware', 'current', 'divide', 'generation', 'ours', 'properly', 'album', 'economic',\n 'legal', 'arrest', 'romantic', 'share', 'ban', 'roll', 'stick', 'close', 'narrative', 'slice', 'bother',\n 'giant', 'fighting', 'outdoor', 'pray', 'quote', 'translation', 'educational', 'indoor', 'powder',\n 'prayer', 'explosion', 'glove', 'better', 'ahead', 
'attach', 'deliver', 'length', 'sight', 'result',\n 'next', 'drunk', 'seed', 'youth', 'plenty', 'access', 'proper', 'princess', 'within', 'block', 'odd']\n\n words_b2 = ['mechanical', 'bar', 'favour', 'interpretation', 'heaven', 'crack', 'numerous', 'relatively', 'survey',\n 'proof', 'produce', 'genuinely', 'significantly', 'proposal', 'aggressive', 'democracy', 'interact',\n 'timing', 'bunch', 'psychological', 'funding', 'manufacturing', 'romance', 'somehow', 'reward',\n 'proceed', 'obtain', 'collector', 'acre', 'darkness', 'exhibit', 'justice', 'leadership', 'vision',\n 'counter', 'furious', 'instance', 'alongside', 'construction', 'adjust', 'firefighter', 'exit', 'bond',\n 'corporation', 'specialize', 'makeup', 'obstacle', 'automatic', 'curved', 'embrace', 'random', 'yet',\n 'depressed', 'publishing', 'candle', 'landing', 'scale', 'expertise', 'installation', 'enthusiasm',\n 'globalization', 'tunnel', 'additional', 'dramatically', 'honey', 'ship', 'commitment', 'contract',\n 'so-called', 'deadly', 'frequent', 'weird', 'transmit', 'owe', 'participant', 'hesitate', 'declare',\n 'sweep', 'wrong', 'inevitably', 'title', 'heel', 'besides', 'emission', 'shore', 'annually', 'command',\n 'soul', 'accidentally', 'surgery', 'preference', 'hopefully', 'testing', 'dairy', 'weakness', 'tap',\n 'chop', 'brief', 'edition', 'incident', 'ultimate', 'opera', 'erupt', 'time', 'referee', 'unite', 'ton',\n 'acquire', 'drought', 'honor', 'margin', 'eager', 'satisfy', 'probability', 'hire', 'accurately',\n 'criticism', 'blanket', 'nerve', 'offense', 'humor', 'specify', 'ethnic', 'technological', 'severe',\n 'launch', 'spiritual', 'organic', 'struggle', 'formerly', 'govern', 'immune', 'measurement',\n 'potential', 'adequately', 'deck', 'leave', 'district', 'restore', 'progressive', 'scholarship',\n 'territory', 'elegant', 'combination', 'chair', 'consideration', 'finance', 'heal', 'booking', 'plus',\n 'evil', 'unknown', 'short-term', 'roughly', 'chase', 'observation', 'habitat', 'sticky', 'moreover',\n 'medium', 'worst', 'sector', 'proportion', 'classify', 'colourful', 'spokesperson', 'weekly',\n 'confidence', 'retain', 'picture', 'hip', 'textbook', 'stroke', 'disorder', 'decline', 'narrow',\n 'humorous', 'incentive', 'concrete', 'devote', 'gaming', 'sufficient', 'strict', 'reporting',\n 'entertaining', 'mode', 'ambitious', 'biological', 'insight', 'hurt', 'screen', 'consistently', 'minor',\n 'forum', 'sidewalk', 'neutral', 'shortly', 'promising', 'surrounding', 'survival', 'goods', 'protein',\n 'trouble', 'trading', 'astonishing', 'observer', 'decent', 'buck', 'even', 'loose', 'exception', 'rub',\n 'negative', 'fully', 'scandal', 'terminal', 'accountant', 'compound', 'shock', 'lighting', 'blind',\n 'collapse', 'strictly', 'capture', 'stunning', 'wise', 'permit', 'engage', 'crew', 'passionate',\n 'constantly', 'innovation', 'preparation', 'grab', 'terms', 'decrease', 'switch', 'accent', 'range',\n 'judgement', 'fortune', 'seat', 'rank', 'prior', 'jail', 'harbour', 'generate', 'overnight', 'comic',\n 'cruise', 'indication', 'affair', 'opening', 'probable', 'outline', 'necessity', 'fix', 'failure',\n 'guideline', 'packet', 'lottery', 'litre', 'strike', 'expose', 'threat', 'withdraw', 'founder',\n 'senior', 'military', 'courage', 'input', 'insert', 'herb', 'primarily', 'hearing', 'disappointment',\n 'sincere', 'oxygen', 'steep', 'bat', 'bitter', 'acknowledge', 'navigation', 'precede', 'contribution',\n 'preserve', 'amount', 'whereas', 'fellow', 'basement', 'spectacular', 'refugee', 'highway', 'ballet',\n 
'purely', 'multiple', 'specialist', 'broad', 'partly', 'notion', 'associate', 'suffering', 'neat',\n 'colony', 'forbid', 'desperately', 'dramatic', 'shortage', 'pension', 'try', 'nursing', 'mysterious',\n 'patch', 'genetic', 'spare', 'resolve', 'championship', 'stage', 'psychologist', 'basket', 'spice',\n 'core', 'negotiation', 'auction', 'worm', 'income', 'promotion', 'memorable', 'tag', 'uncertainty',\n 'acid', 'root', 'accommodation', 'official', 'reveal', 'challenging', 'trace', 'participation',\n 'make-up', 'bold', 'grand', 'flexible', 'arise', 'entirely', 'crop', 'inform', 'delighted', 'matching',\n 'equivalent', 'spot', 'disturb', 'credit', 'seeker', 'inflation', 'prime', 'willing', 'budget',\n 'divide', 'imply', 'ladder', 'senate', 'suspend', 'database', 'defender', 'hilarious', 'forward',\n 'appeal', 'interpret', 'alter', 'structure', 'voluntary', 'transition', 'economics', 'gang', 'wind',\n 'curious', 'artwork', 'enjoyable', 'pharmacy', 'ink', 'considerably', 'encounter', 'forecast',\n 'shocking', 'depart', 'temple', 'tonne', 'distant', 'artificial', 'flash', 'display', 'pregnant',\n 'revolution', 'monthly', 'rat', 'slogan', 'material', 'gradually', 'journalism', 'tropical', 'stare',\n 'positive', 'firmly', 'anticipate', 'construct', 'ethical', 'vital', 'automatically', 'emerge', 'holy',\n 'sophisticated', 'exotic', 'nickel', 'violence', 'agriculture', 'tone', 'chairman', 'compose',\n 'largely', 'flame', 'illustrate', 'calculate', 'desert', 'tragic', 'figure', 'mate', 'pursuit', 'raw',\n 'failed', 'trial', 'clarify', 'round', 'advance', 'dynamic', 'survivor', 'nearby', 'perspective',\n 'folding', 'viewpoint', 'medal', 'annual', 'stream', 'ID', 'abroad', 'gig', 'enthusiastic',\n 'limitation', 'wander', 'bombing', 'barely', 'disabled', 'convention', 'ongoing', 'outfit', 'gorgeous',\n 'otherwise', 'passage', 'handle', 'precisely', 'majority', 'openly', 'fraction', 'receiver', 'panel',\n 'prompt', 'mineral', 'loyal', 'delete', 'modest', 'bid', 'jury', 'rail', 'clerk', 'humour', 'edit',\n 'van', 'icon', 'depressing', 'gender', 'minister', 'intense', 'strengthen', 'voting', 'workforce',\n 'angle', 'downtown', 'session', 'tank', 'incorporate', 'charming', 'concept', 'bug', 'barrier',\n 'efficiently', 'slope', 'national', 'elect', 'risky', 'craft', 'assume', 'accuse', 'destruction',\n 'feel', 'target', 'free', 'thus', 'equip', 'aid', 'comprehensive', 'hell', 'beg', 'shallow', 'vary',\n 'steady', 'back', 'stand', 'spill', 'intellectual', 'cancer', 'selection', 'considerable', 'cast',\n 'spectator', 'volume', 'infrastructure', 'immigration', 'power', 'inherit', 'warming', 'harbor',\n 'ancestor', 'steam', 'ultimately', 'myth', 'term', 'relieved', 'anxious', 'anger', 'questionnaire',\n 'predictable', 'cry', 'certificate', 'equal', 'approval', 'treasure', 'demonstration', 'badge',\n 'relief', 'split', 'schedule', 'commander', 'implication', 'ridiculous', 'rush', 'hunting', 'intended',\n 'implement', 'resign', 'magnificent', 'guarantee', 'concentration', 'commission', 'regional',\n 'threaten', 'legend', 'labour', 'undertake', 'visual', 'mosquito', 'rebuild', 'tendency', 'inspector',\n 'wealth', 'scratch', 'contest', 'governor', 'abstract', 'inevitable', 'concern', 'thumb', 'objective',\n 'conventional', 'skull', 'speculate', 'modify', 'dare', 'activate', 'evolve', 'purchase', 'evaluation',\n 'harmful', 'editorial', 'shape', 'beside', 'wealthy', 'disappoint', 'creature', 'industrial', 'dive',\n 'sample', 'cue', 'rival', 'select', 'fool', 'making', 'means', 'appropriate', 'former', 
'psychology',\n 'witness', 'transportation', 'adapt', 'extensive', 'vertical', 'novelist', 'starve', 'rescue', 'grant',\n 'offence', 'propose', 'lung', 'contemporary', 'orchestra', 'permanent', 'motivation', 'agency', 'bear',\n 'conservation', 'alarm', 'rating', 'internal', 'insurance', 'disability', 'obesity', 'registration',\n 'conservative', 'estimate', 'worse', 'deliberate', 'framework', 'permanently', 'extraordinary',\n 'distribution', 'position', 'wisdom', 'seek', 'offender', 'senator', 'therapist', 'load', 'document',\n 'brick', 'historian', 'resort', 'lyric', 'additionally', 'newly', 'complex', 'patient', 'overcome',\n 'worth', 'tune', 'joy', 'obligation', 'abandon', 'overall', 'scholar', 'undergo', 'steadily',\n 'distinct', 'distribute', 'reinforce', 'map', 'hidden', 'foundation', 'secure', 'initially', 'reserve',\n 'naked', 'invade', 'nutrition', 'motion', 'institute', 'low', 'swallow', 'nowadays', 'genuine', 'motor',\n 'reputation', 'conduct', 'faculty', 'instant', 'scenario', 'unity', 'derive', 'graphics', 'elderly',\n 'urge', 'shift', 'precious', 'delivery', 'colorful', 'pace', 'complicated', 'council', 'settler',\n 'integrate', 'theft', 'multiply', 'fundamental', 'fortunate', 'maximum', 'conflict', 'analyst', 'pitch',\n 'shaped', 'cabin', 'leaflet', 'bacteria', 'broadcaster', 'fuel', 'challenge', 'trap', 'significance',\n 'animation', 'super', 'password', 'info', 'model', 'latest', 'subject', 'hold', 'feed', 'domestic',\n 'cute', 'landscape', 'mayor', 'shelter', 'attorney', 'current', 'accompany', 'phase', 'occupy',\n 'lately', 'close', 'lively', 'applicant', 'fake', 'defense', 'transfer', 'imagination', 'licence',\n 'reasonably', 'stable', 'controversial', 'procedure', 'creation', 'constant', 'convenience', 'valid',\n 'absolute', 'differ', 'crash', 'outcome', 'upper', 'sometime', 'miner', 'evolution', 'controversy',\n 'transform', 'audio', 'shot', 'nightmare', 'negotiate', 'fabulous', 'carbon', 'institution', 'high',\n 'nasty', 'consult', 'coverage', 'project', 'emotional', 'academic', 'teen', 'track', 'restrict',\n 'associated', 'delay', 'electronics', 'goodness', 'remark', 'initiative', 'mass', 'illustration',\n 'install', 'visa', 'subsequent', 'pile', 'dig', 'self', 'radiation', 'discount', 'notebook', 'attempt',\n 'feedback', 'beneficial', 'finding', 'AIDS', 'labor', 'organ', 'kindergarten', 'illusion', 'justify',\n 'issue', 'perceive', 'broadcast', 'mosque', 'pose', 'presence', 'steel', 'dull', 'martial', 'resist',\n 'consultant', 'initial', 'asset', 'sum', 'privacy', 'delight', 'fault', 'innovative', 'terrorist',\n 'comprise', 'obey', 'extent', 'step', 'opponent', 'ownership', 'external', 'spoil', 'vitamin',\n 'emphasize', 'miserable', 'component', 'shame', 'apology', 'furthermore', 'division', 'examination',\n 'empire', 'fever', 'rapid', 'contribute', 'sympathetic', 'hypothesis', 'essentially', 'usage', 'gallon',\n 'evident', 'unfold', 'recovery', 'racial', 'house', 'fossil', 'assure', 'extend', 'league', 'workshop',\n 'solar', 'fund', 'harm', 'praise', 'revenue', 'mission', 'upward', 'drag', 'trait', 'extension',\n 'shocked', 'defend', 'tension', 'origin', 'mount', 'confusing', 'relieve', 'grade', 'characteristic',\n 'depth', 'defeat', 'facility', 'boost', 'stuff', 'tackle', 'part-time', 'saving', 'protection',\n 'criticize', 'helmet', 'ideal', 'detect', 'awareness', 'potentially', 'representative', 'stance',\n 'likewise', 'cancel', 'armed', 'terrorism', 'inquiry', 'principal', 'print', 'breast', 'genius',\n 'realistic', 'register', 'stiff', 'adequate', 'conscious', 
'partnership', 'plain', 'object', 'cliff',\n 'whom', 'retirement', 'federal', 'favor', 'palm', 'resident', 'temporary', 'widespread', 'make',\n 'litter', 'unique', 'interaction', 'publication', 'logo', 'float', 'graphic', 'observe', 'line', 'blow',\n 'chart', 'gesture', 'deliberately', 'convincing', 'headquarters', 'consequently', 'outstanding',\n 'wound', 'cure', 'parliament', 'mortgage', 'excessive', 'price', 'being', 'regard', 'reckon', 'diverse',\n 'disk', 'decorate', 'species', 'remarkably', 'accuracy', 'afterward', 'fundamentally', 'approve',\n 'lane', 'sexy', 'firework', 'castle', 'suburb', 'canal', 'broadly', 'fragment', 'junior', 'extract',\n 'significant', 'mechanism', 'urgent', 'slip', 'civilization', 'homeless', 'parade', 'motivate',\n 'fabric', 'basically', 'overseas', 'limited', 'bullet', 'criterion', 'circumstance', 'unexpected',\n 'somewhat', 'demand', 'via', 'agenda', 'but', 'dime', 'association', 'jet', 'afterwards', 'monster',\n 'altogether', 'accurate', 'comparative', 'recruit', 'loan', 'mixed', 'ruin', 'fold', 'wrap',\n 'tournament', 'pointed', 'popularity', 'process', 'duration', 'aspect', 'stall', 'rubber', 'closely',\n 'wire', 'rate', 'welfare', 'interval', 'existence', 'elsewhere', 'sentence', 'critic', 'fulfill',\n 'recall', 'instantly', 'impatient', 'major', 'scream', 'following', 'chief', 'sibling', 'file', 'value',\n 'spokesman', 'increasingly', 'restriction', 'wolf', 'speed', 'tragedy', 'operator', 'zone', 'visible',\n 'recession', 'pill', 'patience', 'distinguish', 'cite', 'pity', 'operate', 'debate', 'identical',\n 'literally', 'melt', 'priority', 'assessment', 'dismiss', 'beat', 'truly', 'cop', 'medication',\n 'briefly', 'penalty', 'troop', 'infection', 'enhance', 'thorough', 'golden', 'detail', 'expense',\n 'full-time', 'kit', 'hunger', 'routine', 'therapy', 'output', 'enquiry', 'slide', 'aged', 'puzzle',\n 'cowboy', 'trust', 'virtual', 'sustainable', 'long-term', 'bargain', 'trillion', 'rental', 'emphasis',\n 'ethic', 'thesis', 'format', 'cable', 'wrist', 'rhythm', 'citizen', 'arms', 'aside', 'household',\n 'terribly', 'deserve', 'wherever', 'crisis', 'divorce', 'creativity', 'elbow', 'convinced',\n 'membership', 'nevertheless', 'actual', 'robbery', 'dominate', 'tissue', 'impressed', 'stimulate',\n 'impress', 'assistance', 'resolution', 'sequence', 'elementary', 'temporarily', 'teens', 'upwards',\n 'deposit', 'opposition', 'minority', 'scare', 'impose', 'vast', 'pride', 'committee', 'satisfaction',\n 'casual', 'bet', 'downwards', 'awkward', 'victory', 'seminar', 'balloon', 'monument', 'assumption',\n 'prohibit', 'account', 'exploit', 'disagreement', 'lower', 'requirement', 'desperate', 'critical',\n 'lifetime', 'unconscious', 'reach', 'debt', 'establish', 'concerned', 'precise', 'executive', 'venue',\n 'deny', 'flavour', 'spokeswoman', 'relevant', 'master', 'speculation', 'severely', 'clinic', 'forgive',\n 'assess', 'protester', 'moving', 'philosophy', 'deadline', 'county', 'exceed', 'corporate', 'fond',\n 'rocket', 'norm', 'professional', 'metaphor', 'dot', 'unacceptable', 'hunt', 'résumé', 'hook',\n 'expedition', 'recognition', 'pick', 'settle', 'universal', 'economist', 'apparently', 'discipline',\n 'incorrect', 'housing', 'investment', 'donation', 'honesty', 'screening', 'independence', 'shooting',\n 'whisper', 'anxiety', 'anniversary', 'sporting', 'moral', 'index', 'regret', 'addiction', 'joint',\n 'maintain', 'shade', 'placement', 'apparent', 'determination', 'hence', 'recover', 'corridor', 'choir',\n 'logical', 'isolate', 'bush', 'tsunami', 
'physician', 'clip', 'minimum', 'greatly', 'function',\n 'phenomenon', 'urban', 'accommodate', 'curve', 'submit', 'variation', 'sensitive', 'envelope', 'way',\n 'shall', 'trip', 'entire', 'sponsorship', 'commonly', 'pure', 'manufacture', 'retail', 'invasion',\n 'excuse', 'tribe', 'status', 'completion', 'pupil', 'expansion', 'pursue', 'dependent', 'capable',\n 'border', 'adopt', 'coincidence', 'inner', 'disc', 'efficient', 'diversity', 'defence', 'dozen',\n 'slave', 'reasonable', 'universe', 'stretch', 'exclude', 'fraud', 'inch', 'remarkable', 'burn', 'rid',\n 'rely', 'dealer', 'scheme', 'tear', 'attachment', 'spread', 'demonstrate', 'eliminate', 'artistic',\n 'presidential', 'rise', 'crucial', 'fame', 'rapidly', 'sponsor', 'satellite', 'cheek', 'estate', 'rob',\n 'healthcare', 'convert', 'widely', 'tough', 'regulate', 'peer', 'sense', 'distract', 'wildlife',\n 'marathon', 'cell', 'date', 'lord', 'host', 'comfort', 'extreme', 'mechanic', 'very', 'affordable',\n 'tale', 'surgeon', 'outer', 'frequency', 'panic', 'critically', 'freedom', 'entrepreneur', 'level',\n 'possess', 'alien', 'portion', 'conspiracy', 'signature', 'lean', 'detailed', 'offensive', 'bent',\n 'consistent', 'principle', 'blame', 'wage', 'absorb', 'civil', 'thoroughly', 'infer', 'lens',\n 'extensively', 'greenhouse', 'approach', 'suspect', 'progress', 'surround', 'ashamed', 'insist',\n 'perception', 'freely', 'gene', 'ambulance', 'cope', 'isolated', 'interrupt', 'sympathy', 'wheat',\n 'emotionally', 'flavor', 'found', 'balanced', 'package', 'consumption', 'formation', 'certainty',\n 'massive', 'compulsory', 'terror', 'congress', 'opposed', 'prospect', 'draft', 'genre', 'sufficiently',\n 'optimistic', 'fantasy', 'investigation', 'subsequently', 'shadow', 'arrow', 'administration',\n 'accomplish', 'punk', 'fare', 'investor', 'acceptable', 'reduction', 'rural', 'catch', 'evaluate',\n 'workplace', 'recruitment', 'silk', 'desire', 'expectation', 'capacity', 'bias', 'racism', 'depression',\n 'plot', 'stock', 'chain', 'grocery', 'racist', 'dominant', 'swear', 'fee', 'fulfil', 'appropriately',\n 'publicity', 'dishonest', 'inspire', 'confusion', 'exposure', 'programming', 'hollow', 'marker',\n 'battle', 'dump', 'enable', 'gay', 'confess', 'judgment', 'slight', 'convey', 'oppose', 'beyond',\n 'downward', 'cheer', 'pause', 'terrify', 'honour', 'spite', 'occasionally', 'deeply', 'globe',\n 'license', 'silence', 'firm', 'era', 'whoever', 'composer', 'literary', 'athletic', 'feather', 'bound',\n 'ensure', 'amusing', 'trigger', 'unfortunate', 'faith', 'offend', 'address', 'regulation', 'aircraft',\n 'useless', 'inhabitant', 'mistake', 'curriculum', 'assign', 'gain', 'satisfied', 'rose', 'circuit',\n 'cave', 'decoration', 'democratic', 'skilled', 'monitor', 'classic', 'occupation', 'bill',\n 'exploration', 'revision', 'discourage', 'parallel']\n\n words_c1 = ['lawmaker', 'constitute', 'niche', 'submission', 'intact', 'endure', 'gross', 'pioneer', 'rebel',\n 'limb', 'allocation', 'gathering', 'film-maker', 'aftermath', 'soak', 'collaborate', 'distort',\n 'beverage', 'academy', 'activist', 'proclaim', 'regulator', 'occasional', 'align', 'rape', 'radar',\n 'consolidate', 'spin', 'inclined', 'capability', 'remedy', 'consent', 'relevance', 'simulation',\n 'analogy', 'acre', 'accused', 'applicable', 'plunge', 'infect', 'funeral', 'preside', 'postwar',\n 'counter', 'initiate', 'harvest', 'intermediate', 'dedicated', 'noon', 'merchant', 'correlate',\n 'guidance', 'appealing', 'snap', 'exit', 'contempt', 'agricultural', 'migration', 'autonomy',\n 
'correction', 'collective', 'motorist', 'long-standing', 'medieval', 'tumor', 'intimate', 'cemetery',\n 'invisible', 'acceptance', 'kidney', 'gravity', 'amend', 'indictment', 'commentary', 'nod', 'fate',\n 'complement', 'weed', 'misery', 'retreat', 'assault', 'productive', 'ecological', 'minute',\n 'bankruptcy', 'transcript', 'mobile', 'equality', 'closure', 'cabinet', 'directory', 'buffer',\n 'citizenship', 'councillor', 'reproduction', 'thread', 'nonetheless', 'boundary', 'liberal', 'inhibit',\n 'lengthy', 'bow', 'contend', 'militant', 'accelerate', 'neighbouring', 'diagnose', 'privilege',\n 'transparency', 'congregation', 'bass', 'grace', 'misleading', 'mathematical', 'compensation',\n 'arbitrary', 'eager', 'conversion', 'constitution', 'disposal', 'immense', 'plea', 'deputy', 'cautious',\n 'slot', 'atrocity', 'dissolve', 'melody', 'authentic', 'decisive', 'realm', 'structural', 'segment',\n 'companion', 'aesthetic', 'top', 'yell', 'grid', 'robust', 'accountability', 'spine', 'presumably',\n 'provoke', 'humanity', 'absent', 'constitutional', 'productivity', 'booking', 'bulk', 'philosopher',\n 'forthcoming', 'operational', 'turnover', 'sound', 'mask', 'burden', 'allowance', 'brutal', 'dictator',\n 'backing', 'meantime', 'secondly', 'discourse', 'successive', 'insufficient', 'strain', 'clinical',\n 'entitle', 'mobilize', 'explicit', 'reform', 'hostage', 'manuscript', 'massacre', 'whip', 'amid',\n 'glory', 'continually', 'republic', 'tactical', 'withdrawal', 'horn', 'aspire', 'forth', 'stir',\n 'premier', 'preservation', 'buck', 'orientation', 'manifest', 'combat', 'just', 'ironic', 'inspect',\n 'terminal', 'precedent', 'rally', 'cease', 'straightforward', 'timber', 'naval', 'bureaucracy',\n 'versus', 'expenditure', 'triumph', 'embassy', 'administrative', 'plead', 'reven', 'mature', 'barrel',\n 'unprecedented', 'remains', 'hazard', 'peculiar', 'injustice', 'empirical', 'ease', 'setup',\n 'worthwhile', 'appreciation', 'decision-making', 'exploitation', 'adaptation', 'dense', 'displace',\n 'jurisdiction', 'trio', 'stabilize', 'desirable', 'detection', 'burst', 'substitute', 'carve', 'intake',\n 'villager', 'assemble', 'whilst', 'communist', 'alike', 'testimony', 'sphere', 'hardware', 'civilian',\n 'attendance', 'rotation', 'whatsoever', 'judicial', 'injection', 'bat', 'consecutive', 'dominance',\n 'distinctive', 'synthesis', 'taxpayer', 'doctrine', 'costly', 'minimize', 'architectural', 'residence',\n 'overwhelming', 'mob', 'carriage', 'essence', 'scrutiny', 'supervision', 'abuse', 'instrumental',\n 'pension', 'renew', 'footage', 'patch', 'beast', 'solidarity', 'sovereignty', 'spare', 'spouse',\n 'empower', 'dictate', 'endorse', 'foreigner', 'divert', 'auction', 'specialized', 'acid', 'syndrome',\n 'memo', 'trace', 'rumor', 'log', 'flourish', 'resemble', 'shoot', 'surgical', 'consultation', 'solely',\n 'reside', 'respective', 'morality', 'resume', 'insult', 'involvement', 'distinction', 'aggression',\n 'liberation', 'integration', 'overly', 'sentiment', 'youngster', 'solo', 'strategic', 'incarcerate',\n 'organizational', 'midst', 'cynical', 'hopeful', 'entity', 'nest', 'march', 'plug', 'mere', 'intent',\n 'alliance', 'punch', 'processor', 'verbal', 'grin', 'facilitate', 'filter', 'propaganda', 'idiot',\n 'concede', 'marine', 'objection', 'engagement', 'acquisition', 'warrior', 'bay', 'biography', 'arena',\n 'ranking', 'settlement', 'commissioner', 'counselor', 'verdict', 'hostile', 'supplement', 'linger',\n 'chamber', 'accordance', 'aspiration', 'acute', 'sketch', 'maximize', 'standing', 
'striking',\n 'flexibility', 'petition', 'yield', 'hierarchy', 'prejudice', 'violate', 'shrink', 'await', 'attribute',\n 'assembly', 'dynamic', 'forge', 'coalition', 'exile', 'simultaneously', 'testify', 'revival',\n 'systematic', 'ensue', 'comparable', 'consistency', 'adverse', 'gig', 'casino', 'transaction', 'wholly',\n 'endeavour', 'induce', 'circulate', 'pastor', 'formula', 'congratulate', 'trophy', 'influential',\n 'skeptical', 'gut', 'tactic', 'oblige', 'spectacle', 'assurance', 'spectrum', 'boast', 'delicate',\n 'assassination', 'earnings', 'aluminum', 'net', 'tribal', 'deem', 'consciousness', 'outsider',\n 'meaningful', 'verify', 'upcoming', 'momentum', 'wit', 'suite', 'accordingly', 'denounce',\n 'shareholder', 'stereotype', 'corresponding', 'originate', 'stem', 'donor', 'sin', 'vicious', 'listing',\n 'craft', 'differentiate', 'storage', 'prospective', 'methodology', 'contrary', 'abortion',\n 'restoration', 'invoke', 'dual', 'cluster', 'complexity', 'optical', 'soar', 'sheer', 'enterprise',\n 'dispose', 'charm', 'suspicious', 'interactive', 'latter', 'resignation', 'stab', 'battlefield',\n 'coincide', 'workout', 'conception', 'leap', 'autumn', 'encouraging', 'hydrogen', 'intellectual',\n 'inequality', 'liter', 'vein', 'liver', 'legacy', 'adolescent', 'incarceration', 'crown', 'presently',\n 'prescribe', 'fundraising', 'regime', 'competent', 'exclusion', 'thought-provoking', 'saint', 'seal',\n 'thereafter', 'total', 'grip', 'characterize', 'leak', 'fairness', 'echo', 'tuition', 'proceedings',\n 'cutting', 'outlet', 'magnificent', 'marginal', 'integrity', 'sack', 'terrific', 'amendment',\n 'protocol', 'humanitarian', 'variable', 'default', 'apparatus', 'parameter', 'ash', 'columnist',\n 'competence', 'handling', 'breakdown', 'enact', 'confront', 'rear', 'array', 'legislature', 'stimulus',\n 'lesbian', 'toss', 'harsh', 'elaborate', 'enforcement', 'enrich', 'prominent', 'seldom', 'motive',\n 'pledge', 'absurd', 'stumble', 'mercy', 'render', 'ideological', 'literacy', 'pregnancy', 'legendary',\n 'bleed', 'slash', 'symbolic', 'designate', 'accomplishment', 'horizon', 'prestigious', 'deploy',\n 'shrug', 'anonymous', 'vacuum', 'dependence', 'fit', 'landmark', 'province', 'spy', 'overturn',\n 'disrupt', 'explosive', 'fragile', 'compel', 'transparent', 'abundance', 'halt', 'presidency', 'prey',\n 'depict', 'damaging', 'defect', 'interior', 'albeit', 'firearm', 'correspond', 'stability', 'arguably',\n 'credibility', 'wipe', 'representation', 'reassure', 'slavery', 'raid', 'contemplate', 'residue',\n 'senator', 'thankfully', 'anchor', 'coup', 'recipient', 'mining', 'experimental', 'counterpart',\n 'dignity', 'explicitly', 'enquire', 'quest', 'overlook', 'density', 'exclusively', 'thrilled',\n 'declaration', 'bare', 'eternal', 'frustrating', 'rip', 'lawn', 'crawl', 'curiosity', 'elite', 'fatal',\n 'inclusion', 'destructive', 'fine', 'merge', 'coordinate', 'ironically', 'correspondent', 'suspicion',\n 'supportive', 'keen', 'allocate', 'intersection', 'civic', 'oral', 'flawed', 'engaging', 'removal',\n 'assertion', 'miracle', 'handful', 'nonprofit', 'separation', 'endeavor', 'instruct', 'faculty',\n 'canvas', 'pirate', 'revenge', 'extremist', 'outlook', 'substantial', 'inflict', 'embed', 'bizarre',\n 'high-profile', 'insider', 'legislative', 'suspension', 'parish', 'capitalism', 'correspondence',\n 'minimal', 'overwhelm', 'poll', 'exaggerate', 'specification', 'drown', 'gambling', 'portray',\n 'problematic', 'ally', 'republicn.', 'flesh', 'neighboring', 'premise', 'slam', 'skip', 'predator',\n 
'prosecute', 'contender', 'surge', 'deed', 'peak', 'spell', 'cattle', 'notorious', 'disastrous',\n 'enrol', 'compile', 'embarrassment', 'torture', 'contradiction', 'width', 'inquire', 'attorney',\n 'halfway', 'counseling', 'protective', 'residential', 'undoubtedly', 'compelling', 'devise', 'unify',\n 'preach', 'vulnerable', 'postpone', 'occurrence', 'guilt', 'verse', 'inject', 'sanction', 'allege',\n 'probe', 'recount', 'predecessor', 'prevail', 'regain', 'banner', 'glorious', 'renowned', 'refusal',\n 'rage', 'genocide', 'non-profit', 'heighten', 'laser', 'twist', 'confirmation', 'fleet', 'catalog',\n 'ideology', 'profitable', 'fibre', 'roster', 'commentator', 'bail', 'tremendous', 'specimen',\n 'filmmaker', 'seize', 'affection', 'administrator', 'commerce', 'substitution', 'haunt', 'activation',\n 'sigh', 'monk', 'desktop', 'stake', 'vessel', 'meditation', 'dismissal', 'virtue', 'statistical',\n 'passive', 'surplus', 'prevention', 'prescription', 'apparel', 'cooperate', 'resistance', 'descent',\n 'pond', 'backup', 'beloved', 'revolutionary', 'broadband', 'supposedly', 'simulate', 'betray',\n 'archive', 'thrive', 'cooperative', 'rod', 'shatter', 'cultivate', 'divine', 'longtime',\n 'discrimination', 'span', 'content', 'disclosure', 'infamous', 'proceeding', 'machinery', 'adhere',\n 'prosecutor', 'credible', 'theatrical', 'substantially', 'strip', 'indicator', 'constraint',\n 'capitalist', 'peasant', 'squad', 'utilize', 'confine', 'reluctant', 'electoral', 'auto',\n 'intervention', 'processing', 'practitioner', 'uphold', 'exert', 'parental', 'enroll', 'spark',\n 'tighten', 'obsess', 'dose', 'willingness', 'ambassador', 'deployment', 'implementation', 'behavioral',\n 'gallon', 'nursery', 'weaken', 'courtesy', 'tenant', 'beneficiary', 'ruling', 'complication', 'appoint',\n 'debris', 'counselling', 'terminate', 'mobility', 'varied', 'reverse', 'dawn', 'sole', 'circulation',\n 'detain', 'warrant', 'franchise', 'scattered', 'harmony', 'screw', 'obsession', 'timely', 'widen',\n 'thereby', 'clash', 'say', 'venture', 'frustration', 'memoir', 'municipal', 'tackle', 'tolerance',\n 'admission', 'beneath', 'clarity', 'unconstitutional', 'humble', 'deficiency', 'rock', 'fixture',\n 'memorial', 'trail', 'evolutionary', 'diplomat', 'liberty', 'reminder', 'documentation', 'feat',\n 'principal', 'philosophical', 'sustain', 'interfere', 'kingdom', 'legislation', 'generic', 'module',\n 'formulate', 'shipping', 'solicitor', 'landlord', 'supervise', 'incidence', 'socialist', 'outbreak',\n 'dilemma', 'intensive', 'notify', 'marketplace', 'rational', 'merger', 'availability', 'coordinator',\n 'disturbing', 'erect', 'blessing', 'efficiency', 'interference', 'accumulation', 'superior', 'novel',\n 'parliament', 'provincial', 'patron', 'retrieve', 'collision', 'worship', 'discretion', 'nominee',\n 'conserve', 'strive', 'feminist', 'exclusive', 'interim', 'calculation', 'devastate', 'conscience',\n 'breach', 'conviction', 'ritual', 'veteran', 'privatization', 'inventory', 'thoughtful', 'deteriorate',\n 'mandate', 'extract', 'demon', 'bishop', 'cognitive', 'slap', 'proceeds', 'vow', 'crude', 'pronounced',\n 'freshman', 'sake', 'pit', 'functional', 'militia', 'catalogue', 'aide', 'commodity', 'strand',\n 'rejection', 'corrupt', 'irrelevant', 'pump', 'refuge', 'breakthrough', 'enthusiast', 'superb',\n 'homeland', 'undermine', 'manipulate', 'subscription', 'intensity', 'undergraduate', 'merit',\n 'succession', 'establishment', 'allegation', 'favourable', 'domain', 'magnetic', 'fiber', 'monopoly',\n 'lifelong', 
'excellence', 'flee', 'threshold', 'tender', 'premium', 'contention', 'diplomatic', 'tide',\n 'patent', 'set-up', 'benchmark', 'bounce', 'ratio', 'pop', 'buddy', 'foster', 'lawsuit', 'referendum',\n 'mutual', 'reportedly', 'seemingly', 'cop', 'maintenance', 'faction', 'rookie', 'ignorance', 'toxic',\n 'quota', 'transit', 'caution', 'boom', 'heritage', 'liable', 'loom', 'merely', 'sensation',\n 'sensitivity', 'rental', 'reflection', 'congressional', 'adjacent', 'bonus', 'projection', 'intervene',\n 'crush', 'stark', 'identification', 'defy', 'composition', 'trademark', 'profound', 'lesser', 'unveil',\n 'notably', 'devil', 'grief', 'configuration', 'effectiveness', 'summit', 'endorsement', 'evoke',\n 'whereby', 'sexuality', 'casualty', 'long-time', 'blend', 'outing', 'scope', 'administer', 'handy',\n 'hatred', 'vocal', 'persist', 'guerrilla', 'disruption', 'comply', 'passing', 'successor', 'validity',\n 'personnel', 'respectively', 'lineup', 'contractor', 'cater', 'deposit', 'cling', 'reasoning', 'swing',\n 'disclose', 'preliminary', 'accessible', 'presume', 'opt', 'duo', 'tobacco', 'glimpse',\n 'classification', 'newsletter', 'defensive', 'confrontation', 'lad', 'interface', 'beam', 'drain',\n 'distress', 'investigator', 'dedication', 'colonial', 'gaze', 'namely', 'chunk', 'rifle', 'attain',\n 'reconstruction', 'inmate', 'audit', 'optimism', 'harassment', 'lethal', 'imagery', 'chronic',\n 'custody', 'vice', 'coastal', 'backdrop', 'delegation', 'mandatory', 'alignment', 'trauma',\n 'proposition', 'councilor', 'frustrated', 'linear', 'magnitude', 'irony', 'prosperity', 'driving',\n 'rhetoric', 'creator', 'advocate', 'encouragement', 'reproduce', 'confession', 'widow', 'corruption',\n 'inappropriate', 'excess', 'hail', 'pipeline', 'diagnosis', 'indulge', 'sophomore', 'breed', 'missile',\n 'cargo', 'creep', 'applaud', 'execution', 'trustee', 'hook', 'toll', 'accountable', 'viable', 'reign',\n 'turnout', 'squeeze', 'rebellion', 'query', 'dumb', 'bind', 'prevalence', 'logic', 'allegedly',\n 'remainder', 'asylum', 'inadequate', 'replacement', 'vibrant', 'conceal', 'well', 'provision', 'assert',\n 'rehabilitation', 'tribute', 'compromise', 'critique', 'subscriber', 'ego', 'subtle', 'well-being',\n 'isolation', 'subsidy', 'integral', 'intensify', 'imminent', 'mainstream', 'choir', 'stun', 'loop',\n 'partially', 'physician', 'dip', 'deprive', 'dam', 'abolish', 'utility', 'integrated', 'epidemic',\n 'equation', 'riot', 'imprisonment', 'pad', 'intriguing', 'prosecution', 'articulate', 'aluminium',\n 'conquer', 'burial', 'vanish', 'mill', 'commence', 'suck', 'favorable', 'treaty', 'patrol',\n 'realization', 'gear', 'fluid', 'suppress', 'alert', 'cult', 'evacuate', 'mainland', 'regulatory',\n 'debut', 'embody', 'nonsense', 'descend', 'collaboration', 'bless', 'spam', 'frankly', 'nationwide',\n 'secular', 'glance', 'theology', 'behalf', 'deficit', 'inspiration', 'presidential', 'condemn',\n 'cocktail', 'superintendent', 'dimension', 'line-up', 'conceive', 'bench', 'elevate', 'oversee',\n 'discard', 'worthy', 'accusation', 'ballot', 'offspring', 'ward', 'discharge', 'grind', 'restraint',\n 'trailer', 'transmission', 'weave', 'fierce', 'dub', 'underlying', 'diminish', 'instinct', 'odds',\n 'nomination', 'tempt', 'angel', 'inspection', 'loyalty', 'consensus', 'magical', 'compensate', 'embark',\n 'suicide', 'inherent', 'copyright', 'smash', 'vague', 'authorize', 'pathway', 'counsellor', 'tolerate',\n 'selective', 'alien', 'offering', 'theoretical', 'infant', 'insertion', 'sacred', 'sue', 'noble',\n 
'suburban', 'delegate', 'accumulate', 'pulse', 'ministry', 'absence', 'sacrifice', 'correlation',\n 'legitimate', 'manipulation', 'rotate', 'likelihood', 'felony', 'nominate', 'compassion', 'incur',\n 'governance', 'mentor', 'imprison', 'violation', 'drift', 'predominantly', 'coordination', 'utterly',\n 'blast', 'precision', 'radical', 'notable', 'sceptical', 'adjustment', 'convict', 'expire', 'hint',\n 'inability', 'crystal', 'kidnap', 'ray', 'outrage', 'charter', 'terrain', 'lap', 'sword', 'compliance',\n 'fade', 'hostility', 'tribunal', 'encompass', 'supreme', 'moderate', 'recruitment', 'transformation',\n 'persistent', 'junction', 'escalate', 'partial', 'supervisor', 'constituency', 'countless', 'troubled',\n 'browser', 'vulnerability', 'parliamentary', 'regardless', 'large-scale', 'emergence', 'detention',\n 'institutional', 'blade', 'flaw', 'indigenous', 'texture', 'justification', 'upgrade', 'lobby', 'shed',\n 'chaos', 'warfare', 'neglect', 'pole', 'arm', 'grasp', 'revive', 'dispute', 'amateur', 'educator',\n 'contributor', 'adoption', 'endless', 'execute', 'license', 'concession', 'grave', 'copper',\n 'reliability', 'exceptional', 'rumour', 'surrender', 'trigger', 'revelation', 'post-war', 'steer',\n 'portfolio', 'denial', 'enforce', 'surveillance', 'confer', 'layout', 'appetite', 'situated',\n 'spotlight', 'compute', 'psychiatric', 'magistrate', 'eligible', 'tenure', 'modification', 'fiscal',\n 'serial', 'readily', 'warehouse']\n\n\n def word_count(row, words):\n \"\"\"Returns the share of subtitle tokens that occur in the given word list.\"\"\"\n counter = 0\n sub_words = row['subs'].split()\n for x in sub_words:\n for y in words:\n if x == y:\n counter += 1\n part = counter / len(sub_words)\n return part\n\n\n def prob(probabilities):\n \"\"\"Maps the highest predicted class probability to its level label.\"\"\"\n best_prob = 0\n best_idx = -1\n level = ['A2', 'B1', 'B2', 'C1']\n\n for x in range(len(probabilities[0])):\n if probabilities[0][x] > best_prob:\n best_prob = probabilities[0][x]\n best_idx = x\n\n return level[best_idx]\n\n\n def probabilities(film):\n \"\"\"Builds the per-level word shares and bag-of-words features, then predicts the level.\"\"\"\n sub = transformation(film)\n\n sub_df = pd.DataFrame([sub], columns=['subs'])\n sub_df['A1'] = sub_df.apply(word_count, axis=1, words=words_a1)\n sub_df['A2'] = sub_df.apply(word_count, axis=1, words=words_a2)\n sub_df['B1'] = sub_df.apply(word_count, axis=1, words=words_b1)\n sub_df['B2'] = sub_df.apply(word_count, axis=1, words=words_b2)\n sub_df['C1'] = sub_df.apply(word_count, axis=1, words=words_c1)\n vec_sub = vectorizer.transform(sub_df['subs']).toarray()\n col_vec_sub = vectorizer.get_feature_names_out()\n df_test_vec = pd.DataFrame(vec_sub, columns=col_vec_sub)\n df_test = sub_df.join(df_test_vec, lsuffix='_left', rsuffix='_right')\n features = df_test.drop(['subs'], axis=1)\n probs = logreg.predict_proba(features)\n result = prob(probs)\n return result\n\n\n result = probabilities(texts)\n st.write(result)\n","repo_name":"DarovskyS/Movies","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":72255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17838058778","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nimport sys\r\n\r\nfrom PyQt6.QtWidgets import QApplication, QMainWindow\r\nfrom PyQt6.QtOpenGLWidgets import QOpenGLWidget\r\nfrom PyQt6.QtCore import *\r\n\r\nimport math\r\nimport numpy as np\r\n\r\nfrom PIL import Image\r\n\r\n# Load image using PIL\r\ndef load_texture(file_path):\r\n img = Image.open(file_path)\r\n img_data = img.tobytes(\"raw\", \"RGB\", 0, -1)\r\n\r\n texture = glGenTextures(1)\r\n glBindTexture(GL_TEXTURE_2D, texture)\r\n\r\n
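# use bilinear filtering for minification and magnification\r\n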
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\r\n\r\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width, img.height, 0, GL_RGB, GL_UNSIGNED_BYTE, img_data)\r\n print(img.width, img.height)\r\n \r\n return texture\r\n\r\nclass MeshLoader:\r\n def __init__(self):\r\n self.nV = 0 # number of vertices\r\n self.nF = 0 # number of faces\r\n self.vBuffer = None # vertex buffer\r\n self.iBuffer = None # index buffer describing the faces\r\n def make_display_list(self): \r\n self.list = glGenLists(1)\r\n glNewList(self.list, GL_COMPILE)\r\n self.draw()\r\n glEndList()\r\n\r\n def draw_display_list(self):\r\n glCallList(self.list)\r\n\r\n def loadMesh(self, filename):\r\n with open(filename, 'rt') as inputfile:\r\n # start of the with-block\r\n self.nV = int(next(inputfile))\r\n self.vBuffer = np.zeros((self.nV*3, ), dtype=float)\r\n for i in range(self.nV):\r\n verts = next(inputfile).split()\r\n self.vBuffer[i*3: i*3+3] = verts[0:3]\r\n\r\n coordMin = self.vBuffer.min()\r\n coordMax = self.vBuffer.max()\r\n scale = max([coordMin, coordMax], key=abs)\r\n self.vBuffer /= scale\r\n\r\n self.nF = int(next(inputfile))\r\n self.iBuffer = np.zeros((self.nF*3, ), dtype=int)\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n self.nBuffer = np.zeros((self.nV*3, ), dtype=float)\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n for i in range(self.nF):\r\n idx = next(inputfile).split() # idx[0]: the number of vertices in this face\r\n # the values we need are idx[1], idx[2], idx[3] = idx[1:4]\r\n self.iBuffer[i*3: i*3+3] = idx[1:4]\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n index = self.iBuffer[i*3: i*3+3]\r\n p0 = self.vBuffer[index[0]*3: index[0]*3+3]\r\n p1 = self.vBuffer[index[1]*3: index[1]*3+3]\r\n p2 = self.vBuffer[index[2]*3: index[2]*3+3]\r\n u = p1 - p0\r\n v = p2 - p0\r\n N = np.cross(u, v)\r\n\r\n self.nBuffer[index[0]*3:index[0]*3+3] += N\r\n self.nBuffer[index[1]*3:index[1]*3+3] += N\r\n self.nBuffer[index[2]*3:index[2]*3+3] += N\r\n\r\n for i in range(self.nV):\r\n N = self.nBuffer[i*3: i*3+3]\r\n N = N / np.linalg.norm(N)\r\n self.nBuffer[i*3: i*3+3] = N\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n # end of the with-block\r\n\r\n self.make_display_list() ####################################################\r\n\r\n def draw(self):\r\n \r\n glBegin(GL_TRIANGLES)\r\n for i in range(self.nF): \r\n # draw each face.\r\n # the vertex indices that make up this face are \r\n v = self.iBuffer[i*3: i*3+3]\r\n\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n N = self.nBuffer[i*3: i*3+3]\r\n\r\n glNormal3fv(self.nBuffer[v[0]*3: v[0]*3+3])\r\n glVertex3fv(self.vBuffer[v[0]*3: v[0]*3+3])\r\n glNormal3fv(self.nBuffer[v[1]*3: v[1]*3+3])\r\n glVertex3fv(self.vBuffer[v[1]*3: v[1]*3+3])\r\n glNormal3fv(self.nBuffer[v[2]*3: v[2]*3+3])\r\n glVertex3fv(self.vBuffer[v[2]*3: v[2]*3+3]) \r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n glEnd()\r\n\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\nmat_spec = [1, 1, 0.5, 1]\r\nmat_diff = [1, 1, 0.5, 1]\r\nmat_ambi = [0, 0, 0, 1]\r\nmat_shin = [120]\r\n\r\nlit_spec = [1, 1, 1, 1]\r\nlit_diff = [1, 1, 1, 1]\r\nlit_ambi = [0, 0, 0, 1]\r\n\r\nlight_pos = [1, 2, 1, 0]\r\n\r\ndef LightSet():\r\n glMaterialfv(GL_FRONT, GL_SPECULAR, mat_spec)\r\n glMaterialfv(GL_FRONT, GL_DIFFUSE, mat_diff)\r\n glMaterialfv(GL_FRONT, GL_AMBIENT, mat_ambi)\r\n glMaterialfv(GL_FRONT, GL_SHININESS, mat_shin)\r\n\r\n glLightfv(GL_LIGHT0, GL_SPECULAR, lit_spec)\r\n glLightfv(GL_LIGHT0, GL_DIFFUSE, lit_diff)\r\n glLightfv(GL_LIGHT0, 
GL_AMBIENT, lit_ambi)\r\n\r\n glEnable(GL_LIGHTING)\r\n glEnable(GL_LIGHT0)\r\n\r\ndef LightPositioning():\r\n glLightfv(GL_LIGHT0, GL_POSITION, light_pos)\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n\r\nclass MyGLWidget(QOpenGLWidget):\r\n\r\n def __init__(self, parent=None):\r\n super().__init__(parent)\r\n \r\n\r\n def initializeGL(self):\r\n # initialize the various state values before any OpenGL drawing is performed\r\n glClearColor(0.5, 0.5, 0.0, 1.0) \r\n glEnable(GL_DEPTH_TEST)\r\n glEnable(GL_TEXTURE_2D)\r\n self.tex1 = load_texture('./Textures/space.jpg')\r\n self.tex2 = load_texture('./Textures/spheremap.jpg')\r\n\r\n self.myLoader = MeshLoader()\r\n self.myLoader.loadMesh(\"./Textures/cow.txt\")\r\n self.angle = 0\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n LightSet()\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n# \r\n def resizeGL(self, width, height):\r\n # set the camera's projection properties here\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluPerspective(60, width/height, 0.1, 100)\r\n\r\n def paintGL(self):\r\n \r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity() \r\n\r\n gluLookAt(0,1.5,2.5, 0,0.5,0, 0,1,0)\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n LightPositioning()\r\n## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n\r\n glBindTexture(GL_TEXTURE_2D, self.tex1)\r\n glEnable(GL_TEXTURE_GEN_S)\r\n glEnable(GL_TEXTURE_GEN_T)\r\n\r\n glTexGenf(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR)\r\n glTexGenf(GL_T, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR)\r\n glPushMatrix()\r\n glRotatef(self.angle, 0, 1, 0)\r\n self.myLoader.draw_display_list()\r\n glPopMatrix()\r\n\r\n glTexGenf(GL_S, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR)\r\n glTexGenf(GL_T, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR)\r\n glPushMatrix()\r\n glTranslatef(-1.5, 0, 0)\r\n glRotatef(self.angle, 0, 1, 0)\r\n self.myLoader.draw_display_list()\r\n glPopMatrix()\r\n\r\n glBindTexture(GL_TEXTURE_2D, self.tex2)\r\n glTexGenf(GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)\r\n glTexGenf(GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)\r\n glPushMatrix()\r\n glTranslatef(1.5, 0, 0)\r\n glRotatef(self.angle, 0, 1, 0)\r\n self.myLoader.draw_display_list()\r\n glPopMatrix()\r\n self.angle += 1.0\r\n\r\n\r\nclass MyWindow(QMainWindow):\r\n\r\n def __init__(self, title=''):\r\n QMainWindow.__init__(self) # initialize the QMainWindow superclass\r\n self.setWindowTitle(title)\r\n\r\n self.glWidget = MyGLWidget() # OpenGL Widget\r\n self.setCentralWidget(self.glWidget)\r\n\r\n self.timer = QTimer(self)\r\n self.timer.setInterval(1)\r\n self.timer.timeout.connect(self.timeout)\r\n self.timer.start()\r\n\r\n def timeout(self):\r\n self.glWidget.update()\r\n \r\ndef main(argv = []):\r\n app = QApplication(argv)\r\n window = MyWindow('Mesh Visualization')\r\n #window.setFixedSize(600, 600)\r\n window.show()\r\n app.exec()\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv)","repo_name":"dknife/2023Graphics","sub_path":"Ex/Textures/06_meshTextureOn.py","file_name":"06_meshTextureOn.py","file_ext":"py","file_size_in_byte":7936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2193258121","text":"## Parts of the code are adapted from: \n## https://github.com/peterwestai2/symbolic-knowledge-distillation/blob/main/purification_code/predict.py\n\nimport os\nimport sys\nimport argparse\nimport pandas as pd\nimport sklearn.metrics\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n\ndef main():\n\n 
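 # Expected input (inferred from the code below): a tab-separated results file
 # with a string "label" column ("v" = valid, "i" = invalid) and a numeric
 # "score" column; fixed thresholds are swept first, then precision at the
 # requested recall levels is read off the sklearn precision-recall curve.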
parser = argparse.ArgumentParser()\n parser.add_argument(\"--dev_result_file\", type=str)\n parser.add_argument(\"--recalls\", nargs=\"+\", type=float, default=[.5, .6, .7, .8, .9])\n parser.add_argument(\"--do_plot\", action=\"store_true\", help=\"plot the precision-recall curve\")\n\n args = parser.parse_args()\n res = pd.read_csv(args.dev_result_file, sep=\"\\t\")\n\n for ts in [0.5, 0.6, 0.7, 0.8, 0.9]: \n\n res[\"new_pred\"] = res[\"score\"].map(lambda x: \"v\" if x >= ts else \"i\")\n acc = accuracy_score(res.label, res[\"new_pred\"])\n precision = precision_score(res.label, res[\"new_pred\"], pos_label=\"v\")\n recall = recall_score(res.label, res[\"new_pred\"], pos_label=\"v\")\n f1 = f1_score(res.label, res[\"new_pred\"], pos_label=\"v\")\n print(\"The threshold at {}: acc {:.5f}; precision {:.5f}; recall {:.5f}; f1 {:.5f}\".format(ts, acc, precision, recall, f1))\n\n val_label = res.label\n val_pred = res.score\n\n val_ps, val_rs, val_thresh = sklearn.metrics.precision_recall_curve(y_true=val_label, \n probas_pred=val_pred,\n pos_label=\"v\")\n if args.do_plot:\n import matplotlib.pyplot as plt\n\n plt.figure(1) \n plt.title('Precision/Recall Curve')\n plt.ylabel('Precision')\n plt.xlabel('Recall')\n\n # recall on the x-axis, precision on the y-axis, matching the labels above\n plt.plot(val_rs, val_ps)\n\n file_ext = args.dev_result_file.split(\"/\")[-1].split(\".\")[0]\n # save before show(): show() closes the figure, so saving afterwards writes a blank image\n plt.savefig(file_ext + \"_prcurve.png\")\n plt.show()\n\n for recall in args.recalls:\n idx = 0 \n while val_rs[idx] > recall:\n idx += 1\n print('Val precision@{:.0f}%: {:.3f}, threshold={:.5f}'.format(recall*100, val_ps[idx], val_thresh[idx]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HKUST-KnowComp/FolkScope","sub_path":"src/classifier/precision_recall_selection.py","file_name":"precision_recall_selection.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"44310962095","text":"import logging\nimport sys\n\nfrom spaceone.core import utils\nfrom spaceone.core.config import default_conf\n\n_REMOTE_URL = []\n_GLOBAL = {}\n_LOGGER = logging.getLogger(__name__)\n\n\ndef init_conf(package, **kwargs):\n set_default_conf()\n\n _GLOBAL['PACKAGE'] = package\n _GLOBAL['SERVICE'] = package.rsplit('.', 1)[-1:][0]\n\n if 'server_type' in kwargs:\n _GLOBAL['SERVER_TYPE'] = kwargs['server_type']\n\n if 'port' in kwargs:\n _GLOBAL['PORT'] = kwargs['port']\n\n\ndef set_default_conf():\n for key, value in vars(default_conf).items():\n if not key.startswith('__'):\n _GLOBAL[key] = value\n\n\ndef get_package():\n return _GLOBAL['PACKAGE']\n\n\ndef get_service():\n return _GLOBAL['SERVICE']\n\n\ndef get_extension_apis():\n return _GLOBAL.get('EXTENSION_APIS', {})\n\n\ndef get_handler(name):\n return _GLOBAL.get('HANDLERS', {}).get(name, {})\n\n\ndef get_connector(name):\n return _GLOBAL.get('CONNECTORS', {}).get(name, {})\n\n\ndef set_service_config():\n \"\"\"\n Get config from service ({package}.conf.global_conf)\n \"\"\"\n\n package = _GLOBAL['PACKAGE']\n if package is None:\n raise ValueError(f'Package is undefined.')\n\n global_module = __import__(f'{package}.conf.global_conf', fromlist=['global_conf'])\n for key, value in vars(global_module).items():\n if not key.startswith('__'):\n _GLOBAL[key] = value\n\n\ndef get_global(key=None, default=None):\n if key:\n return _GLOBAL.get(key, default)\n else:\n return _GLOBAL\n\n\ndef set_global(**config):\n global_conf = get_global()\n\n for key, value in config.items():\n if key in global_conf:\n if not isinstance(value, 
type(global_conf[key])) and global_conf[key] is not None:\n value_type_name = type(global_conf[key]).__name__\n raise ValueError(f'Value type is invalid. (GLOBAL.{key} = {value_type_name})')\n\n if isinstance(value, dict):\n global_conf[key] = utils.deep_merge(value, global_conf[key])\n else:\n global_conf[key] = value\n\n\ndef set_remote_conf_from_file(config_yml: str):\n file_conf = utils.load_yaml_from_file(config_yml)\n url_conf: list = file_conf.get('REMOTE_URL', [])\n\n for url in url_conf:\n endpoint_info = utils.parse_endpoint(url)\n if endpoint_info['scheme'] == 'file':\n yaml_file = f'{endpoint_info[\"path\"]}'\n conf_to_merge = utils.load_yaml_from_file(yaml_file)\n set_global(**conf_to_merge)\n elif endpoint_info['scheme'] in ['http', 'https']:\n conf_to_merge = utils.load_yaml_from_url(url)\n set_global(**conf_to_merge)\n\n\ndef set_file_conf(config_yml: str):\n file_conf = utils.load_yaml_from_file(config_yml)\n global_conf = file_conf.get('GLOBAL', {})\n set_global(**global_conf)\n","repo_name":"100sun/python-core","sub_path":"src/spaceone/core/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"10071402008","text":"import random\n# Bubble the lighter elements to the front of the array\n\n\ndef bubblesort(elems):\n # from left to right\n # if left > right swap\n # check until at start of array\n # ex 3 4 2\n # is 4 > 2 ? swap resulting in 324\n # is 3 > 2 ? swap 234\n # start again up to before last elem\n for x in range(len(elems)):\n for y in range(len(elems)-1, x-1, -1):\n if elems[y-1] > elems[y]:\n elems[y-1], elems[y] = elems[y], elems[y-1]\n\n\ntest = [random.randint(0, 10) for x in range(10)]\nprint(test)\nbubblesort(test)\nprint(test)\n","repo_name":"xavierpjb/AlgoDataStruct","sub_path":"python/IkAlgs/sorting/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3115850173","text":"from datetime import datetime, timedelta\r\nimport os\r\nfrom airflow import DAG\r\nfrom airflow.models import Variable\r\nfrom airflow.operators.dummy_operator import DummyOperator\r\nfrom airflow.operators.postgres_operator import PostgresOperator\r\nfrom operators import (StageToRedshiftOperator, LoadFactOperator,\r\n LoadDimensionOperator, DataQualityOperator)\r\nfrom helpers import SqlQueries\r\n\r\n# get S3 bucket path from Airflow variables\r\n# S3 bucket path can be set/modified in Airflow variables\r\n\r\ns3_bucket = Variable.get('s3_bucket')\r\ns3_prefix_log = Variable.get('s3_prefix_log')\r\ns3_prefix_song = Variable.get('s3_prefix_song')\r\n\r\n# define defautl argument for DAG\r\ndefault_args = {\r\n 'owner': 'sdelopez',\r\n 'start_date': datetime(2019, 1, 12),\r\n 'depends_on_past': False,\r\n 'retries': 3,\r\n 'retry_delay': 300,\r\n}\r\n\r\n\r\ndag = DAG('02_sparkify_etl_dag',\r\n default_args=default_args,\r\n description='Extract from S3, Transform and Load data in Redshift with Airflow pipeline',\r\n schedule_interval='@hourly',\r\n catchup=False\r\n )\r\n\r\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\r\n\r\n# Define task to Extract songs and events data to staging table\r\nstage_events_to_redshift = StageToRedshiftOperator(\r\n task_id='Stage_events',\r\n dag=dag,\r\n s3_bucket=s3_bucket,\r\n s3_prefix=s3_prefix_log,\r\n table='staging_events',\r\n copy_options=\"JSON 
's3://udacity-dend/log_json_path.json'\"\r\n)\r\n\r\nstage_songs_to_redshift = StageToRedshiftOperator(\r\n task_id='Stage_songs',\r\n dag=dag,\r\n s3_bucket=s3_bucket,\r\n s3_prefix=s3_prefix_song,\r\n table='staging_songs',\r\n copy_options=\"FORMAT AS JSON 'auto'\"\r\n)\r\n\r\nload_songplays_table = LoadFactOperator(\r\n task_id='Load_songplays_fact_table',\r\n dag=dag,\r\n table='songplays',\r\n select_sql=SqlQueries.songplays_table_insert,\r\n append_data = False\r\n)\r\n\r\nload_user_dimension_table = LoadDimensionOperator(\r\n task_id='Load_user_dim_table',\r\n dag=dag,\r\n table='users',\r\n select_sql=SqlQueries.users_table_insert,\r\n append_data = False\r\n)\r\n\r\nload_song_dimension_table = LoadDimensionOperator(\r\n task_id='Load_song_dim_table',\r\n dag=dag,\r\n table='songs',\r\n select_sql=SqlQueries.songs_table_insert,\r\n append_data = False\r\n)\r\n\r\nload_artist_dimension_table = LoadDimensionOperator(\r\n task_id='Load_artist_dim_table',\r\n dag=dag,\r\n table='artists',\r\n select_sql=SqlQueries.artists_table_insert,\r\n append_data = False\r\n)\r\n\r\nload_time_dimension_table = LoadDimensionOperator(\r\n task_id='Load_time_dim_table',\r\n dag=dag,\r\n table='time',\r\n select_sql=SqlQueries.time_table_insert,\r\n append_data = False\r\n)\r\n\r\nrun_quality_checks = DataQualityOperator(\r\n task_id='Run_data_quality_checks',\r\n dag=dag,\r\n dq_checks=[{'check_sql': \"SELECT COUNT(*) FROM users WHERE userid is null\", 'expected_result': 0},\r\n {'check_sql': \"SELECT COUNT(*) FROM songs WHERE songid is null\", 'expected_result': 0},\r\n { 'check_sql': \"SELECT COUNT(*) FROM artists WHERE name IS NULL\", 'expected_result': 0 },\r\n { 'check_sql': \"SELECT COUNT(*) FROM time WHERE weekday IS NULL\", 'expected_result': 0 },\r\n { 'check_sql': \"SELECT COUNT(*) FROM songplays WHERE songid IS NULL\", 'expected_result': 0 }\r\n ],\r\n redshift_conn_id='redshift'\r\n)\r\n\r\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\r\n\r\n# Define tasks DAG for Sparkify ETL process\r\n\r\n\r\nstart_operator >> [stage_events_to_redshift, stage_songs_to_redshift]\r\n[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table\r\n\r\nload_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table]\r\n\r\n[load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks\r\nrun_quality_checks >> end_operator","repo_name":"sdelopez/05_udacity_data_engineering_project_data_pipelines","sub_path":"airflow/dags/sparkify_etl_dag.py","file_name":"sparkify_etl_dag.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21790841614","text":"# zope imports\nfrom zope.event import notify\nfrom zope.formlib import form\nfrom zope.lifecycleevent import ObjectModifiedEvent\n\nfrom Products.Five.browser import pagetemplatefile\n\n# plone imports\nfrom plone.app.form import base\nfrom plone.app.form.validators import null_validator\nfrom plone.app.form.events import EditCancelledEvent, EditSavedEvent\n\n# easyshop imports\nfrom easyshop.core.config import _\nfrom easyshop.core.config import DEFAULT_SHOP_FORM\nfrom easyshop.core.interfaces import ICreditCard\nfrom easyshop.payment.content.credit_card import CreditCard\n\nclass CreditCardEditForm(base.EditForm):\n \"\"\"\n \"\"\"\n template = 
pagetemplatefile.ZopeTwoPageTemplateFile(DEFAULT_SHOP_FORM) \n form_fields = form.Fields(ICreditCard)\n \n label = _(u\"Edit Credit Card\")\n description = _(\"To change your credit card edit the form and press save.\")\n form_name = _(u\"Edit Credit Card\")\n\n @form.action(_(u\"label_save\", default=\"Save\"), condition=form.haveInputWidgets, name=u'save')\n def handle_save_action(self, action, data):\n \"\"\"\n \"\"\"\n if form.applyChanges(self.context, self.form_fields, data, self.adapters):\n notify(ObjectModifiedEvent(self.context))\n notify(EditSavedEvent(self.context))\n self.status = \"Changes saved\"\n else:\n notify(EditCancelledEvent(self.context))\n self.status = \"No changes\"\n\n self.context.reindexObject()\n self.nextUrl()\n\n @form.action(_(u\"label_cancel\", default=u\"Cancel\"), validator=null_validator, name=u'cancel')\n def handle_cancel_action(self, action, data):\n \"\"\"\n \"\"\" \n notify(EditCancelledEvent(self.context))\n self.nextUrl()\n \n def nextUrl(self):\n \"\"\"\n \"\"\"\n url = self.request.get(\"goto\", \"\")\n if url != \"\":\n self.request.response.redirect(url)\n else:\n parent = self.context.aq_inner.aq_parent\n url = parent.absolute_url() + \"/manage-payment-methods\"\n self.request.response.redirect(url)\n \nclass CreditCardAddForm(base.AddForm):\n \"\"\"\n \"\"\"\n template = pagetemplatefile.ZopeTwoPageTemplateFile(DEFAULT_SHOP_FORM)\n form_fields = form.Fields(ICreditCard)\n \n label = _(u\"Add Credit Card\")\n form_name = _(u\"Add Credit Card\")\n\n @form.action(_(u\"label_save\", default=u\"Save\"), condition=form.haveInputWidgets, name=u'save')\n def handle_save_action(self, action, data):\n \"\"\"\n \"\"\"\n self.createAndAdd(data)\n \n @form.action(_(u\"label_cancel\", default=u\"Cancel\"), validator=null_validator, name=u'cancel')\n def handle_cancel_action(self, action, data):\n \"\"\"\n \"\"\"\n self.context.reindexObject()\n self.nextUrl()\n \n def createAndAdd(self, data):\n \"\"\"\n \"\"\"\n # add address\n id = self.context.generateUniqueId(\"CreditCard\")\n\n credit_card = CreditCard(id)\n credit_card.card_type = data.get(\"card_type\")\n credit_card.card_owner = data.get(\"card_owner\")\n credit_card.card_number = data.get(\"card_number\")\n credit_card.card_expiration_date_month = data.get(\"card_expiration_date_month\")\n credit_card.card_expiration_date_year = data.get(\"card_expiration_date_year\")\n \n self.context._setObject(id, credit_card)\n\n credit_card.reindexObject()\n self.nextUrl()\n\n def nextUrl(self):\n \"\"\"\n \"\"\"\n url = self.request.get(\"goto\", \"\")\n if url != \"\":\n self.request.response.redirect(url)\n else:\n url = self.context.absolute_url() + \"/manage-payment-methods\"\n self.request.response.redirect(url)\n","repo_name":"ned14/Easyshop","sub_path":"src/easyshop.payment/easyshop/payment/browser/credit_card.py","file_name":"credit_card.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"32475672194","text":"from django.shortcuts import render, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Count\n\nfrom accounts.models import Car\nfrom accounts.views import getNotifications\nfrom .models import Drive\n\n\n@login_required(login_url=\"/\")\ndef drives(request):\n \"\"\" Drive management page \"\"\"\n company = request.user.profile.company\n drs = Drive.objects.filter(device__company=company).order_by('-startTime')\n\n # Search filter\n 
searchKey = request.GET.get(\"search-key\")\n\n if searchKey != '' and searchKey is not None:\n drs = drs.filter(device__car__licensePlate__icontains=searchKey)\n \n lastestAlerts, unreadCounts = getNotifications(company)\n\n context = {'drs': drs, \"notifications\": lastestAlerts, \"unreadNotis\": unreadCounts}\n return render(request, \"drives.html\", context)\n\n\n@login_required(login_url=\"/\")\ndef alerts(request):\n \"\"\" Alerts management page \"\"\"\n company = request.user.profile.company\n drs = Drive.objects.filter(device__company=company, alert__gt=0).order_by('-startTime').annotate(total=Count('id'))\n \n # Search filter\n searchKey = request.GET.get(\"search-key\")\n\n if searchKey != '' and searchKey is not None:\n drs = drs.filter(device__car__licensePlate__icontains=searchKey)\n \n lastestAlerts, unreadCounts = getNotifications(company)\n \n context = {'drs': drs, \"notifications\": lastestAlerts, \"unreadNotis\": unreadCounts}\n return render(request, \"alerts.html\", context)\n\n\n\n@login_required(login_url=\"/\")\ndef driveDetail(request, id):\n \"\"\" Detail of a specific drive (start, end, alerts ...) \"\"\"\n company = request.user.profile.company\n drive = Drive.objects.get(id=id)\n\n if drive.device.company != company:\n return HttpResponse('You are not authorized to view this page')\n\n alerts = drive.alert_set.all()\n alcohol = alerts.filter(detect=\"Alcohol\").exists()\n\n for alert in alerts:\n alert.isRead = True\n alert.save()\n \n alerts = len(alerts)\n\n lastestAlerts, unreadCounts = getNotifications(company)\n\n context = {\n \"drive\": drive, \n \"alerts\": alerts, \n \"alcohol\": alcohol, \n \"notifications\": lastestAlerts, \n \"unreadNotis\": unreadCounts\n }\n return render(request, \"driveDetail.html\", context)\n\n\n@login_required(login_url=\"/\")\ndef carDrives(request, id):\n \"\"\" Return every drive of an individual car \"\"\"\n company = request.user.profile.company\n car = Car.objects.get(id=id)\n\n if car.company != company:\n return HttpResponse('You are not authorized to view this page
')\n \n drives = Drive.objects.filter(device__car=car).order_by('-startTime')\n\n lastestAlerts, unreadCounts = getNotifications(company)\n context = {\n 'drs': drives, \n 'car': car, \n \"notifications\": lastestAlerts, \n \"unreadNotis\": unreadCounts\n }\n return render(request, \"car.html\", context)","repo_name":"NgoQuocBao1010/Scientific-Project","sub_path":"src/realtime/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"944277610","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 16 09:05:22 2017\r\n\r\n@author: Kiran\r\n\"\"\"\r\nimport secrets\r\nimport base64\r\nfrom PIL import Image\r\nimport os\r\nlibmat = [0 for x in range(40)]\r\nkey=[0 for x in range(40)]\r\ntxt = 'a'\r\nnum = int(-1)\r\nt = int(0)\r\npt = int(0)\r\nfor i in range(26):\r\n a = secrets.choice(range(0,10))\r\n b = secrets.choice(range(30,40))\r\n c = secrets.choice(range(10,20))\r\n d = secrets.choice(range(20,30))\r\n if(libmat[a]==0):\r\n libmat[a]=chr(ord(txt)+i)\r\n elif(libmat[b]==0):\r\n libmat[b]=chr(ord(txt)+i)\r\n elif(txt=='z'):\r\n break\r\n elif(libmat[c]==0):\r\n libmat[c]=chr(ord(txt)+i)\r\n elif(libmat[d]==0):\r\n libmat[d]=chr(ord(txt)+i)\r\n else:\r\n for j in range (40):\r\n if(libmat[j]==0):\r\n libmat[j]=chr(ord(txt)+i)\r\n break\r\nfor i in range (40):\r\n a = secrets.choice(range(0,10))\r\n b = secrets.choice(range(30,40))\r\n c = secrets.choice(range(10,20))\r\n d = secrets.choice(range(20,30))\r\n num = num + 1\r\n if(libmat[a]==0):\r\n libmat[a]=str(num)\r\n elif(libmat[b]==0):\r\n libmat[b]=str(num)\r\n elif(num==10):\r\n break\r\n elif(libmat[c]==0):\r\n libmat[c]=str(num)\r\n elif(libmat[d]==0):\r\n libmat[d]=str(num)\r\n else:\r\n for i in range (40):\r\n if(libmat[i]==0):\r\n libmat[i]=str(num)\r\n break\r\nfor i in range(40):\r\n if(libmat[i]==0):\r\n libmat[i]=\" \"\r\n break\r\nfor i in range(40):\r\n if(libmat[i]==0):\r\n libmat[i]=\"_\"\r\n break\r\nfor i in range(40):\r\n if(libmat[i]==0):\r\n libmat[i]=\",\"\r\n break\r\nfor i in range(40):\r\n if(libmat[i]==0):\r\n libmat[i]=\"$\"\r\n break\r\nuserinput = list(map(str,input().lower()))\r\ninputlen = len(userinput)\r\nmapmat = [0 for x in range(40)]\r\nencodedinput = [0 for x in range(inputlen)]\r\nprint(libmat)\r\nincrementer = secrets.choice(range(1,999))\r\nstartval = secrets.choice(range(1,9999))\r\nprint(startval)\r\nprint(incrementer)\r\nfor i in range(40):\r\n mapmat[i]=startval+incrementer*i\r\nprint(mapmat)\r\nprint(userinput)\r\nfor i in range(inputlen):\r\n tempchar = userinput[i]\r\n for j in range (40):\r\n if(tempchar==libmat[j]):\r\n print(j)\r\n encodedinput[t]=mapmat[j]\r\n t=t+1\r\nprint(encodedinput)\r\nstrout = \"\".join(str(x) for x in encodedinput)\r\nprint(strout)\r\nnewsplit = [strout[i:i+2] for i in range(0,len(strout),2)]\r\nprint(newsplit)\r\nstringoutlen = len(newsplit)\r\nk=int(0)\r\nimg = Image.new( 'RGB', (255,255), \"white\")\r\npixels = img.load() \r\nfor i in range(stringoutlen):\r\n for j in range(stringoutlen):\r\n newsplit = list(map(int, newsplit))\r\n if(k==stringoutlen or k>stringoutlen):\r\n break\r\n pixels[i,j] = (int(newsplit[k]),int(newsplit[k]),int(secrets.choice(range(0,255))))\r\n k=k+1\r\n#img.show()\r\noutpath=input(\"Enter the path\")\r\noutname=input(\"Enter the file name\")\r\nfileformat=input('Enter the file format with dot in front')\r\noutname=str(outname+fileformat)\r\nimg.save(os.path.join(outpath, outname))\r\nwith 
open(os.path.join(outpath, outname), \"rb\") as imageFile:\r\n str = base64.b64encode(imageFile.read())\r\n print (str)\r\nkeystr=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','0','1','2','3','4','5','6','7','8','9',' ',',','$','_']\r\nfor i in range(40):\r\n tempvar = keystr[i]\r\n for j in range(40):\r\n if(tempvar==libmat[j]):\r\n key[pt]=mapmat[j]\r\n pt=pt+1\r\n\r\n","repo_name":"KiranV01/Incognito","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2189414108","text":"import unittest\n\nfrom server import app\n\nfrom model import User, connect_to_db, db\n\nfrom seed import seed_test, load_users\n\nimport bcrypt\n\nfrom datetime import datetime\n\nclass NoDbNoSession(unittest.TestCase):\n \"\"\"tests that don't need session or db\"\"\"\n\n def setUp(self):\n \"\"\"set up test\"\"\"\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n def tearDown(self):\n \"\"\"\"\"\"\n pass\n\n def test_login_no_session(self):\n \"\"\"test login page when you're not logged in\"\"\"\n\n result = self.client.get(\"/login\")\n\n self.assertIn(b'Log In', result.data)\n self.assertIn(b'Create Profile', result.data)\n self.assertNotIn(b'Log Out', result.data)\n\n def test_create_profile_no_session(self):\n \"\"\"test create profile page\"\"\"\n\n result = self.client.get(\"/add-user-form\")\n\n self.assertIn(b'Username', result.data)\n self.assertIn(b'Email', result.data)\n self.assertNotIn(b'Log Out', result.data)\n\n\nclass YesDbNoSession(unittest.TestCase):\n \"\"\"Tests that need db but no session\"\"\"\n\n def setUp(self): #pragma no cover\n \"\"\"set up test with db, and users\"\"\"\n\n #set up testing configurations\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n #connect to db\n connect_to_db(app, 'postgres:///testdb')\n db.create_all()\n\n #add a user to db\n load_users(1)\n\n\n def tearDown(self): #pragma no cover\n \"\"\"close session at end\"\"\"\n\n db.session.remove()\n db.drop_all()\n db.engine.dispose()\n\n\n # def test_login_incorrect_info(self):\n # \"\"\"Test login with wrong password and wrong username\"\"\"\n\n # result = self.client.post('/login-check',\n # data={\"username\": 'wrong',\n # \"password\": 'wrong'},\n # follow_redirects=True)\n\n # self.assertIn(b\"Ooops. The username you \", result.data)\n # self.assertIn(b\"Log In\", result.data)\n # self.assertNotIn(b\"Log Out\", result.data)\n # self.assertNotIn(b\"Ooops. 
Looks like you\", result.data)\n\n\nclass EmptyDbNoSession(unittest.TestCase):\n \"\"\"Tests that need db but no session\"\"\"\n\n def setUp(self):\n \"\"\"set up test with db, and users\"\"\"\n\n #set up testing configurations\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n #connect to db\n connect_to_db(app, 'postgres:///testdb')\n db.create_all()\n\n def tearDown(self):\n \"\"\"close session at end\"\"\"\n\n db.session.remove()\n db.drop_all()\n db.engine.dispose()\n\n def test_create_profile(self):\n \"\"\"Create profile with new username\"\"\"\n\n form_data = {'name': 'newname', 'username': 'newusername', 'email': 'newemail@email.com', 'password': 'password'}\n result = self.client.post('/add-user', data=form_data, follow_redirects=True)\n\n self.assertIn(b'Welcome', result.data)\n self.assertIn(b'Log Out', result.data)\n self.assertNotIn(b'Log In', result.data)\n self.assertNotIn(b'Ooops', result.data)\n\n\n\nclass AlmostEmptyDbandSession(unittest.TestCase):\n \"\"\"Test pages with a database but no videos or other data in database \"\"\"\n \n def setUp(self):\n \"\"\"set up test with session, db, and users\"\"\"\n\n #set up testing configurations\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n #connect to db\n connect_to_db(app, 'postgres:///testdb')\n db.create_all()\n\n #add a user to db\n hashed = bcrypt.hashpw('password'.encode('utf-8'), bcrypt.gensalt())\n\n user = User(user_id= 1, name='joe', username='joey', password=hashed.decode('utf-8'), email='email@email.com')\n db.session.add(user)\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 1\n\n def tearDown(self):\n \"\"\"close session at end\"\"\"\n\n db.session.remove()\n db.drop_all()\n db.engine.dispose()\n\n\n def test_homepage(self):\n \"\"\"Test what you can see in homepage if logged in\"\"\"\n\n result = self.client.get('/',\n data={},\n follow_redirects=True)\n\n self.assertIn(b\"Log Out\", result.data)\n self.assertNotIn(b\"Log In\", result.data)\n self.assertNotIn(b\"Create Profile\", result.data)\n\n def test_logout(self):\n \"\"\"Test log out page\"\"\"\n\n result = self.client.get('/logout', data={}, follow_redirects=True)\n\n self.assertIn(b\"Yes, please\", result.data)\n self.assertIn(b\"No, take\", result.data)\n self.assertNotIn(b\"Log In\", result.data)\n\n def test_logout_check(self):\n \"\"\"Test log out check\"\"\"\n\n result = self.client.get('/logout-check', data={}, follow_redirects=True)\n\n self.assertIn(b\"Log In\", result.data)\n self.assertNotIn(b\"Log Out\", result.data)\n\n def test_profile(self):\n\n result = self.client.get('/profile', data={}, follow_redirects=True)\n self.assertIn(b\"Log Out\", result.data)\n\n def test_login_correct_info(self):\n \"\"\"Test login with correct info\"\"\"\n\n self.client.get('/logout', data={}, follow_redirects=True)\n # hashed = bcrypt.hashpw('password'.encode('utf-8'), bcrypt.gensalt())\n\n result = self.client.post('/login-check',\n data={'username': 'joey',\n 'password': 'password'},\n follow_redirects=True)\n self.assertIn(b\"You're logged in\", result.data)\n self.assertIn(b\"Log Out\", result.data)\n self.assertNotIn(b\"Log In\", result.data)\n self.assertNotIn(b\"Ooops\", result.data)\n\n def test_before_login(self):\n \"\"\"test that you are redirected to login if not logged in\"\"\"\n\n self.client.get('/logout-check', data={}, follow_redirects=True)\n\n result = 
self.client.get('/challenge',\n data={},\n follow_redirects=True)\n\n self.assertIn(b\"Log In\", result.data)\n # self.assert\n\n\nclass SeededDbandSession(unittest.TestCase):\n \"\"\"Test pages with a database but no videos or other data in database \"\"\"\n \n def setUp(self):\n \"\"\"set up test with session, db, and users\"\"\"\n\n #set up testing configurations\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n #connect to db\n connect_to_db(app, 'postgres:///testdb')\n db.create_all()\n\n\n #seed db with data \n seed_test()\n\n #set up session\n with self.client as c:\n with c.session_transaction() as session:\n # session.rollback()\n session['user_id'] = 1\n\n def tearDown(self):\n \"\"\"close session at end\"\"\"\n\n db.session.remove()\n db.drop_all()\n db.engine.dispose()\n\n\n def test_profile(self):\n \"\"\"Test that profile exists\"\"\"\n\n result = self.client.get('/profile', data={}, follow_redirects=True)\n\n self.assertIn(b\"Details\", result.data)\n self.assertIn(b\"adjective\", result.data)\n self.assertIn(b\"Challenge\", result.data)\n self.assertIn(b'category', result.data)\n self.assertIn(b'completion', result.data)\n self.assertIn(b'social', result.data)\n\n\n\n def test_video_details(self):\n \"\"\"Test the video details page\"\"\"\n\n result = self.client.get('/video-upload/filename_1.mp4', data = {}, follow_redirects=True)\n\n # self.assertIn(b'Added', result.data)\n self.assertIn(b'adjective', result.data)\n self.assertIn(b'category', result.data)\n self.assertIn(b'1', result.data)\n\n def test_video_upload_form(self):\n \"\"\"Test the video upload form\"\"\"\n\n result = self.client.get('/video-upload/challenge/challenge', data = {}, follow_redirects=True)\n # result = self.client.get('/video-upload/', data = {}, follow_redirects=True)\n\n\n self.assertIn(b'challenge', result.data)\n self.assertIn(b'description', result.data)\n\n self.assertIn(b'Choose some tags', result.data)\n\n\n def test_point_giving(self):\n \"\"\"Test one users giving another user a point\"\"\"\n\n info = {'cat_vid_id': 'category_1'}\n result = self.client.post('/add_point', data=info, follow_redirects=True)\n\n self.assertIn(b'2', result.data)\n\n def test_social_points_update(self):\n\n info = {'cat_vid_id': 'category_1'}\n self.client.post('/add_point', data=info, follow_redirects=True)\n\n result = self.client.get('/profile', data={}, follow_redirects=True)\n\n self.assertIn(b\"level 1 | total: 1\", result.data)\n\n def test_point_flashing(self):\n \"\"\"Tests that if a user gets a point while logged out, they get a flash message when they log back in\"\"\"\n\n #first (logged in as user 1) give a point to user 2\n info = {'cat_vid_id': 'category_2'}\n self.client.post('/add_point', data=info, follow_redirects=True)\n\n #log user 1 out\n self.client.get('/logout-check', data={}, follow_redirects=True)\n\n\n # log in as user_2 and check for flash message\n login_info_1 = {'username': 'username_2', 'password':'password'}\n result_1 = self.client.post('/login-check', data=login_info_1, follow_redirects=True)\n self.assertIn(b\"You got a new category point\", result_1.data)\n\n\n\n\n\n\n######################################################################################################################################\nif __name__ == \"__main__\":\n\n unittest.main()\n\n\n 
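# A minimal illustrative sketch (not part of the original suite) of the bcrypt
# verification that the login tests above depend on; assumes `stored_hash` is
# the utf-8 hash string saved on the User model in setUp().
def password_matches(candidate, stored_hash):
    # checkpw re-hashes the candidate with the salt embedded in stored_hash
    return bcrypt.checkpw(candidate.encode('utf-8'), stored_hash.encode('utf-8'))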
","repo_name":"ejbryant28/fun-game-project","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71316138113","text":"# Digit cancelling fractions\r\n#\r\n# https://projecteuler.net/problem=33\r\n\r\nfrom math import gcd, prod\r\n\r\ndef cancel(a, b):\r\n digitsA = list(str(a))\r\n digitsB = list(str(b))\r\n for i in range(1, 10):\r\n if str(i) in digitsA and str(i) in digitsB:\r\n digitsA.remove(str(i))\r\n digitsB.remove(str(i))\r\n if int(digitsB[0]) == 0:\r\n return False\r\n if int(digitsA[0]) / int(digitsB[0]) == a / b:\r\n return True\r\n else:\r\n break\r\n return False\r\n\r\nnumerators = []\r\ndenominators = []\r\nfor a in range(10, 99):\r\n for b in range(a + 1, 100):\r\n if cancel(a, b) == True:\r\n numerators.append(a)\r\n denominators.append(b)\r\n\r\ni = 0\r\nfor numerator in numerators:\r\n print(str(numerator) + \" / \" + str(denominators[i]))\r\n i += 1\r\n\r\nnumerator = prod(numerators)\r\ndenominator = prod(denominators)\r\n\r\nprint(denominator // gcd(numerator, denominator))\r\n","repo_name":"TomasPetrikas/Project_Euler","sub_path":"Python/PE-33.py","file_name":"PE-33.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71254709953","text":"from django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\n\nclass Review(models.Model):\n title = models.ForeignKey(\n 'titles.Title',\n on_delete=models.CASCADE,\n related_name='titles',\n verbose_name='Произведение',\n )\n text = models.TextField(\n verbose_name='Рецензия',\n )\n author = models.ForeignKey(\n 'users.User',\n on_delete=models.CASCADE,\n related_name='title_authors',\n verbose_name='Автор рецензии',\n )\n score = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(1), MaxValueValidator(10)],\n verbose_name='Оценка',\n )\n pub_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата публикации рецензии',\n db_index=True,\n )\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Рецензия'\n verbose_name_plural = 'Рецензии'\n\n\nclass Comment(models.Model):\n review = models.ForeignKey(\n 'Review',\n on_delete=models.CASCADE,\n related_name='reviews',\n verbose_name='Рецензия',\n )\n text = models.TextField(\n verbose_name='Комментарий',\n )\n author = models.ForeignKey(\n 'users.User',\n on_delete=models.CASCADE,\n related_name='comment_authors',\n verbose_name='Автор комментария',\n )\n pub_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата публикации комментария',\n db_index=True,\n )\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Комментарий'\n verbose_name_plural = 'Комментарии'\n","repo_name":"zaleksandrne/infra_sp2","sub_path":"reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12317161958","text":"'''\nCreated on Sep 26, 2013\n\n@author: joshua\n'''\n\nfrom django.utils.cache import patch_response_headers\nfrom django.conf import settings\n\nimport re\n\nimport logging\nlog = logging.getLogger(__name__)\n\nclass CachetimeLookup(object):\n def __init__(self, default=3601, patterns = None):\n self.default = default\n self.patterns = list((re.compile(pattern),timeout) for (pattern,timeout) in patterns or ())\n \n\n 
def get_cache_time(self, path):\n for (pattern,timeout) in self.patterns:\n if pattern.match(path):\n return timeout\n return self.default\n\nAPP_DEFAULT_CACHE = getattr(settings, 'APP_DEFAULT_CACHE', 3600)\nAPP_DEFAULT_SHORT_CACHE = getattr(settings, 'APP_DEFAULT_SHORT_CACHE', 600)\n\ncache_lookup = CachetimeLookup(\n default=getattr(settings, 'CACHE_MIDDLEWARE_SECONDS', 3601),\n patterns = (\n (u'/default_short/', APP_DEFAULT_SHORT_CACHE),\n (u'/app/(default|short)/.*', APP_DEFAULT_SHORT_CACHE),\n (u'/admin/.*', 0),\n (u'/', APP_DEFAULT_CACHE),\n )\n )\n\ndef get_headers(request):\n regex = re.compile('^HTTP_')\n d = dict((regex.sub('', header), value) for (header, value) \n in request.META.items() if header.startswith('HTTP_'))\n return d\n\nclass CacheHeadersMiddleware(object):\n \"\"\" overrides all Cache-Control settings on responses and injects headers from django.utils.cach.patch_response_headers \"\"\"\n\n def process_response(self, request, response):\n if request.method in ['GET', 'HEAD']:\n path = request.path\n max_age = cache_lookup.get_cache_time(path)\n\n # dump current Cache-Control headers\n del response['Cache-Control']\n # inject headers\n patch_response_headers(response, cache_timeout=max_age)\n\n return response\n","repo_name":"MadeInHaus/django-template","sub_path":"backend/apps/utils/cache_headers_middleware.py","file_name":"cache_headers_middleware.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23615097391","text":"import sys\r\n\r\ndef robot(s,case_num):\r\n\tc = s.split()[1:]\r\n\ttotalTime = 0\r\n\tcurrent = {'O':1,'B':1}\r\n\tct = 0\r\n\tprevc = ''\r\n\tfor i in range(0,len(c),2):\r\n\t\ttime_needed = abs(int(c[i+1]) - current[c[i]])\r\n\t\tif prevc == c[i]:\r\n\t\t\ttd = time_needed + 1\r\n\t\t\ttotalTime += td\r\n\t\t\tct += td\r\n\t\telse:\r\n\t\t\tif time_needed <= ct:\r\n\t\t\t\tct = 1\r\n\t\t\t\ttotalTime += 1\r\n\t\t\telse:\r\n\t\t\t\tct = time_needed - ct + 1\r\n\t\t\t\ttotalTime += ct\r\n\t\tcurrent[c[i]] = int(c[i+1])\r\n\t\tprevc = c[i]\r\n\t\r\n\treturn 'Case #' + str(case_num) + ': ' + str(totalTime)\r\n\r\n\r\nfilename = sys.argv[1]\r\nf = open(filename, 'r')\r\ninputs = f.readlines()\r\ncase_num = 1\r\noutputs = []\r\nfor i in inputs[1:]:\r\n\toutputs.append(robot(i,case_num)+'\\n')\r\n\tcase_num += 1\r\n\r\n\r\nfo = open(filename.split('.')[0]+'.out','w+')\r\nfo.writelines(outputs)\r\nfo.close()\r\nf.close()\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_74/679.py","file_name":"679.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17959092571","text":"import os\nimport torch\n# 0. pt模型下载及初始化\nmodel = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)\nmodel.eval()\n# inference\nx = torch.randn(1, 3, 640, 640)\n# 1. pt ---> onnx\ntorch_out = torch.onnx._export(model, x, \"yolop.onnx\", export_params=True)\n\n# 2. onnx --> onnxsim\nos.system(\"python3 -m onnxsim yolop.onnx sim.onnx\")\n\n# 3. onnx --> ncnn\nos.system(\"onnx2ncnn sim.onnx ncnn.param ncnn.bin\")\n\n# 4. ncnn --> optmize ---> ncnn\nos.system(\"ncnnoptimize ncnn.param ncnn.bin opt.param opt.bin 1\") # 数字0 代表fp32 ;1代表fp16\n\n# ==== pnnx方法\n# # 1. pt --> torchscript\n# traced_script_module = torch.jit.trace(model, torch.randn(1, 3, 640, 640), strict=False)\n# traced_script_module.save(\"ts.pt\")\n\n# # 2. 
ts --> pnnx --> ncnn\n# os.system(\"pnnx ts.pt inputshape=[1,3,640,640] device=cpu\") # 可能错误\n\n# # 3. ncnn ---> optmize ----> ncnn\n# os.system(\"ncnnoptimize ts.ncnn.param ts.ncnn.bin opt.param opt.bin 1\") # 数字0 代表fp32 ;1代表fp16\n\n\n# 两种均报错\n\n# 模型复现失败,急用的可前往: https://github.com/EdVince/YOLOP-NCNN\n","repo_name":"Baiyuetribe/ncnn-models","sub_path":"object_dection/yolop/models/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"61"} +{"seq_id":"6057720258","text":"import sys\nimport numpy as np\n\n\ndef p1(arrival, buses):\n min_wait = sys.maxsize\n bus_time = 0\n for bus in buses:\n diff = arrival/bus[1]\n wait_time = ((int(diff)+1) * bus[1]) - arrival\n if wait_time < min_wait:\n min_wait = wait_time\n bus_time = bus[1]\n\n return min_wait * bus_time\n\n\ndef p2(buses):\n N = 1\n for bus in buses:\n N = N * bus[1]\n\n return sum((m - r) * N // m * pow(N // m, -1, m) for r, m in buses) % N\n\n\nwith open(\"data/day13.txt\") as f:\n arrival = int(f.readline())\n buses = [\n (i, int(bus))\n for i, bus in enumerate(f.readline().strip().split(\",\"))\n if bus != \"x\"\n ]\n\nprint(\"Part 1: \"+str(p1(arrival, buses)))\nprint(\"Part 2: \"+str(p2(buses)))\n","repo_name":"FreddieBrown/Advent-of-Code-2020","sub_path":"src/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28758670540","text":"\"\"\"\n编写一个函数来查找字符串数组中的最长公共前缀。\n\n如果不存在公共前缀,返回空字符串 \"\"。\n\n示例 1:\n\n输入: [\"flower\",\"flow\",\"flight\"]\n输出: \"fl\"\n示例 2:\n\n输入: [\"dog\",\"racecar\",\"car\"]\n输出: \"\"\n解释: 输入不存在公共前缀。\n说明:\n\n所有输入只包含小写字母 a-z 。\n\"\"\"\nfrom typing import List\n\n\n\"\"\"\n方法一:\n\t思路:找到最短字符串当做基准元素。依次将基准元素和后面的元素进行比较不断更新基准元素,直到基准元素和所有元素都满足最长公共前缀的条件\n\"\"\"\ndef longestCommonPrefix(strs: List[str]) -> str:\n\t# 处理临界\n\tif len(strs) == 0:\n\t\treturn ''\n\t# 最小串当做基准元素\n\tprefix = min(strs, key=lambda x: len(x))\n\tfor i in range(len(strs)):\n\t\tfor j in range(len(prefix)):\n\t\t\tif strs[i].find(prefix) != 0:\n\t\t\t\tprefix = prefix[:len(prefix) - 1]\n\t\t\t\tcontinue\n\treturn prefix\n\n\n\"\"\"\n方法二: zip函数\n\t思路:zip函数将传入对象打包成元组 \n\t[\"flower\",\"flow\",\"flight\"] 打包后的结果:[('f', 'f', 'f'),('l', 'l', 'l'),('o', 'o', 'i'),('w', 'w', 'g')]\n\tset()函数,用来建立无序不重复的元素集,如果打包好的列表里的元组set后的长度为1,则是每个字符串中公有的字母,算为一个前缀字母,遇到set后不为1的元组时,则已经有不同字母了,退出查找,返回已找到的前缀字符串。\n\n\"\"\"\ndef longestCommonPrefix1(strs: List[str]) -> str:\n\t# 处理临界\n\tif len(strs) == 0:\n\t\treturn ''\n\tprefix = ''\n\tfor i in zip(*strs):\n\t\tif len(set(i)) == 1:\n\t\t\tprefix += i[0]\n\t\telse:\n\t\t\treturn prefix\n\treturn prefix\n\n\nif __name__ == '__main__':\n\tstrs = [\"flower\",\"flow\",\"flight\"]\n\tprint(longestCommonPrefix(strs))\n\tprint(longestCommonPrefix1(strs))","repo_name":"chenjb04/fucking-algorithm","sub_path":"LeetCode/数组/14最长公共前缀.py","file_name":"14最长公共前缀.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23573840861","text":"\n\ndef add_to_list(G, x, M):\n for i, xM in enumerate(G):\n if M == xM[1]:\n xM[0] += x\n break\n else:\n G.append([x,M])\n G.sort(key=lambda x: x[1])\n\nT = int(input())\n\nfor t in range(T):\n N, K = [int(x) for x in input().split()]\n \n G = [[1, N]]\n \n while True:\n x, M = G.pop()\n if x >= K:\n break\n K -= x\n add_to_list(G, x, M//2)\n add_to_list(G, x, 
(M-1)//2)\n \n print(\"Case #{0}: {1} {2}\".format(t+1, M//2, (M-1)//2))\n \n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/926.py","file_name":"926.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70028543236","text":"from cgi_simple import (\n checkbox,\n div,\n equation,\n fieldset,\n flex_col,\n flex_row,\n flex_wrapper,\n labeled_textarea,\n labeled_text_input,\n labeled_number_input,\n li,\n minus,\n number_input,\n ol,\n option,\n p,\n plus,\n select,\n sidelabel,\n span,\n textarea,\n text_input,\n ul,\n underlabel,\n underlabeled_checkbox,\n)\n\nfrom sheet_data import (\n ATTRIBUTE_SHORTHAND,\n ATTRIBUTE_SKILLS,\n ATTRIBUTES,\n ROLL20_CALC,\n SUBSKILLS,\n)\nimport re\n\n\ndef create_page(destination):\n return flex_col(\n {\"class\": \"page creation-page\"},\n [\n \"\"\"\n This tab is used to create your character.\n \"\"\",\n creation_guidance(),\n subskill_rowids(),\n ],\n )\n\n\ndef creation_guidance():\n return flex_col(\n {\"class\": \"creation-guidance\"},\n [\n creation_step(\n \"Concept\",\n \"\"\"\n Choose a short phrase that describes the core concept of your character.\n \"\"\",\n text_input({\"name\": \"concept\"}),\n ),\n creation_step(\n \"Goals\",\n \"\"\"\n Choose your character's motivations and goals.\n \"\"\",\n textarea({\"name\": \"motivation_and_goals\"}),\n ),\n creation_step(\n \"Species\",\n \"\"\"\n Choose your character's species.\n You should add any numerical changes, such as attribute modifiers, in the Modifiers tab.\n Non-numeric abilities, such as darkvision, should go in the Abilities tab if you want to be able to push a button to reference them.\n If you don't care about seeing the abilities that often, you can record them under \"Passive Abilities\" in the Identity tab.\n Important defensive abilities that you'll want to reference when you're being attacked can go in the large free text area next to your Defenses in the Core tab.\n
\n You should use these same four locations whenever you gain new special abilities from any source, not just from your species.\n \"\"\",\n select(\n {\"class\": \"species\", \"name\": \"species\"},\n [\n option({\"value\": \"\"}, \"\"),\n option({\"value\": \"human\"}, \"Human\"),\n option({\"value\": \"dwarf\"}, \"Dwarf\"),\n option({\"value\": \"elf\"}, \"Elf\"),\n option({\"value\": \"gnome\"}, \"Gnome\"),\n option({\"value\": \"half-elf\"}, \"Half-elf\"),\n option({\"value\": \"half-orc\"}, \"Half-orc\"),\n option({\"value\": \"halfling\"}, \"Halfling\"),\n option({\"value\": \"custom\"}, \"Custom\"),\n option({\"value\": \"animal hybrid\"}, \"(Animal Hybrid)\"),\n option({\"value\": \"changeling\"}, \"(Changeling)\"),\n option({\"value\": \"dragon\"}, \"(Dragon)\"),\n option({\"value\": \"drow\"}, \"(Drow)\"),\n option({\"value\": \"dryaidi\"}, \"(Dryaidi)\"),\n option({\"value\": \"eladrin\"}, \"(Eladrin)\"),\n option({\"value\": \"orc\"}, \"(Orc)\"),\n option({\"value\": \"oozeborn\"}, \"(Oozeborn)\"),\n option({\"value\": \"tiefling\"}, \"(Tiefling)\"),\n ],\n ),\n ),\n creation_step(\n \"Size\",\n \"\"\"\n Set your character's size.\n Normally, your size is Medium.\n Some special abilities can increase your size.\n Changing your size here automatically updates your base speed, Reflex defense, and Stealth skill.\n \"\"\",\n select(\n {\"class\": \"size\", \"name\": \"size\"},\n [\n option({\"value\": \"\"}, \"\"),\n option({\"value\": \"fine\"}, \"Fine\"),\n option({\"value\": \"diminuitive\"}, \"Diminuitive\"),\n option({\"value\": \"tiny\"}, \"Tiny\"),\n option({\"value\": \"small\"}, \"Small\"),\n option({\"value\": \"medium\"}, \"Medium\"),\n option({\"value\": \"large\"}, \"Large\"),\n option({\"value\": \"huge\"}, \"Huge\"),\n option({\"value\": \"gargantuan\"}, \"Gargantuan\"),\n option({\"value\": \"colossal\"}, \"Colossal\"),\n ],\n ),\n ),\n creation_step(\n \"Languages\",\n \"\"\"\n Choose the languages your character can speak.\n \"\"\",\n text_input({\"name\": \"languages\"}),\n ),\n creation_step(\n \"Attributes\",\n \"\"\"\n Choose your character's attributes, not counting any species modifiers.\n As you level up, your attributes increase.\n You can add those improvements in the second row.\n If you have special abilities that modify your attributes, you can add those on the Modifiers tab.\n \"\"\",\n [\n flex_row(\n {\"class\": \"attributes\"},\n [\n labeled_number_input(\n \"Str\", input_attributes={\"name\": f\"strength_at_creation\"}\n ),\n labeled_number_input(\n \"Dex\", input_attributes={\"name\": f\"dexterity_at_creation\"}\n ),\n labeled_number_input(\n \"Con\",\n input_attributes={\"name\": f\"constitution_at_creation\"},\n ),\n labeled_number_input(\n \"Int\",\n input_attributes={\"name\": f\"intelligence_at_creation\"},\n ),\n labeled_number_input(\n \"Per\", input_attributes={\"name\": f\"perception_at_creation\"}\n ),\n labeled_number_input(\n \"Wil\", input_attributes={\"name\": f\"willpower_at_creation\"}\n ),\n ],\n ),\n flex_row(\n {\"class\": \"attribute-scaling\"},\n [\n labeled_number_input(\n \"+Str\", input_attributes={\"name\": f\"strength_level_scaling\"}\n ),\n labeled_number_input(\n \"+Dex\", input_attributes={\"name\": f\"dexterity_level_scaling\"}\n ),\n labeled_number_input(\n \"+Con\",\n input_attributes={\"name\": f\"constitution_level_scaling\"},\n ),\n labeled_number_input(\n \"+Int\",\n input_attributes={\"name\": f\"intelligence_level_scaling\"},\n ),\n labeled_number_input(\n \"+Per\", input_attributes={\"name\": 
f\"perception_level_scaling\"}\n ),\n labeled_number_input(\n \"+Wil\", input_attributes={\"name\": f\"willpower_level_scaling\"}\n ),\n ],\n ),\n ],\n ),\n creation_step(\n \"Base class\",\n \"\"\"\n Choose your character's base class.\n This automatically modifies all of the appropriate statistics, so you shouldn't need to add any custom modifiers to represent your base class.\n \"\"\",\n select(\n {\"class\": \"base-class\", \"name\": \"base_class\"},\n [\n option({\"value\": \"\"}, \"\"),\n option({\"value\": \"barbarian\"}, \"Barbarian\"),\n option({\"value\": \"cleric\"}, \"Cleric\"),\n option({\"value\": \"druid\"}, \"Druid\"),\n option({\"value\": \"fighter\"}, \"Fighter\"),\n option({\"value\": \"monk\"}, \"Monk\"),\n option({\"value\": \"paladin\"}, \"Paladin\"),\n option({\"value\": \"ranger\"}, \"Ranger\"),\n option({\"value\": \"rogue\"}, \"Rogue\"),\n option({\"value\": \"sorcerer\"}, \"Sorcerer\"),\n option({\"value\": \"warlock\"}, \"Warlock\"),\n option({\"value\": \"wizard\"}, \"Wizard\"),\n option({\"value\": \"dragon\"}, \"(Dragon)\"),\n option({\"value\": \"harpy\"}, \"(Harpy)\"),\n option({\"value\": \"oozeborn\"}, \"(Oozeborn)\"),\n option({\"value\": \"brute\"}, \"(Monster - Brute)\"),\n option({\"value\": \"leader\"}, \"(Monster - Leader)\"),\n option({\"value\": \"mystic\"}, \"(Monster - Mystic)\"),\n option({\"value\": \"skirmisher\"}, \"(Monster - Skirmisher)\"),\n option({\"value\": \"sniper\"}, \"(Monster - Sniper)\"),\n option({\"value\": \"warrior\"}, \"(Monster - Warrior)\"),\n ],\n ),\n ),\n creation_step(\n \"Archetypes\",\n \"\"\"\n Choose your character's first class archetype.\n As you gain new class archetypes, you should record them here.\n You will need to add custom modifiers in the Modifiers tab to reflect any numeric effects of your class archetypes.\n If your archetypes give you special abilities, you should add those in the Abilities tab.\n
\n If you have a specific number of abilities known, such as spells or maneuvers, you can record that number as a modifier in the Modifiers tab.\n That will cause you to see that ability listed in \"Abilities Known\" section of the Identity tab so you can more easily remember how many you are supposed to know.\n \"\"\",\n flex_col(\n [\n flex_row(\n {\"class\": \"archetype\"},\n [\n labeled_text_input(\n \"Name\",\n {\"class\": \"archetype-name\"},\n {\"name\": f\"archetype_name_{i}\"},\n ),\n underlabel(\n \"Rank\",\n number_input({\"name\": f\"archetype_rank_{i}\"}),\n ),\n ],\n )\n for i in range(3)\n ]\n ),\n ),\n creation_step(\n \"Weapons\",\n \"\"\"\n Choose or record the weapon groups your character can use.\n \"\"\",\n text_input({\"name\": \"weapon_groups\"}),\n ),\n insight_points_step(),\n skills_step(),\n creation_step(\n \"Items\",\n \"\"\"\n Choose your character's starting items.\n Over time, you'll find many more items, so you should go to the Items tab to record your choices.\n \"\"\",\n \"\",\n ),\n creation_step(\n \"Personality\",\n \"\"\"\n Describe your character's core personality.\n This can be vague, and it can change over time, but it can be useful to record something as a guide.\n \"\"\",\n textarea({\"class\": \"personality\", \"name\": \"personality\"}),\n ),\n creation_step(\n \"Background\",\n \"\"\"\n Describe your character's background.\n This can be as sparse or extensive as you want; there's no one right way to create a character.\n \"\"\",\n textarea({\"class\": \"background\", \"name\": \"background\"}),\n ),\n creation_step(\n \"Appearance\",\n \"\"\"\n Describe your character's appearance.\n This can be as sparse or extensive as you want; there's no one right way to create a character.\n \"\"\",\n textarea({\"class\": \"appearance\", \"name\": \"appearance\"}),\n ),\n creation_step(\n \"Alignment\",\n \"\"\"\n Choose your character's alignment: good or evil, and lawful or chaotic.\n You can decide to stay neutral along either or both alignment dimensions.\n \"\"\",\n textarea({\"class\": \"alignment\", \"name\": \"alignment\"}),\n ),\n creation_step(\n \"Name\",\n \"\"\"\n Choose your character's name.\n \"\"\",\n text_input({\"name\": \"character_name\"}),\n ),\n creation_step(\n \"Finishing up\",\n \"\"\"\n Set your level to 1 at the top of the Core tab, since you're done now!\n You can also choose a chat color for your abilities there, which will help you stand out from other characters in the game.\n \"\"\",\n \"\",\n ),\n feats_step(),\n ],\n )\n\n\ndef creation_step(header, explanation, mechanics):\n return flex_row(\n {\"class\": \"creation-step\"},\n [\n div({\"class\": \"explanation\"}, f\"{header}: \" + explanation),\n div({\"class\": \"mechanics\"}, mechanics),\n ],\n )\n\n\ndef insight_points_step():\n max_insight_points = text_input(\n {\"class\": \"inline-number\", \"readonly\": True, \"name\": \"insight_points\"}\n )\n\n return creation_step(\n \"Insight points\",\n f\"\"\"\n Spend your character's insight points.\n You can use this section to track what you spent insight points on.\n If you spend insight points to learn an additional standard special ability, such as a spell or maneuver, you can record that as a modifer in the Modifiers tab.\n That will keep the number listed in the \"Abilities Known\" section of the Identity tab accurate for you.\n
\n The specific effects of abilities you learn with insight points can be tracked in the Abilities tab if you want to have a button representing the ability, or in the Identity tab if you don't need that.\n
\n As a reminder, you have {max_insight_points} total insight points.\n \"\"\",\n textarea({\"name\": \"insight_points_tracking\"}),\n )\n\n\ndef skills_step():\n class_skills = text_input(\n {\"class\": \"inline-number\", \"readonly\": True, \"name\": \"class_skill_count\"}\n )\n other_trainable_skills = text_input(\n {\"class\": \"inline-number\", \"readonly\": True, \"name\": \"nonclass_skill_count\"}\n )\n\n return creation_step(\n \"Skills\",\n f\"\"\"\n Assign your character's trained skills.\n
\n As a reminder, you should train {class_skills} class skills and {other_trainable_skills} other skills.\n \"\"\",\n fieldset(\n {\"class\": \"repeating_trainedskills\"},\n trained_skill(),\n ),\n )\n\n\ndef trained_skill():\n return flex_row(\n {\"class\": \"skill-row\"},\n [\n select(\n {\"class\": \"trained-skill\", \"name\": \"trained_skill\"},\n [\n option({\"value\": \"\"}, \"\"),\n option({\"value\": \"Awareness\"}, \"Awareness\"),\n option({\"value\": \"Balance\"}, \"Balance\"),\n option({\"value\": \"Climb\"}, \"Climb\"),\n option({\"value\": \"Craft (alchemy)\"}, \"Craft (alchemy)\"),\n option({\"value\": \"Craft (bone)\"}, \"Craft (bone)\"),\n option({\"value\": \"Craft (ceramics)\"}, \"Craft (ceramics)\"),\n option({\"value\": \"Craft (leather)\"}, \"Craft (leather)\"),\n option({\"value\": \"Craft (manuscripts)\"}, \"Craft (manuscripts)\"),\n option({\"value\": \"Craft (metal)\"}, \"Craft (metal)\"),\n option({\"value\": \"Craft (poison)\"}, \"Craft (poison)\"),\n option({\"value\": \"Craft (stone)\"}, \"Craft (stone)\"),\n option({\"value\": \"Craft (textiles)\"}, \"Craft (textiles)\"),\n option({\"value\": \"Craft (wood)\"}, \"Craft (wood)\"),\n option({\"value\": \"Creature Handling\"}, \"Creature Handling\"),\n option({\"value\": \"Deception\"}, \"Deception\"),\n option({\"value\": \"Deduction\"}, \"Deduction\"),\n option({\"value\": \"Devices\"}, \"Devices\"),\n option({\"value\": \"Disguise\"}, \"Disguise\"),\n option({\"value\": \"Endurance\"}, \"Endurance\"),\n option({\"value\": \"Flexibility\"}, \"Flexibility\"),\n option({\"value\": \"Intimidate\"}, \"Intimidate\"),\n option({\"value\": \"Jump\"}, \"Jump\"),\n option({\"value\": \"Knowledge (arcana)\"}, \"Knowledge (arcana)\"),\n option(\n {\"value\": \"Knowledge (dungeoneering)\"},\n \"Knowledge (dungeoneering)\",\n ),\n option(\n {\"value\": \"Knowledge (engineering)\"}, \"Knowledge (engineering)\"\n ),\n option({\"value\": \"Knowledge (items)\"}, \"Knowledge (items)\"),\n option({\"value\": \"Knowledge (local)\"}, \"Knowledge (local)\"),\n option({\"value\": \"Knowledge (nature)\"}, \"Knowledge (nature)\"),\n option({\"value\": \"Knowledge (planes)\"}, \"Knowledge (planes)\"),\n option({\"value\": \"Knowledge (religion)\"}, \"Knowledge (religion)\"),\n option({\"value\": \"Linguistics\"}, \"Linguistics\"),\n option({\"value\": \"Medicine\"}, \"Medicine\"),\n option({\"value\": \"Perform\"}, \"Perform\"),\n option({\"value\": \"Persuasion\"}, \"Persuasion\"),\n option({\"value\": \"Profession\"}, \"Profession\"),\n option({\"value\": \"Ride\"}, \"Ride\"),\n option({\"value\": \"Sleight of Hand\"}, \"Sleight of Hand\"),\n option({\"value\": \"Social Insight\"}, \"Social Insight\"),\n option({\"value\": \"Stealth\"}, \"Stealth\"),\n option({\"value\": \"Survival\"}, \"Survival\"),\n option({\"value\": \"Swim\"}, \"Swim\"),\n ],\n ),\n text_input(\n {\n \"class\": \"hidden\",\n \"name\": \"front_rowid\",\n \"readonly\": True,\n }\n ),\n underlabeled_checkbox(\n \"Class?\",\n input_attributes={\n \"name\": \"is_class_skill\",\n },\n ),\n ]\n )\n\n\ndef feats_step():\n return creation_step(\n \"Feats\",\n \"\"\"\n If you're playing with feats, you can record your feats here.\n You'll need to record any effects of those feats manually as modifiers or abilities.\n \"\"\",\n fieldset(\n {\"class\": \"repeating_feats\"},\n labeled_text_input(\"Feat name\", input_attributes={\"name\": \"feat_name\"}),\n ),\n )\n\n\ndef subskill_rowids():\n return span(\n {\"class\": \"hidden\"},\n [\n text_input({\"name\": 
\"craft_alchemy_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_bone_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_ceramics_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_leather_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_manuscripts_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_metal_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_poison_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_stone_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_textiles_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_wood_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"craft_untrained_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"knowledge_arcana_subskill_rowid\", \"readonly\": True}),\n text_input(\n {\"name\": \"knowledge_dungeoneering_subskill_rowid\", \"readonly\": True}\n ),\n text_input(\n {\"name\": \"knowledge_engineering_subskill_rowid\", \"readonly\": True}\n ),\n text_input({\"name\": \"knowledge_items_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"knowledge_local_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"knowledge_nature_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"knowledge_planes_subskill_rowid\", \"readonly\": True}),\n text_input({\"name\": \"knowledge_religion_subskill_rowid\", \"readonly\": True}),\n text_input(\n {\"name\": \"knowledge_untrained_subskill_rowid\", \"readonly\": True}\n ),\n ],\n )\n","repo_name":"Vadskye/Rise","sub_path":"character_sheet/creation_page.py","file_name":"creation_page.py","file_ext":"py","file_size_in_byte":22383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"38720404904","text":"import unittest\n#from ..Controllers.Create_teacher import *\nfrom ..Controllers.Controller import *\nfrom .Storage import *\nimport copy\n\n\nclass TestCreateIndividualPlan(unittest.TestCase):\n interface = ReadyStorage()\n logic = Controller(interface)\n\n def test_constructor(self):\n self.assertEqual(self.logic.interface, self.interface)\n\n def test_create_i_plan(self):\n new = IndividualPlan(1, \"IdPlan1\", date(2020, 10, 10), [])\n idx = self.logic.add_i_plan(new)\n self.assertEqual(idx, new.get_i_plan_id())\n self.assertEqual(self.logic.interface.get_individual_plan_by_id(idx), new)\n\n existing = self.logic.interface.get_individual_plan_by_id(0)\n with self.assertRaises(RuntimeError) as exception_msg:\n self.logic.add_i_plan(existing)\n self.assertEqual(str(exception_msg), f\"Add: Individual plan with id {existing.get_i_plan_id()} \"\n f\"already in Storage\")\n\n def test_delete_i_plan(self):\n delete_id = 0\n existing = self.logic.interface.get_individual_plan_by_id(delete_id)\n\n idx = self.logic.del_i_plan(delete_id)\n self.assertEqual(idx, existing.get_i_plan_id())\n\n with self.assertRaises(RuntimeError) as exception_msg:\n self.logic.del_i_plan(delete_id)\n self.assertEqual(str(exception_msg), f\"Delete: No Individual plan by this id {delete_id} in Storage\")\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"TagunovVitaliy/institute","sub_path":"areas/area-6/src/Integration tests/Test_create_i_plan.py","file_name":"Test_create_i_plan.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"8052166955","text":"import json\nimport glob\nimport re\nimport os\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='Evaluate output of one synthesizer.')\n\nparser.add_argument('--result', type=str, default='output/__result__',\n help='result dir')\nparser.add_argument('--summary', type=str, default='output/__summary__',\n help='result dir')\n\n\ndef method_name_order(name):\n if 'identity' in name.lower():\n return 0\n if 'clbn' in name.lower():\n return 1\n if 'privbn' in name.lower():\n return 2\n if 'medgan' in name.lower():\n return 3\n if 'veegan' in name.lower():\n return 4\n if 'tablegan' in name.lower():\n return 5\n if 'tvae' in name.lower():\n return 8\n if 'tgan' in name.lower():\n return 9\n return 6\n\ndef coverage(datasets, results):\n ticks = []\n values = []\n\n results = list(results)\n results = sorted(results, key=lambda x: method_name_order(x[0]))\n\n for model, result in results:\n covered = set()\n for item in result:\n assert(item['dataset'] in datasets)\n covered.add(item['dataset'])\n\n ticks.append(model)\n values.append(len(covered) / len(datasets))\n\n plt.cla()\n plt.bar(list(range(len(values))), values, tick_label=ticks)\n plt.xticks(rotation=-90)\n plt.title(\"coverage\")\n plt.ylim(0, 1)\n\n plt.savefig(\"{}/coverage.jpg\".format(summary_dir), bbox_inches='tight')\n\n\ndef save_barchart(barchart, filename):\n barchart = pd.DataFrame(barchart, columns=['synthesizer', 'metric', 'val'])\n\n methods = set()\n for item in barchart['synthesizer']:\n methods.add(item)\n methods = list(methods)\n methods = sorted(methods, key=method_name_order)\n\n barchart.pivot(\"metric\", \"synthesizer\", \"val\")[methods].plot(kind='bar')\n plt.title(dataset)\n plt.xlabel(None)\n plt.legend(title=None, loc=(1.04,0))\n plt.savefig(filename, bbox_inches='tight')\n\n\ndef dataset_performance(dataset, results):\n synthesizer_metric_perform = {}\n\n for synthesizer, all_result in results:\n for one_result in all_result:\n if one_result['dataset'] != dataset:\n continue\n\n if one_result['step'] == 0:\n synthesizer_name = synthesizer\n else:\n synthesizer_name = synthesizer + \"_\" + str(one_result['step'])\n\n\n if not synthesizer_name in synthesizer_metric_perform:\n synthesizer_metric_perform[synthesizer_name] = {}\n try:\n synthesizer_metric_perform[synthesizer_name]['_distance'] = [one_result['distance']]\n except:\n synthesizer_metric_perform[synthesizer_name]['_distance'] = [0]\n\n for model_metric_score in one_result['performance']:\n for metric, v in model_metric_score.items():\n if metric == \"name\":\n continue\n else:\n if not metric in synthesizer_metric_perform[synthesizer_name]:\n synthesizer_metric_perform[synthesizer_name][metric] = []\n\n synthesizer_metric_perform[synthesizer_name][metric].append(v)\n\n if len(synthesizer_metric_perform) == 0:\n return\n\n plt.cla()\n\n barchart = []\n barchart_d = []\n for synthesizer, metric_perform in synthesizer_metric_perform.items():\n for k, v in metric_perform.items():\n v_t = np.mean(v)\n if k == 'r2':\n v_t = v_t.clip(-1, 1)\n if 'likelihood' in k:\n v_t = v_t.clip(-20, 0)\n if k == '_distance':\n barchart_d.append((synthesizer, k, v_t))\n else:\n barchart.append((synthesizer, k, v_t))\n\n save_barchart(barchart, \"{}/{}.jpg\".format(summary_dir, dataset))\n save_barchart(barchart_d, \"{}/{}_d.jpg\".format(summary_dir, dataset))\n\n return synthesizer_metric_perform\n\ndef generate_tabular_result(dataset_perform):\n df = 
pd.DataFrame(data={'alg': []})\n\n for dataset, alg_metric_perform in dataset_perform.items():\n for alg, metric_perform in alg_metric_perform.items():\n for metric, perform in metric_perform.items():\n column_name = \"{}/{}\".format(dataset, metric)\n row_name = alg\n\n if not column_name in df.columns:\n df[column_name] = [None] * len(df)\n\n if not row_name in set(df['alg'].unique()):\n row_id = len(df)\n df.loc[row_id] = [None] * len(df.columns)\n df['alg'][row_id] = alg\n else:\n row_id = df[df['alg'] == row_name].index[0]\n\n df[column_name][row_id] = np.mean(perform)\n\n df.to_csv(\"{}/results.csv\".format(summary_dir))\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n result_files = glob.glob(\"{}/*.json\".format(args.result))\n summary_dir = args.summary\n\n if not os.path.exists(summary_dir):\n os.makedirs(summary_dir)\n\n datasets = glob.glob(\"data/*/*.npz\")\n datasets = [re.search('.*/([^/]*).npz', item).group(1) for item in datasets]\n\n results = []\n for result_file in result_files:\n model = re.search('.*/([^/]*).json', result_file).group(1)\n with open(result_file) as f:\n res = json.load(f)\n\n results.append((model, res))\n\n coverage(datasets, results)\n\n dataset_perform = {}\n for dataset in datasets:\n perform = dataset_performance(dataset, results)\n if perform is None:\n continue\n else:\n dataset_perform[dataset] = perform\n\n generate_tabular_result(dataset_perform)\n","repo_name":"dungdinhanh/GDEGAN","sub_path":"SDGym-0.2.2/sdgym/utils/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5247451840","text":"import os\nfrom util import *\nfrom model import *\nfrom config import *\nfrom tqdm import tqdm\nfrom rouge_score import rouge_scorer\nimport tempfile\n\n\nMETRIC_LIST = ['BLEU3', 'BLEU4', 'CIDEr', 'ROUGE_L']\nPREDICT_METHOD_LIST = ['greedy', 'beam_3', 'beam_5']\n\n\nif __name__ == '__main__':\n train_data = read_train_pkl()\n encoder, decoder = create_model(\n train_data['code_voc_size'],\n train_data['com_voc_size'],\n train_data['max_length_code']\n )\n encoder, decoder = restore_model(encoder, decoder)\n test_data = read_testset()\n\n print('arch:', ARCH)\n print(\"Reading model...\")\n\n checkpoint_dir = get_checkpoint_dir()\n log_file = open(checkpoint_dir+\"/parameters\", \"a\")\n\n for method in PREDICT_METHOD_LIST:\n log_file.write(method+'\\n')\n log_file.flush()\n print('\\n'+method+'\\n')\n\n if method == 'beam_3' or method == 'beam_5':\n beam_k = int(method.split('_')[1])\n elif method == 'greedy':\n beam_k = 1\n\n total_score = dict()\n for metric in METRIC_LIST:\n total_score[metric] = 0\n\n for data in tqdm(test_data):\n predict = integrated_prediction(\n data['code'],\n encoder,\n decoder,\n train_data,\n beam_k,\n method,\n )\n\n for metric in METRIC_LIST:\n score = integrated_score(metric, data['comment'], predict)\n total_score[metric] += score\n\n for metric in METRIC_LIST:\n total_score[metric] = total_score[metric] / len(test_data)\n log_file.write(' '+metric+\"=\"+str(round(total_score[metric], 4))+\"\\n\")\n log_file.flush()\n print(metric+\"=\"+str(round(total_score[metric], 4)))\n\n log_file.close()\n","repo_name":"yurong0404/ComCNN","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"4817527905","text":"import random\n# 
print(random.random())\n# print(random.uniform(1,2))\n# print(random.randint(-1,2))\n# print(random.randrange(1,10,3))\n# List = [1,3,4,[5,6],7,8,9,10]\n# while 1:\n # print(random.choice(List))\n# print(random.sample(List,2))\n# print(random.shuffle(List))\n# List = [0,1,2,3,4,5,6,7,8,9]\n# print(random.sample(List,4))\n# def Verification():\n# List1 = ['1','2','3','4','5','6','7','8','9']\n# List2 = random.sample(List1,6)\n# str1 = \"\".join(List2)\n# print(str1)\n# def Verification():\n# List1 = ['1','2','3','4','5','6','7','8','9']\n# List2 = random.sample(List1,6)\n# str1 = \"\".join(List2)\n# return str1\n# print(Verification())\n\ndef code(n=6): # numeric verification code\n s = ''\n for i in range(n):\n str1 = random.randint(0,9)\n s += str(str1)\n return s\nprint(code())","repo_name":"duwei19961021/pythonxx","sub_path":"day 18/random模块.py","file_name":"random模块.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29101965715","text":"def fibonacci(n):\r\n # Initialize the first two terms of the sequence\r\n a, b = 0, 1\r\n for i in range(n):\r\n # Calculate the next term\r\n a, b = b, a + b\r\n # Print the current term\r\n print(a)\r\n\r\ndef main():\r\n # Get the number of terms from the user\r\n num_terms = int(input(\"Enter the number of terms: \"))\r\n\r\n # Call the fibonacci function\r\n fibonacci(num_terms)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Karamiii/PythonClass","sub_path":"Random/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10953407147","text":"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('fast')\n\n\n# In[4]:\n\n\ndf = pd.read_csv('wine.csv')\ndf.head()\n\n\n# # 3A\n\n# In[12]:\n\n\n# Function called for solution for 3.1\ndef attribute_describe(df):\n\n print(\"Mean = \",df.mean(),\"Median = \", df.median(),\"Standard Deviation =\",df.std(),\"Range = \",df.max() - df.min())\n print(\"25th percentile =\", df.quantile(q = 0.25),\"50th percentile =\", df.quantile(q = 0.50),\"75th percentile\", df.quantile(q = 0.75),end = '\\n\\n')\n\n\n# In[13]:\n\n\n# Solution to 3.1\narr = []\narr.append(df['Alcohol'])\narr.append(df['Malic acid'])\narr.append(df['Ash'])\narr.append(df['Alcalinity of ash'])\n\nfor x in arr:\n attribute_describe(x)\n\n\n# # 3B\n\n# In[18]:\n\n\n# Solution for 3.2\nboxplot = df.boxplot(column =['Ash','Malic acid'],by=\"Class\",figsize = [7,7])\nplt.show()\n\n# ## 3C Histogram Proanthocyanins\n\n# In[ ]:\n\n\n# Solution for 3.3\ndf1=df['Proanthocyanins']\ndf1.hist(bins=16)\nplt.title(\"Proanthocyanins\")\nplt.show()\n\n# ## 3C Proline\n\n# In[ ]:\n\n\ndf2=df['Proline']\ndf2.hist(bins=16)\nplt.title(\"Proline\")\nplt.show()\n\n# ## 3D\n\n# In[14]:\n\n\ndf1 = df[['Flavanoids', 'Total phenols','Ash','Malic acid']]\ndf2 = df['Class']\npd.plotting.scatter_matrix(df1, diagonal = 'kde',alpha = 0.5,figsize = [10,10],c = df2)\n\nplt.show()\n\n\n# ## 3E\n\n# In[50]:\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nplt.style.use('classic')\nfig = plt.figure(figsize = [10,10])\nax = fig.add_subplot(projection = '3d')\ndfcolor = df['Class']\nx=df['Proanthocyanins']\ny=df['Flavanoids']\nz=df['Total phenols']\nax.scatter(x, y, z,c=dfcolor)\nax.set_xlabel(\"Proanthocyanins\",fontweight=\"bold\")\nax.set_ylabel(\"Flavanoids\",fontweight=\"bold\")\nax.set_zlabel(\"Total 
phenols\",fontweight=\"bold\")\nplt.title(\"Three Dimensional Scatter Plot\",fontweight=\"bold\")\nplt.show()\n\n\n# ## **3F**\n\n# In[42]:\n\n\nimport scipy.stats as stats\ndef qq_x(x,title):\n z = (x-np.mean(x))/np.std(x)\n stats.probplot(z, dist=\"norm\", plot=plt)\n plt.title(title)\n plt.show()\n \nqq_x(df['Ash'],\"Q-Q Plot for Ash column\")\nqq_x(df['OD280/OD315 of diluted wines'], \"Q-Q Plot for Diluted Wines Column\")","repo_name":"sudz286/ALDA_Fall21","sub_path":"HW1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5194337856","text":"\nimport re\n\n#simple class to read fasta formated file and lookup sequences by identifier\nclass FastaFile:\n def __init__(self, fname, reverseRe = '^\\>Reverse_'):\n self.fname = fname\n self._sequences = dict()\n self.reverseRe = reverseRe\n self.read()\n\n def read(self):\n inF = open(self.fname, 'rU')\n lines = inF.readlines()\n\n for i, line in enumerate(lines):\n #skip reverse matches\n if re.match(self.reverseRe, line):\n continue\n\n if re.match('^\\>[a-z]+\\|\\w+\\|', line):\n elems = line.split('|')\n assert(len(elems) >= 3)\n id = elems[1]\n self._sequences[id] = lines[i + 1].strip()\n\n def getSequence(self, id):\n if id not in self._sequences.keys():\n raise RuntimeError('{} not found in fasta file'.format(id))\n return self._sequences[id]\n\n","repo_name":"ajmaurais/getResidueNumbers","sub_path":"src/fastaFile.py","file_name":"fastaFile.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10969840486","text":"from ui import *\nfrom data_manager import *\n\n\ndef start_module():\n options=[\"Create student\",\"Read student by id\",\"Read all students\",\"Update student\",\"Delete student\",\"Activate / Deactivate Student\"]\n table=import_data(\"student.txt\")\n while True:\n print_menu(\"Students\",options,\"Main menu\")\n inputs=get_inputs(\"Please enter a number:\",\"\")\n option=inputs[0]\n if option==\"1\":\n create_student(table)\n elif option==\"2\":\n stud_id=get_inputs(\"Enter the student id: \",\"\")\n print_result(read_student(table,stud_id),\"\")\n elif option==\"3\":\n print_result(read_students(table),\"\")\n elif option==\"4\":\n print_result(update_student(table),\"\")\n elif option==\"5\":\n print_result(delete_student(table),\"\")\n elif option==\"6\":\n print_result(act_deact_stud(table),\"\")\n elif option==\"0\":\n break\n\n\ndef create_student(table):\n student_list = []\n student_id = generate_random(table)\n student_name = get_inputs(\"Student's name: \", \"\")\n student_age = get_inputs(\"Student's age: \", \"\")\n student_sub = get_inputs(\"Is the student Active(1) or Not(0): \", \"\")\n student_list.append(student_id)\n student_list.append(student_name)\n student_list.append(student_age)\n student_list.append(student_sub)\n \n\n export_data(student_list,\"student.txt\", mode= \"a\")\n return student_list\n\n\ndef read_student(table, id):\n showlist = []\n app_table = import_data(\"application.txt\")\n for lines in table:\n if lines[0] == id:\n showlist.append(lines[1])\n showlist.append(lines[2])\n for lines in app_table:\n if lines[2] == id:\n showlist.append(lines[0])\n return showlist\n\ndef read_students(table):\n showlist = []\n for lines in table:\n showlist.append(lines[0])\n showlist.append(lines[1])\n \n return showlist\n\ndef update_student(table):\n 
update_id=get_inputs(\"Type in ID of student: \",\"\")\n row_no=0\n ID=0\n for i in range(len(table)):\n if table[i][ID]==update_id:\n row_no=i\n new_name=get_inputs(\"Type in name: \",\"\")\n new_age=get_inputs(\"Type in age: \",\"\")\n table[i][1]=new_name\n table[i][2]=new_age\n return table\n\ndef act_deact_stud(table):\n stud_id = get_inputs('Enter the student id: ',\"\")\n row_no = 0\n ID = 0\n for i in range(len(table)):\n if table[i][ID]==stud_id:\n row_no=i\n newact=get_inputs(\"Active(1) or Deactivate(0): \",\"\")\n table[i][3]=newact\n return table\n\ndef delete_student(table):\n app_table=import_data(\"application.txt\")\n delete_stud=get_inputs(\"Enter the student id: \",\"\")\n row=0\n table2=[]\n for i in range(len(app_table)):\n if delete_stud in app_table[i]:\n return \"Student can't be deleted with existing application\"\n \n for i in range(len(table)):\n if table[i][0]==delete_stud:\n row=i\n for j in range(len(table)):\n if j!=row:\n table2.append(table[j])\n return table2\n\n\n","repo_name":"gergodanko/Job-Hunter-5th-tw","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73186991235","text":"import torch\nimport torchvision\n\n\nclass QuadInMem(torch.utils.data.Dataset):\n def __init__(self, X, Y, Z, YE, YZE, label, ids):\n self.X = X\n self.Y = Y\n self.Z = Z\n self.YE = YE\n self.YZE = YZE\n self.ids = ids\n self.E = label\n\n def __getitem__(self, index):\n return {\n 'id': self.ids[index],\n 'X': self.X[index], 'Y': self.Y[index], 'Z': self.Z[index],\n 'YE': self.YE[index], 'YZE': self.YZE[index],\n }\n\n def __len__(self):\n return len(self.X)\n\n\nclass QuadOnDisk(torch.utils.data.Dataset):\n def __init__(self, X_path, Y, Z, YE, YZE, label, ids):\n self.X_path = X_path\n self.Y = Y\n self.Z = Z\n self.YE = YE\n self.YZE = YZE\n self.ids = ids\n self.E = label\n self.aug_fn = torchvision.transforms.RandomCrop(224)\n\n def __getitem__(self, index):\n img = torchvision.io.read_image(self.X_path[index]).float() / 255.\n return {\n 'id': self.ids[index],\n 'X': self.aug_fn(img), 'Y': self.Y[index], 'Z': self.Z[index],\n 'YE': self.YE[index], 'YZE': self.YZE[index],\n }\n\n def __len__(self):\n return len(self.X_path)\n","repo_name":"mnhng/CoPA","sub_path":"core/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"30848803230","text":"import matplotlib.pyplot as plt\n\nfrom mfglib.alg import MFOMO, FictitiousPlay, OnlineMirrorDescent, PriorDescent\nfrom mfglib.env import Environment\n\n\ndef plot_beach_bar_exploitability() -> None:\n beach_bar = Environment.beach_bar()\n online_mirror_descent = OnlineMirrorDescent()\n\n _, expls, _ = online_mirror_descent.solve(beach_bar)\n\n plt.figure(figsize=(8, 8))\n plt.semilogy(expls, label=\"Online Mirror Descent\")\n plt.legend(loc=0)\n plt.grid()\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Beach Bar Environment\")\n plt.show()\n\n\ndef plot_fictitious_play() -> None:\n plt.figure(figsize=(8, 8))\n\n rock_paper_scissors = Environment.rock_paper_scissors()\n for alpha in [0.1, 0.5, 0.75, None]:\n _, expls, _ = FictitiousPlay(alpha=alpha).solve(\n env_instance=rock_paper_scissors,\n max_iter=300,\n atol=None,\n rtol=None,\n )\n plt.semilogy(expls, label=f\"alpha: {alpha}\")\n\n plt.legend(loc=3)\n plt.grid()\n 
plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Rock Paper Scissors Environment - Fictitious Play Algorithm\")\n plt.show()\n\n\ndef plot_online_mirror_descent() -> None:\n plt.figure(figsize=(8, 8))\n\n rock_paper_scissors = Environment.rock_paper_scissors()\n for alpha in [0.01, 0.1, 1.0, 10]:\n _, expls, _ = OnlineMirrorDescent(alpha=alpha).solve(\n env_instance=rock_paper_scissors,\n max_iter=300,\n atol=None,\n rtol=None,\n )\n plt.semilogy(expls, label=f\"alpha: {alpha}\")\n\n plt.legend(loc=3)\n plt.grid()\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Rock Paper Scissors Environment - Online Mirror Descent Algorithm\")\n plt.show()\n\n\ndef plot_prior_descent() -> None:\n plt.figure(figsize=(8, 8))\n\n rock_paper_scissors = Environment.rock_paper_scissors()\n\n eta_values = [0.01, 0.1, 1.0, 10]\n n_inner_values = [None, 100, 20, 5]\n\n for eta, n_inner in zip(eta_values, n_inner_values):\n _, expls, _ = PriorDescent(eta=eta, n_inner=n_inner).solve(\n env_instance=rock_paper_scissors,\n max_iter=300,\n atol=None,\n rtol=None,\n )\n plt.semilogy(expls, label=f\"eta: {eta}, n_inner: {n_inner}\")\n\n plt.legend(loc=3)\n plt.grid()\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Rock Paper Scissors Environment - Prior Descent Algorithm\")\n plt.show()\n\n\ndef plot_mf_omo() -> None:\n plt.figure(figsize=(8, 8))\n\n rock_paper_scissors = Environment.rock_paper_scissors()\n\n lrs = [0.01, 0.1, 1, 10]\n\n for lr in lrs:\n opt = {\"name\": \"Adam\", \"config\": {\"lr\": lr}}\n _, expls, _ = MFOMO(optimizer=opt).solve(\n env_instance=rock_paper_scissors,\n max_iter=300,\n atol=None,\n rtol=None,\n )\n plt.semilogy(expls, label=f\"Adam optimizer - lr: {lr}\")\n\n plt.legend(loc=3)\n plt.grid()\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Rock Paper Scissors Environment - MFOMO Algorithm\")\n plt.show()\n\n\ndef plot_online_mirror_descent_tuning() -> None:\n online_mirror_descent = OnlineMirrorDescent()\n\n # Run the default algorithm\n _, expls_default, _ = online_mirror_descent.solve(\n env_instance=Environment.building_evacuation(\n T=5, n_floor=10, floor_l=5, floor_w=5\n ),\n max_iter=500,\n atol=None,\n rtol=None,\n )\n\n # Tune the algorithm\n online_mirror_descent_tuned = online_mirror_descent.tune(\n env_suite=[\n Environment.building_evacuation(T=5, n_floor=10, floor_l=5, floor_w=5)\n ],\n max_iter=500,\n atol=0,\n rtol=1e-2,\n metric=\"shifted_geo_mean\",\n n_trials=20,\n timeout=60,\n )\n\n # Run the tuned algorithm\n _, expls_tuned, _ = online_mirror_descent_tuned.solve(\n env_instance=Environment.building_evacuation(\n T=5, n_floor=10, floor_l=5, floor_w=5\n ),\n max_iter=500,\n atol=None,\n rtol=None,\n )\n\n plt.figure(figsize=(8, 8))\n\n plt.semilogy(expls_default, label=\"Default\")\n plt.semilogy(expls_tuned, label=\"Tuned\")\n\n plt.legend(loc=3)\n plt.grid()\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Exploitability\")\n plt.title(\"Building Evacuation Environment - Online Mirror Descent Algorithm\")\n plt.show()\n","repo_name":"radar-research-lab/MFGLib","sub_path":"docs/source/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"71667379714","text":"\"\"\"\nUsage: phs.obs.production.isdc.worker [options]\n\nOptions:\n --java_path=PATH [default: /home/guest/relleums/java8/jdk1.8.0_111]\n --fact_tools_jar_path=PATH [default: 
/home/guest/relleums/fact_photon_stream/fact-tools/target/fact-tools-0.18.1.jar]\n --fact_tools_xml_path=PATH [default: /home/guest/relleums/fact_photon_stream/photon_stream_production/photon_stream_production/resources/observations_pass4.xml]\n --raw_path=PATH [default: /fact/raw/2017/09/01/20170901_139.fits.fz]\n --drs_path=PATH [default: /fact/raw/2017/09/01/20170901_129.drs.fits.gz]\n --aux_dir=PATH [default: /fact/aux/2017/09/01/]\n --out_dir=PATH [default: /home/guest/relleums/qsub]\n --out_basename=PATH [default: 20170901_139]\n --tmp_dir_basename=NAME [default: phs_obs_]\n\"\"\"\nimport docopt\nimport os\nfrom glob import glob\nfrom os.path import join\nimport subprocess\nimport tempfile\nimport shutil\n\n\ndef run(\n java_path,\n fact_tools_jar_path,\n fact_tools_xml_path,\n raw_path,\n drs_path,\n aux_dir,\n out_dir,\n out_basename,\n tmp_dir_basename,\n):\n with tempfile.TemporaryDirectory(prefix=tmp_dir_basename) as tmp:\n\n my_env = os.environ.copy()\n my_env[\"PATH\"] = java_path + my_env[\"PATH\"]\n\n subprocess.call([\n 'java',\n '-XX:MaxHeapSize=1024m',\n '-XX:InitialHeapSize=512m',\n '-XX:CompressedClassSpaceSize=64m',\n '-XX:MaxMetaspaceSize=128m',\n '-XX:+UseConcMarkSweepGC',\n '-XX:+UseParNewGC',\n '-jar', fact_tools_jar_path, fact_tools_xml_path,\n '-Dinfile=file:'+raw_path,\n '-Ddrsfile=file:'+drs_path,\n '-Daux_dir=file:'+aux_dir,\n '-Dout_path_basename=file:'+join(tmp, out_basename),\n ])\n\n for intermediate_file_path in glob(join(tmp, '*')):\n if os.path.isfile(intermediate_file_path):\n os.makedirs(out_dir, exist_ok=True, mode=0o755)\n shutil.copy(intermediate_file_path, out_dir)\n\n\ndef main():\n try:\n arguments = docopt.docopt(__doc__)\n\n run(\n java_path=arguments['--java_path'],\n fact_tools_jar_path=arguments['--fact_tools_jar_path'],\n fact_tools_xml_path=arguments['--fact_tools_xml_path'],\n raw_path=arguments['--raw_path'],\n drs_path=arguments['--drs_path'],\n aux_dir=arguments['--aux_dir'],\n out_dir=arguments['--out_dir'],\n out_basename=arguments['--out_basename'],\n tmp_dir_basename=arguments['--tmp_dir_basename'],\n )\n\n except docopt.DocoptExit as e:\n print(e)\n\nif __name__ == '__main__':\n main()\n","repo_name":"fact-project/photon_stream_production","sub_path":"photon_stream_production/isdc/worker_node_produce.py","file_name":"worker_node_produce.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27889232889","text":"import os\nos.system('cls')\n\n# WHILE LOOP CONTROLLED BY A QUESTION\n\ncontinuar=True\ncont=1\nwhile (continuar):\n input(\"Employee name: \")\n seguir=input(\"continue with another employee Y/N ? \").upper()\n if (seguir!=\"Y\"):\n continuar=False\n \n if (cont==3):\n break\n \n cont +=1 \nprint(\"... left the loop!!\")\n","repo_name":"Vickykathe/ejerciciosPython","sub_path":"13 mientras-2.py","file_name":"13 mientras-2.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29047210635","text":"\"\"\"Support for various GEOS geometry operations\n\"\"\"\n\nfrom ctypes import byref, c_void_p\n\nfrom shapely.geos import lgeos\nfrom shapely.geometry.base import geom_factory, BaseGeometry\nfrom shapely.geometry import asShape, asLineString, asMultiLineString\n\n__all__= ['operator', 'polygonize', 'linemerge', 'cascaded_union',\n 'unary_union']\n\n\nclass CollectionOperator(object):\n\n def shapeup(self, ob):\n if isinstance(ob, BaseGeometry):\n return ob\n else:\n try:\n return asShape(ob)\n except ValueError:\n return asLineString(ob)\n\n def polygonize(self, lines):\n \"\"\"Creates polygons from a source of lines\n \n The source may be a MultiLineString, a sequence of LineString objects,\n or a sequence of objects that can be adapted to LineStrings.\n \"\"\"\n source = getattr(lines, 'geoms', None) or lines\n obs = [self.shapeup(l) for l in source]\n geom_array_type = c_void_p * len(obs)\n geom_array = geom_array_type()\n for i, line in enumerate(obs):\n geom_array[i] = line._geom\n product = lgeos.GEOSPolygonize(byref(geom_array), len(obs))\n collection = geom_factory(product)\n for g in collection.geoms:\n clone = lgeos.GEOSGeom_clone(g._geom)\n g = geom_factory(clone)\n g._owned = False\n yield g\n\n def linemerge(self, lines): \n \"\"\"Merges all connected lines from a source\n \n The source may be a MultiLineString, a sequence of LineString objects,\n or a sequence of objects that can be adapted to LineStrings. Returns a\n LineString or MultiLineString when lines are not contiguous. 
\n \"\"\" \n source = None \n if hasattr(lines, 'type') and lines.type == 'MultiLineString': \n source = lines \n elif hasattr(lines, '__iter__'): \n try: \n source = asMultiLineString([ls.coords for ls in lines]) \n except AttributeError: \n source = asMultiLineString(lines) \n if source is None: \n raise ValueError(\"Cannot linemerge %s\" % lines)\n result = lgeos.GEOSLineMerge(source._geom) \n return geom_factory(result) \n\n def cascaded_union(self, geoms):\n \"\"\"Returns the union of a sequence of geometries\n \n This is the most efficient method of dissolving many polygons.\n \"\"\"\n L = len(geoms)\n subs = (c_void_p * L)()\n for i, g in enumerate(geoms):\n subs[i] = g._geom\n collection = lgeos.GEOSGeom_createCollection(6, subs, L)\n return geom_factory(lgeos.methods['cascaded_union'](collection))\n\n def unary_union(self, geoms):\n \"\"\"Returns the union of a sequence of geometries\n\n This method replaces :meth:`cascaded_union` as the\n preferred method for dissolving many polygons.\n\n \"\"\"\n L = len(geoms)\n subs = (c_void_p * L)()\n for i, g in enumerate(geoms):\n subs[i] = g._geom\n collection = lgeos.GEOSGeom_createCollection(6, subs, L)\n return geom_factory(lgeos.methods['unary_union'](collection))\n\noperator = CollectionOperator()\npolygonize = operator.polygonize\nlinemerge = operator.linemerge\ncascaded_union = operator.cascaded_union\nunary_union = operator.unary_union\n\nclass ValidateOp(object):\n def __call__(self, this):\n return lgeos.GEOSisValidReason(this._geom)\n\nvalidate = ValidateOp()\n\n","repo_name":"luckyjd/lms_edx","sub_path":"edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/shapely/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"38858636062","text":"wished_profit = float(input())\namount_due = 0\ncocktail_price = 0\n\n\nwhile amount_due < wished_profit:\n\tcocktail_name = input()\n\tif cocktail_name == \"Party!\":\n\t\tbreak\n\tcocktails_order = int(input())\n\tcocktail_price = cocktails_order * len(cocktail_name)\n\tif cocktail_price % 2 != 0:\n\t\tcocktail_price *= 0.75\n\tamount_due += cocktail_price\n\nif wished_profit > amount_due:\n\tprint(f\"We need {wished_profit - amount_due:.2f} leva more.\")\nelse:\n\tprint(f\"Target acquired.\")\nprint(f\"Club income - {amount_due:.2f} leva.\")\n","repo_name":"Dimitrov-S-Dev-Python/SoftUni_Python_Basics","sub_path":"Exams_Tasks/PB_6_07_2019/Club.py","file_name":"Club.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7726865128","text":"import asyncio\nfrom typing import List, Tuple\nimport threading\nfrom mavsdk import System\nfrom mavsdk.mission import MissionItem, MissionPlan\nfrom mast_calculations import get_closest_masts\nfrom Video import Video\nimport os\nimport argparse\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nfrom keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\nfrom keras.utils.image_utils import img_to_array\nfrom loguru import logger\nimport sys\nimport time\nfrom os.path import dirname, realpath\n\n# Image stuff\nfrom PIL import Image\n\nMAST_HEIGHT = 5 # Relative height of old mast to starting position of drone (meters)\nACCEPTANCE_RADIUS = 5 # How close should the drone be to the mast before the mission is a success (meters)\n\nvideo = Video()\nmodel = VGG16()\ntime_start = time.time()\n\n# Initialize 
thread-safe variables\nobstacle_avoidance_triggered = threading.Event()\nhas_found_mast = threading.Event()\nis_returning = threading.Event()\nmast_height_altitude_reached = threading.Event()\n\n\nasync def run(entry_point: Tuple[float, float]):\n drone = System()\n\n closest_masts = get_closest_masts(\n entry_point\n ) # Should be received from the base-station in real setup\n await drone.connect(system_address=\"udp://:14540\")\n\n logger.info(\"Waiting for drone to connect...\")\n async for state in drone.core.connection_state():\n if state.is_connected:\n logger.info(f\"Connected to drone!\")\n break\n\n logger.info(\"Waiting for drone to have a global position estimate...\")\n async for health in drone.telemetry.health():\n if health.is_global_position_ok and health.is_home_position_ok:\n logger.info(\"Global position estimate OK\")\n break\n\n # Configure the drone parameters\n await drone.param.set_param_float(\"MIS_DIST_1WP\", 5000)\n await drone.param.set_param_float(\"MIS_DIST_WPS\", 5000)\n await drone.mission.set_return_to_launch_after_mission(False)\n\n logger.info(\"Arming\")\n await drone.action.arm()\n\n # Start parallel tasks\n monitor_distance_task = asyncio.ensure_future(monitor_distance(drone))\n do_mast_recognition_task = asyncio.ensure_future(do_mast_recognition())\n monitor_altitude_task = asyncio.ensure_future(monitor_altitude(drone))\n\n running_tasks = [\n monitor_distance_task,\n do_mast_recognition_task,\n monitor_altitude_task,\n ]\n\n mission_items = []\n for (mast, _) in closest_masts:\n mission_items.append(\n MissionItem(\n latitude_deg=float(mast[\"wgs84koordinat\"][\"bredde\"]),\n longitude_deg=float(mast[\"wgs84koordinat\"][\"laengde\"]),\n relative_altitude_m=MAST_HEIGHT,\n speed_m_s=3,\n is_fly_through=False,\n gimbal_pitch_deg=45,\n gimbal_yaw_deg=float(\"nan\"),\n camera_action=MissionItem.CameraAction.NONE,\n loiter_time_s=float(\"nan\"),\n camera_photo_interval_s=float(\"nan\"),\n acceptance_radius_m=ACCEPTANCE_RADIUS,\n yaw_deg=float(\"nan\"),\n camera_photo_distance_m=float(\"nan\"),\n )\n )\n\n i = 0\n while not has_found_mast.is_set() and i < 3 and i < len(mission_items):\n\n mission_plan = MissionPlan([mission_items[i]])\n\n logger.info(f\"Uploading mission {i}\")\n await drone.mission.upload_mission(mission_plan)\n\n await asyncio.sleep(1)\n\n logger.info(\"Starting mission\")\n await drone.mission.start_mission()\n\n logger.info(\n f\"Now checking mast {closest_masts[i][0]['unik_station_navn']} at {closest_masts[i][0]['wgs84koordinat']['laengde']}, {closest_masts[i][0]['wgs84koordinat']['bredde']}\"\n )\n\n while not has_found_mast.is_set() and not obstacle_avoidance_triggered.is_set():\n await asyncio.sleep(1)\n\n if obstacle_avoidance_triggered.is_set():\n logger.info(\"Clearing current mission\")\n await drone.mission.pause_mission()\n await drone.mission.clear_mission()\n\n logger.info(\"Returning to base\")\n is_returning.set()\n obstacle_avoidance_triggered.clear()\n await drone.mission.clear_mission()\n await drone.mission.upload_mission(\n MissionPlan(\n [\n MissionItem(\n float(entry_point[1]),\n float(entry_point[0]),\n MAST_HEIGHT,\n 3,\n False,\n float(\"nan\"),\n float(\"nan\"),\n MissionItem.CameraAction.NONE,\n float(\"nan\"),\n float(\"nan\"),\n float(\"nan\"),\n float(\"nan\"),\n float(\"nan\"),\n )\n ]\n )\n )\n await asyncio.sleep(1)\n await drone.mission.start_mission()\n is_finished = False\n while not is_finished:\n is_finished = await drone.mission.is_mission_finished()\n await asyncio.sleep(1)\n i += 1\n 
logger.info(\n f\"Mission finished. Line of sight confirmed to mast: {has_found_mast.is_set()}\"\n )\n is_returning.clear()\n await drone.mission.clear_mission()\n logger.info(\"Landing\")\n await drone.action.return_to_launch()\n\n\nasync def monitor_distance(drone: System):\n logger.debug(\"Distance monitoring enabled\")\n async for distance in drone.telemetry.distance_sensor():\n logger.debug(f\"Distance from sensor: {distance}\")\n if (\n # For some reason, the documentation is in meters,\n # but the data we get is certainly not meters.\n # Therefore, 400 meters here does not equal 400 meters in Gazebo.\n distance.current_distance_m < 400\n and mast_height_altitude_reached.is_set()\n and not is_returning.is_set()\n and not await drone.mission.is_mission_finished()\n ):\n obstacle_avoidance_triggered.set()\n logger.info(f\"Obstacle identified, line of sight cannot be confirmed.\")\n logger.debug(\"Distance monitoring disabled\")\n\n\n@logger.catch\nasync def monitor_altitude(drone: System):\n logger.debug(\"Altitude monitoring enabled\")\n async for pos in drone.telemetry.position():\n logger.debug(f\"Drone altitude: {pos.relative_altitude_m}\")\n # Add 2 meters to account for sensor data not always being precise.\n if pos.relative_altitude_m + 2 > MAST_HEIGHT:\n mast_height_altitude_reached.set()\n logger.info(\"Mission altitude reached.\")\n return\n\n\nasync def do_mast_recognition():\n i = 0 # May be removed, as only 1 image is needed each time.\n logger.debug(\"Mast recognition called\")\n logger.debug(\"Has Found Mast: \" + str(has_found_mast.is_set()))\n while not has_found_mast.is_set():\n if not is_returning.is_set() and mast_height_altitude_reached.is_set():\n # Capture image every 5 seconds to analyze\n logger.info(\"Taking image\")\n img = None\n while img is None:\n # Wait for the next frame\n if not video.frame_available():\n continue\n\n img = video.frame()\n im = Image.fromarray(img[:, :, ::-1])\n im = im.resize((224, 224))\n im.save(f\"images/second_{i}.jpeg\")\n if image_contains_mast(img_to_array(im)):\n has_found_mast.set()\n logger.info(\"Mast found! 
Returning to base.\")\n logger.debug(\"Mast-check completed.\")\n i += 1\n else:\n logger.debug(\"Mast recognition disabled while returning or taking off.\")\n await asyncio.sleep(5)\n logger.debug(\"Exiting mast recognition task\")\n\n\n@logger.catch\ndef image_contains_mast(im):\n if (\n time.time() - time_start\n ) > 200: # Cheat, and show the camera a picture of a balloon/mast\n im = img_to_array(Image.open(\"../data/balloon.jpg\").resize((224, 224)))\n logger.debug(\"image_contains_mast called\")\n image = im.reshape((1, im.shape[0], im.shape[1], im.shape[2]))\n image = preprocess_input(image)\n pred = model.predict(image)\n label = decode_predictions(pred)\n label = label[0][0]\n logger.debug(\n \"image_contains_mast returned {}, confidence: {}\",\n label[1] == \"balloon\" and label[2] >= 0.90,\n label[2],\n )\n return label[1] == \"balloon\" and label[2] >= 0.90\n\n\nasync def observe_is_in_air(drone, running_tasks):\n \"\"\"Monitors whether the drone is flying or not and\n returns after landing\"\"\"\n\n was_in_air = False\n\n async for is_in_air in drone.telemetry.in_air():\n if is_in_air:\n was_in_air = is_in_air\n\n if was_in_air and not is_in_air:\n for task in running_tasks:\n task.cancel()\n try:\n await task\n except asyncio.CancelledError:\n pass\n await asyncio.get_event_loop().shutdown_asyncgens()\n logger.info(\"Exiting\")\n return\n\n\ndef start(lat: float, lon: float):\n loop = asyncio.get_event_loop().run_until_complete(run((lon, lat)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Automatic Drone Mast LoS confirmation tool. Default location is somewhere in Langeskov (i.e., Lars Tyndskids mark)\"\n )\n parser.add_argument(\n \"--lon\",\n metavar=\"LONGITUDE\",\n action=\"store\",\n default=10.573138,\n type=float,\n help=f\"The longitude coordinate, default={10.5731380}\",\n )\n parser.add_argument(\n \"--lat\",\n metavar=\"LATITUDE\",\n action=\"store\",\n default=55.369671,\n type=float,\n help=f\"The latitude coordinate, default={55.369671}\",\n )\n config = parser.parse_args()\n logger.remove(0)\n logger.add(\n dirname(realpath(__file__)) + \"/../logs/{time}.log\",\n format=\"{time}\\t| {level}\\t| {file}:{function}:{line} \\t- {message}>\",\n level=\"DEBUG\",\n enqueue=True,\n )\n logger.add(\n sys.stdout,\n colorize=True,\n format=\"{time}\\t| {level}\\t| {file}:{function}:{line} \\t- {message}\",\n level=\"INFO\",\n enqueue=True,\n )\n\n start(config.lat, config.lon)\n","repo_name":"Maje419/Drones","sub_path":"app/run_mission.py","file_name":"run_mission.py","file_ext":"py","file_size_in_byte":10469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71476970114","text":"a = [\"Juan\", \"Carlos\", \"Ernesto\", \"Henry\"]\n\n# Print the value at a specific position in list A\nprint(a[0])\n\n# Append a value to the end of list A\na.append(\"Marcos\")\n\n# Insert a value at the indicated position in list A\na.insert(4, \"Nicole\")\n\n# Add several values to list A\na.extend([\"John\", \"Raul\", \"Benjamin\",])\n\n# Remove a value from list A\na.remove(\"Juan\")\n\n# Slice from the indicated position to the end of list A\nb = a[2:]\nc = a[1:5]\n\nprint(a)\nprint(b)\nprint(c)\n\n","repo_name":"kevinem986/Curso_Python_Udemy","sub_path":"InitPython/seccion2/lista01.py","file_name":"lista01.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"11249960821","text":"# x = 0\n\n# while x < 100:\n# print(x)\n# x = x + 1\n# print('The End')\n\n# x = 1\n\n# while x <= 100:\n# print(x)\n# x += 1\n# print('The End')\n\n\n# x = 1\n\n# while x <= 100:\n# if (x %2 ==0):\n# print(x)\n# x += 1\n# print('The End')\n\n\n# x = 1\n\n# while x <= 100:\n# if (x %2 ==0):\n# print(f'sayı çift: {x}')\n# else:\n# print(f'sayı tek: {x}') \n# x += 1\n# print('The End')\n\nname = ''\n\nwhile not name.strip():\n name = input('İsminizi Giriniz: ')\nprint(f'Merhaba, {name}')","repo_name":"code-tamer/Library","sub_path":"Business/KARIYER/PYTHON/Python_Temelleri/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32431061234","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 18 22:49:03 2017\n\n@author: Yuhong\n\"\"\"\n\nbread_price = [[0.5,5],[0.6,5.5],[0.8,6],[1.1,6.8],[1.4,7]]\n \ndef step_gradient(b_current, w_current, points, learningRate):\n b_gradient = 0\n w_gradient = 0\n for i in range(len(points)):\n x = points[i][0]\n y = points[i][1]\n b_gradient += -1.0 * (y - ((w_current * x) + b_current))\n w_gradient += -1.0 * x * (y - ((w_current * x) + b_current))\n new_b = b_current - (learningRate * b_gradient)\n new_m = w_current - (learningRate * w_gradient)\n return [new_b, new_m]\n \ndef gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):\n b = starting_b\n m = starting_m\n for i in range(num_iterations):\n b, m = step_gradient(b, m, points, learning_rate)\n return [b, m]\n \ndef predict(b, m, wheat):\n price = m * wheat + b\n return price\n\nif __name__ == '__main__': \n b1, m1 = gradient_descent_runner(bread_price, 1, 1, 0.01, 100)\n \n price = predict(b1, m1, 0.9)\n print (\"price = {0:.2f}\".format(price))","repo_name":"yhily/deep-learning-resource","sub_path":"srcs/chap05/bread-price.py","file_name":"bread-price.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"5134084421","text":"# SPDX-License-Identifier: MIT\n# © 2020-2022 ETH Zurich and other contributors, see AUTHORS.txt for details\n\nfrom cmath import inf\nimport os\nfrom os.path import join\nimport yaml\nimport argparse\nimport numpy as np\nfrom options.test_audio2headpose_options import TestOptions\nfrom datasets import create_dataset\nfrom models import create_model\nfrom util.cfgnode import CfgNode\nimport cv2\nimport librosa\nimport soundfile as sf\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport subprocess\nfrom pathlib import Path\nimport torch\nfrom funcs import utils\nimport sys\nsys.path.append('../preprocessing/')\nfrom face_tracking.FLAME.FLAME import FLAME\nfrom face_tracking.FLAME.config import cfg as FLAME_cfg\nfrom face_tracking.FLAME.lbs import vertices2landmarks\nfrom face_tracking.render_3dmm import Render_FLAME\nfrom face_tracking.util import *\nfrom finetune import finetune\nfrom PIL import Image\n\n\ndef write_video_with_audio(audio_path, output_path, prefix='pred_', h=512, w=512, fps=25):\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n # fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n video_tmp_path = join(save_root, 'tmp.avi')\n out = cv2.VideoWriter(video_tmp_path, fourcc, fps, (w, h))\n for j in tqdm(range(nframe), position=0, desc='writing video'):\n img = cv2.imread(join(save_root, prefix + str(j+1) + '.jpg'))\n out.write(img)\n out.release()\n cmd = 
'ffmpeg -y -i \"' + video_tmp_path + '\" -i \"' + \\\n audio_path + '\" -codec copy -shortest \"' + output_path + '\"'\n subprocess.call(cmd, shell=True)\n\n os.remove(video_tmp_path) # remove the template video\n\n\n\nif __name__ == '__main__':\n\n # load args \n parser = argparse.ArgumentParser()\n parser.add_argument('--dataroot', required=True)\n parser.add_argument('--dataset_names', required=True)\n parser.add_argument('--target_name', required=True)\n parser.add_argument('--checkpoint_dir', required=True)\n parser.add_argument('--out_dir', required=True)\n\n inopt = parser.parse_args()\n\n # Load default options\n Test_parser = TestOptions()\n opt = Test_parser.parse() # get training options\n\n # Overwrite with config\n opt.phase = ''\n opt.dataset_mode = 'audio' # for testing\n opt.dataroot = os.path.join(inopt.dataroot, 'audio')\n opt.dataset_names = inopt.dataset_names\n opt.FPS = 25\n opt.serial_batches = True\n opt.train_test_split = False\n\n # save to the disk\n Test_parser.print_options(opt)\n\n # Set device\n device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if len(\n opt.gpu_ids) > 0 else torch.device('cpu')\n\n # Load Model\n print('---------- Loading Model: {} -------------'.format(opt.task))\n checkpoint_path = os.path.join(inopt.checkpoint_dir, inopt.target_name, 'latest_Audio2Headpose.pkl')\n print(checkpoint_path)\n if not os.path.isfile(checkpoint_path):\n print('No fine-tuned checkpoint for headposes found..')\n finetune(name=inopt.target_name, \n dataroot=inopt.dataroot, \n dataset_names=inopt.target_name, \n target_checkpoints=inopt.checkpoint_dir, \n checkpoint_path=os.path.join(inopt.checkpoint_dir, 'Audio2Headpose_TED_checkpoint.pkl'),\n fps=25)\n\n Audio2Headpose = create_model(opt)\n Audio2Headpose.load_checkpoint(checkpoint_path)\n Audio2Headpose.eval()\n\n # Load data\n dataset = create_dataset(opt)\n fit_data_path = os.path.join(inopt.dataroot, 'video', inopt.target_name, 'track_params.pt')\n fit_data = torch.load(fit_data_path)\n\n stop_at = inf\n\n for iter, file_index in enumerate(dataset.dataset.valid_clips):\n\n if iter >= stop_at:\n break\n\n test_name = dataset.dataset.clip_names[dataset.dataset.valid_clips[file_index]]\n #____________________________________________________#\n print('Generating movement for video: ', test_name)\n\n # Get features\n audio_features = dataset.dataset.audio_features[file_index]\n # audio_expr = torch.from_numpy(np.stack([np.load(os.path.join(inopt.out_dir,'audio_expr', ae)).flatten() for ae in os.listdir(os.path.join(inopt.out_dir,'audio_expr'))], axis=0))\n # audio_expr = torch.from_numpy(np.stack([np.load(os.path.join(inopt.dataroot, 'video', inopt.target_name,'deca_expr', ae)).flatten() for ae in sorted(os.listdir(os.path.join(inopt.dataroot, 'video', inopt.target_name,'deca_expr')))], axis=0))\n \n # Audio2Headpose\n print('Headpose inference...')\n\n # Set history headposes as first tracked headposes\n init_rot = fit_data['euler'][0].numpy()\n init_trans = fit_data['trans'][0].numpy() - fit_data['trans'].mean(axis=0).numpy()\n pre_headpose = np.concatenate([init_rot, init_trans], axis=0)\n pre_headpose = np.concatenate([pre_headpose, np.zeros_like(pre_headpose)], axis=0) # headpose and velocity: velocity is zero: still head.\n\n # Set history headposes as zeros\n # pre_headpose = np.zeros(opt.A2H_wavenet_input_channels, np.float32)\n\n print('Initial Headpose for prediction: \\n', pre_headpose)\n\n # Generate headposes\n pred_Head = Audio2Headpose.generate_sequences(audio_features, pre_headpose, 
fill_zero=True, sigma_scale=0.3, opt=opt)\n\n # Build FLAME and Renderer\n h = fit_data['h']\n w = fit_data['w']\n focal = fit_data['focal']\n id_para = fit_data['id']\n # expr_para = audio_expr\n # nframe = min(expr_para.size()[0], pred_Head.shape[0])\n\n nframe = pred_Head.shape[0]\n\n\n cxy = torch.tensor((w / 2.0, h / 2.0), dtype=torch.float).cpu()\n\n model_3dmm = FLAME(FLAME_cfg.model)\n renderer = Render_FLAME(model_3dmm.faces_tensor, focal, h, w, 1, device)\n\n # Smooth predicted headpose\n Head_smooth_sigma = [5, 10]\n pred_headpose = utils.headpose_smooth(pred_Head[:, :6], Head_smooth_sigma, method='gaussian').astype(np.float32)\n # Head_smooth_sigma = [10, 25]\n # pred_headpose = utils.headpose_smooth(pred_Head[:, :6], Head_smooth_sigma , method='median').astype(np.float32)\n\n # Postprocessing\n if fit_data:\n \n og_rot = fit_data['euler'].numpy()\n og_trans = fit_data['trans'].numpy()\n\n mean_translation = og_trans.mean(axis=0)\n\n pred_headpose[:, 3:] += mean_translation\n # pred_headpose[:, 0] += 180\n\n # Make images\n save_root = inopt.out_dir\n os.makedirs(os.path.join(save_root, 'render'), exist_ok=True)\n os.makedirs(os.path.join(save_root, 'landmarks'), exist_ok=True)\n \n np.save(os.path.join(save_root, 'headposes.npy'), pred_headpose)\n\n deca_expr_path = os.path.join(inopt.dataroot, 'video', inopt.target_name,'deca_expr')\n audio_expr_path = os.path.join(inopt.out_dir, 'audio_expr')\n\n for i in tqdm(range(nframe), desc='rendering: '):\n R = torch.from_numpy(pred_headpose[i, 0:3]).unsqueeze(\n 0).to(device).double()\n t = torch.from_numpy(pred_headpose[i, 3:6]).unsqueeze(\n 0).to(device).double()\n \n # Zero translation\n # t = torch.tensor((0, 0, -5)).unsqueeze(\n # 0).to(device).double()\n\n id = id_para.to(device).double()\n expr = torch.from_numpy(np.load(os.path.join(audio_expr_path, '%05d.npy' % i))).to(device).double()\n\n '''\n # Original Rotation and Translation used for debug\n if 0:\n og_R = torch.from_numpy(og_rot[i]).unsqueeze(0).to(device).double()\n og_t = torch.from_numpy(og_trans[i]).unsqueeze(\n 0).to(device).double()\n print(\n f'OG Rotation euler: \\n{og_R.data}, \\nOG trans: \\n{og_t.data}')\n print(\n f'\\nPred Rotation euler: \\n{R.data},\\nPred trans: \\n{t.data}')\n\n og_rott_geo = model_3dmm.forward_geo(id, expr, og_R, og_t)\n\n og_landmarks3d = model_3dmm.get_3dlandmarks(\n id, expr, og_R, og_t, focal, cxy).cpu()\n og_proj_geo = proj_pts(og_landmarks3d, focal, cxy)\n # Porj points\n colormap_blue = plt.cm.Blues\n for num, lin in enumerate(np.linspace(0, 0.9, len(og_proj_geo[0, :, 0]))):\n plt.scatter(og_proj_geo[0, num, 0].detach().cpu(),\n og_proj_geo[0, num, 1].detach().cpu(),\n color=colormap_blue(lin),\n s=10)\n \n '''\n\n rott_geo = model_3dmm.forward_geo(id, expr, R, t)\n landmarks3d = model_3dmm.get_3dlandmarks_forehead(id, expr, R, t, focal, cxy).cpu()\n proj_geo = proj_pts(landmarks3d, focal, cxy)\n np.savetxt(os.path.join(save_root, 'landmarks', '%05d.lms' % i), proj_geo[0].detach().cpu().numpy())\n render_imgs = renderer(rott_geo.float(), model_3dmm.faces_tensor)\n img_arr = render_imgs[0, :, :, :3].cpu().numpy()\n img_arr *= 255\n img_arr = img_arr.astype(np.uint8)\n im = Image.fromarray(img_arr)\n im.save(os.path.join(save_root, 'render','%05d.png' % i))\n\n 
print('Finish!')\n","repo_name":"mediatechnologycenter/AvatarForge","sub_path":"motion-gan-pipeline/motion-generation/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"18850480494","text":"\nimport tensorflow as tf\n\n\nclass MultiClassTruePositives(tf.keras.metrics.Metric):\n\n def __init__(self, name='multiclass_recall', **kwargs):\n super(MultiClassTruePositives, self).__init__(name=name, **kwargs)\n self.true_positives = self.add_weight(name='true_positives',\n initializer='zeros')\n\n def update_state(self, y_true, y_pred, num_classes, positive_inds,\n sample_weight=None):\n\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n\n for i in range(num_classes):\n if i in positive_inds:\n # Calculate true positives.\n ix = tf.cast(i, tf.int32)\n true = tf.equal(y_true, ix)\n pred = tf.equal(y_pred, ix)\n tp_bool = tf.math.logical_and(true, pred)\n if sample_weight is not None:\n tp_bool = tf.multiply(tf.cast(tp_bool, tf.int32),\n sample_weight)\n tp_bool = tf.cast(tp_bool, tf.float32)\n self.true_positives.assign_add(tf.reduce_sum(tp_bool))\n\n def result(self):\n return self.true_positives\n\n def reset_states(self):\n # Used to reset metric before the start of an epoch.\n self.true_positives.assign(0.)\n\n\nclass MultiClassFalsePositives(tf.keras.metrics.Metric):\n\n def __init__(self, name='multiclass_recall', **kwargs):\n super(MultiClassFalsePositives, self).__init__(name=name, **kwargs)\n self.false_positives = self.add_weight(name='true_positives',\n initializer='zeros')\n\n def update_state(self, y_true, y_pred, num_classes, positive_inds,\n sample_weight=None):\n\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n\n for i in range(num_classes):\n if i not in positive_inds:\n # Calculate false positives.\n ix = tf.cast(i, tf.int32)\n true = tf.equal(y_true, ix)\n pred = tf.not_equal(y_pred, ix)\n fp_bool = tf.math.logical_and(true, pred)\n if sample_weight is not None:\n fp_bool = tf.multiply(tf.cast(fp_bool, tf.int32),\n sample_weight)\n fp_bool = tf.cast(fp_bool, tf.float32)\n self.false_positives.assign_add(tf.reduce_sum(fp_bool))\n\n def result(self):\n return self.false_positives\n\n def reset_states(self):\n # Used to reset metric before the start of an epoch.\n self.false_positives.assign(0.)\n\n\nclass MultiClassFalseNegatives(tf.keras.metrics.Metric):\n\n def __init__(self, name='multiclass_recall', **kwargs):\n super(MultiClassFalseNegatives, self).__init__(name=name, **kwargs)\n self.false_negatives = self.add_weight(name='recall',\n initializer='zeros')\n\n def update_state(self, y_true, y_pred, num_classes, positive_inds,\n sample_weight=None):\n\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n\n for i in range(num_classes):\n if i not in positive_inds:\n # Calculate false negatives.\n ix = tf.cast(i, tf.int32)\n false = tf.not_equal(y_true, ix)\n pred = tf.equal(y_pred, ix)\n fn_bool = tf.math.logical_and(false, pred)\n if sample_weight is not None:\n fn_bool = tf.multiply(tf.cast(fn_bool, tf.int32),\n sample_weight)\n fn_bool = tf.cast(fn_bool, tf.float32)\n self.false_negatives.assign_add(tf.reduce_sum(fn_bool))\n\n def result(self):\n return self.false_negatives\n\n def reset_states(self):\n # Used to reset metric before the start of an epoch.\n self.false_negatives.assign(0.)\n\n\ndef precision_fn(true_positives, false_positives):\n precision = 
tf.math.divide(true_positives, true_positives + false_positives)\n\n    if tf.math.is_nan(precision):\n        precision = tf.cast(0, tf.float32)\n\n    return precision\n\n\ndef recall_fn(true_positives, false_negatives):\n    recall = tf.math.divide(true_positives, true_positives + false_negatives)\n\n    if tf.math.is_nan(recall):\n        recall = tf.cast(0, tf.float32)\n\n    return recall\n\n\ndef f1_fn(true_positives, false_positives, false_negatives):\n    fp_fn = tf.math.divide(false_positives + false_negatives, 2)\n\n    if tf.math.is_nan(fp_fn):\n        fp_fn = tf.cast(0, tf.float32)\n\n    f1 = tf.math.divide(true_positives, true_positives + fp_fn)\n\n    if tf.math.is_nan(f1):\n        f1 = tf.cast(0, tf.float32)\n\n    return f1\n","repo_name":"lorcanpd/TagOntoText","sub_path":"custom_metrics.py","file_name":"custom_metrics.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30904695990","text":"import re\nfrom search.reviewCollection import reviewCollection\n\n# Index built over the dataset to support search:\n# a hashMap that maps any given keyword to the indices of the reviews in the\n# reviewCollection container whose text contains that keyword\nclass Index():\n\thashMap = {}\n\tflag = True\n\tdef buildIndexes(self):\n\t\tif(Index.flag):\n\t\t\treviewList = reviewCollection.reviewCollectionList\n\t\t\treview_index = 0\n\t\t\tr = re.compile(r'[^a-z]+')\n\t\t\tfor review in reviewList:\n\t\t\t\treview_text = review.text.split(\" \")\n\t\t\t\tfor word in review_text:\n\t\t\t\t\tword = r.sub('', word.lower())\n\t\t\t\t\tif not word:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif word in Index.hashMap:\n\t\t\t\t\t\tif review_index not in Index.hashMap[word]:\n\t\t\t\t\t\t\tIndex.hashMap[word].append(review_index)\n\t\t\t\t\telse:\n\t\t\t\t\t\tIndex.hashMap[word] = [review_index]\n\t\t\t\treview_index = review_index+1\n\t\tIndex.flag = False\n\n","repo_name":"varshiniramesh/tasty-search","sub_path":"search/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70175367876","text":"from django.urls import path, include\nfrom . 
import views\nfrom rest_framework import routers\n\n\nrouter = routers.DefaultRouter()\nrouter.register('allCompany', views.AllCompanyView, 'allCompany')\nrouter.register('allProduct', views.AllProductView, 'allProduct')\nrouter.register('company', views.CompanyView, 'company')\nrouter.register('productSet', views.ProductView, 'productSet')\nrouter.register('productRequest', views.ProductRequestView, 'productRequest')\nrouter.register('transactionHistory', views.TransactionHistoryView, 'transactionHistory')\nrouter.register('salesRecord', views.SalesRecordView, 'salesRecord')\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('index/', views.registration, name='index'),\n    path('product_trans/', views.ProductTrans.as_view(), name='product_trans'),\n]\n","repo_name":"cedrickvstheworld/open_Product_WareHouse_REST_API","sub_path":"openPW_REST_API/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37339493403","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'Emulating the dir command with os'\n\nimport os\npath = os.path.abspath('../')\ndef dir_path(path, arr):\n    # print every entry with the current indent prefix, and recurse into\n    # subdirectories with one extra indent level\n    for x in os.listdir(path):\n        _path = os.path.join(path, x)\n        print(''.join(arr), '|---', x)\n        if os.path.isdir(_path):\n            dir_path(_path, arr + ['    '])\ndir_path(path, [])","repo_name":"ijisen/learn_note","sub_path":"python-learn/py_io/demo/fiel_dir.py","file_name":"fiel_dir.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32534682788","text":"import serial # to communicate with the reader physically\nfrom .message import PURPacket, retCodeDescr, tagFreqsKHz, tagEncodings # to build and read messages\nimport struct # for parsing parameters from messages\nimport time # for waiting\nimport logging # for logging debug messages\n\n\nclass PURReader:\n    '''\n    Reader controller\n    '''\n    def __init__(self, interface):\n        '''\n        :param interface: serial port interface string\n        '''\n        self.dev = serial.Serial(interface, 9600, timeout=2)\n        self.log = logging.getLogger(self.__class__.__name__)\n    \n\n    def __del__(self):\n        if hasattr(self, 'dev'):\n            # implement controlled stop here\n            self.dev.close()\n    \n\n    def send(self, pkg, check=False):\n        '''\n        Sends a message to the reader\n\n        :param pkg: PURPacket packet object to send\n        :param check: should be set to True when response contains return code\n        :returns: response PURPacket packet object(s)\n        '''\n        # send packet\n        self.log.debug('Sending: {}'.format(pkg))\n        self.dev.write(pkg.msgBytes)\n\n        # receive\n        respBytes = self.dev.read(1) # wait until something is in buffer\n        time.sleep(0.01) # give time for the reader to write more in buffer\n        respBytes += self.dev.read(self.dev.in_waiting)\n\n        # get all command related packets\n        start = pkg.msgBytes[:6] # bytes R,F,E,0x01,,\n        respPkgs = [PURPacket(msgBytes=start+part) for part in respBytes.split(start)[1:]]\n        self.log.debug('Received: '+'\\n'.join(str(p) for p in respPkgs))\n        \n        # check if command was successful\n        if check:\n            for respPkg in respPkgs:\n                self.checkResp(respPkg)\n        \n        # return\n        if len(respPkgs) == 1:\n            return respPkgs[0]\n        else:\n            return respPkgs\n    \n\n    def checkResp(self, respPkg):\n        '''\n        Checks first byte of packet payload for return code. 
\n If not 0, an exception with the error description is thrown.\n\n :param respPkg: received package from reader\n '''\n if respPkg.pldBytes:\n code = respPkg.pldBytes[0]\n if code != 0:\n if code in retCodeDescr:\n raise IOError('Problem with packet {}: {}'.format(\n respPkg, retCodeDescr[code]))\n \n\n @property\n def antCount(self):\n '''\n Gets number of reader antennas\n '''\n resp = self.send(PURPacket(b'\\x01\\x10'))\n return resp.pldBytes[1]\n \n\n @property\n def attnDB(self):\n '''\n Gets attenuation in dB\n '''\n resp = self.send(PURPacket(b'\\x02\\x01'), True)\n # maximum, current attenuation\n _, curAttn = struct.unpack('!HH', resp.pldBytes[1:])\n return curAttn\n \n\n @attnDB.setter\n def attnDB(self, val):\n '''\n Sets attenuation in dB\n '''\n self.send(PURPacket(b'\\x02\\x81', struct.pack('!H', val)), True)\n \n\n @property\n def freqKHz(self):\n '''\n Gets frequency in kHz\n '''\n resp = self.send(PURPacket(b'\\x02\\x02'), True)\n # mode (0 = random hopping, 1 = static), maximum frequency count, current frequency count\n _, _, numFreqs = struct.unpack('!BBB', resp.pldBytes[1:4])\n # frequency list\n freqs = [struct.unpack('!I', b'\\x00'+resp.pldBytes[4+iF*3:4+iF*3+3])[0] for iF in range(numFreqs)]\n if len(freqs) > 1:\n return freqs\n else:\n return freqs[0]\n \n\n @freqKHz.setter\n def freqKHz(self, val):\n '''\n Sets frequency in kHz\n '''\n if isinstance(val, (tuple, list)):\n # expect frequency list in kHz\n # we use random hopping in this case\n freqs = [int(f) for f in val]\n payload = struct.pack('!BB', 1, len(freqs)) # mode 1 (random) and n frequencies\n for freq in freqs:\n payload += struct.pack('!I', freq)[1:] # add frequency as 3 bytes\n elif isinstance(val, (int, float)):\n # expect 1 frequency in kHz\n freq = int(val)\n payload = struct.pack('!BB', 0, 1) # mode 0 (static) and 1 frequency\n payload += struct.pack('!I', freq)[1:] # add frequency as 3 bytes\n else:\n raise SyntaxError('freqKHz must be either a single frequency or a list of frequencies')\n \n self.send(PURPacket(b'\\x02\\x82', payload), True)\n\n\n @property\n def sensDBm(self):\n '''\n Gets sensitivity in dBm\n '''\n resp = self.send(PURPacket(b'\\x02\\x03'), True)\n # maximum sensitivity, minimum sensitivity, current sensitivity\n _, _, curSens = struct.unpack('!hhh', resp.pldBytes[1:])\n return curSens\n \n\n @sensDBm.setter\n def sensDBm(self, val):\n '''\n Sets sensitivity in dBm\n '''\n self.send(PURPacket(b'\\x02\\x83', struct.pack('!h', val)), True)\n \n\n def setParam(self, addr, valBytes):\n '''\n Sets a device specific parameter.\n See \"Reader-Host-Protocol – PUR-Extension\"\n\n :param addr: address of the parameter\n :param valBytes: parameter value bytes\n '''\n payload = struct.pack('!HB', addr, len(valBytes))\n payload += valBytes\n self.send(PURPacket(b'\\x03\\x30', payload), True)\n \n\n def getParam(self, addr):\n '''\n Gets a device specific parameter.\n See \"Reader-Host-Protocol – PUR-Extension\"\n\n :param addr: address of the parameter\n :returns: parameter value bytes\n '''\n resp = self.send(PURPacket(b'\\x03\\x31', struct.pack('!H', addr)), True)\n size = resp.pldBytes[1]\n return resp.pldBytes[2:2+size]\n \n\n @property\n def session(self):\n '''\n Gets inventory session. \n Can be 0, 1, 2 or 3\n '''\n return self.getParam(0x0028)[0]\n \n\n @session.setter\n def session(self, val):\n '''\n Sets inventory session\n '''\n self.setParam(0x0028, struct.pack('!B', val))\n \n\n @property\n def modDepth(self):\n '''\n Gets reader modulation depth. 
\n        Can be 0...100 %\n        '''\n        return self.getParam(0x0022)[0]\n    \n\n    @modDepth.setter\n    def modDepth(self, val):\n        '''\n        Sets reader modulation depth\n        '''\n        self.setParam(0x0022, struct.pack('!B', val))\n    \n\n    @property\n    def blfKHz(self):\n        '''\n        Gets tag backscatter link frequency. \n        Can be 40, 80, 160, 213, 256 or 320 kHz\n        '''\n        blfKey = self.getParam(0x0020)\n        return tagFreqsKHz[blfKey[0]]\n    \n\n    @blfKHz.setter\n    def blfKHz(self, val):\n        '''\n        Sets tag backscatter link frequency\n        '''\n        val = int(val)\n        # get key (lookup byte) for value (frequency)\n        for blfKey, blfVal in tagFreqsKHz.items():\n            if val == blfVal:\n                self.setParam(0x0020, struct.pack('!B', blfKey))\n                return\n        \n        raise ValueError('Invalid backscatter frequency. Can be: '+\n            ', '.join('{} kHz'.format(f) for f in tagFreqsKHz.values()))\n    \n\n    @property\n    def encoding(self):\n        '''\n        Gets tag backscatter link encoding. \n        Can be \"FM0\", \"M2\", \"M4\" or \"M8\"\n        '''\n        encKey = self.getParam(0x0021)\n        return tagEncodings[encKey[0]]\n    \n\n    @encoding.setter\n    def encoding(self, val):\n        '''\n        Sets tag backscatter link encoding\n        '''\n        # get key (lookup byte) for value (string)\n        for encKey, encVal in tagEncodings.items():\n            if val == encVal:\n                self.setParam(0x0021, struct.pack('!B', encKey))\n                return\n        \n        raise ValueError('Invalid backscatter encoding. Can be: '+\n            ', '.join(e for e in tagEncodings.values()))\n    \n\n    def enableOutput(self, enable):\n        '''\n        Sets antenna power on or off\n\n        :param enable: True or False\n        '''\n        state = 1 if enable else 0\n        self.send(PURPacket(b'\\x03\\x03', struct.pack('!B', state)), True)\n    \n\n    def reportRSSI(self, enable):\n        '''\n        Enables the report of Q/I RSSI for detected tag\n\n        :param enable: True or False\n        '''\n        state = 1 if enable else 0\n        self.setParam(0x0002, struct.pack('!B', state))\n    \n\n    def parseTagreports(self, recPkgs):\n        '''\n        Parses tagreports from inventory packets\n\n        :param recPkgs: one or more packets from single or cyclic inventory\n        :returns: list of dictionaries with meta infos of detected tags\n        '''\n        if not isinstance(recPkgs, list):\n            recPkgs = [recPkgs] # unify response because there might be more than 1 package\n        \n        # parse tags\n        tags = []\n        collectedIds = 0\n        idCount = 0\n        for pkg in recPkgs:\n            # packet meta data\n            idCount, pkgIdCount = struct.unpack('!BB', pkg.pldBytes[1:3])\n            collectedIds += pkgIdCount # tag ids in this packet\n            \n            # bytes for detected tag info structures\n            tagsBytes = pkg.pldBytes[3:]\n            for _ in range(pkgIdCount):\n                # parse tag infos\n                # check tag id start\n                iStart = tagsBytes.find(0x01)\n                tagsBytes = tagsBytes[iStart+1:]\n                \n                # get id length\n                idLen = tagsBytes[0]\n                tagsBytes = tagsBytes[1:]\n\n                # get id\n                tagID = ''.join('{:02X}'.format(b) for b in tagsBytes[:idLen])\n                tagsBytes = tagsBytes[idLen:]\n\n                # check RSSI start\n                if tagsBytes[0] != 0x02:\n                    raise SyntaxError('Wrong RSSI start')\n                tagsBytes = tagsBytes[1:]\n\n                # get RSSI\n                rssiQ, rssiI = struct.unpack('!BB', tagsBytes[:2])\n                tagsBytes = tagsBytes[2:]\n\n                tagData = {\n                    tagID: {\n                        'rssiI': rssiI, \n                        'rssiQ': rssiQ\n                    }\n                }\n                tags.append(tagData)\n        \n        if collectedIds != idCount:\n            raise IOError('Only packets for {} out of {} tags were received'.format(collectedIds, idCount))\n\n        return tags \n    \n\n    def singleInventory(self):\n        '''\n        Performs a single inventory and returns the tags detected \n        with meta infos\n        '''\n        self.reportRSSI(True) # tags shall be reported with RSSI\n        resp = self.send(PURPacket(b'\\x50\\x01'), True) # start inventory\n        tags = self.parseTagreports(resp)\n        
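        # (added note) `tags` is a list of one-entry dicts of the form
        # {tagID: {'rssiI': ..., 'rssiQ': ...}}, as built by parseTagreports above.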
self.log.info('{} tags found'.format(len(tags)))\n        return tags\n","repo_name":"EMS-TU-Ilmenau/PURReaderControl","sub_path":"purreader/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":10851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35022214556","text":"# -*- coding: UTF-8 -*-\r\nimport numpy as np\r\nimport Tkinter as tk\r\n#from Tkinter import ttk\r\nimport ttk\r\nfrom Tkinter import *\r\nimport threading\r\nimport time\r\nimport random\r\nimport copy\r\nimport Demo\r\n# import VirtualCar\r\n\r\n\r\nclass sillycar:\r\n    # action to take for each (down/left/up/right) -> (down/left/up/right) transition\r\n    table = [\r\n        [0, 2, 3, 1],\r\n        [1, 0, 2, 3],\r\n        [3, 1, 0, 2],\r\n        [2, 3, 1, 0]\r\n    ]\r\n\r\n    action = [\"go straight\", \"turn left\", \"turn right\", \"go back\"]\r\n\r\n    # length and width of the whole canvas, plus which points are walkable, as [(x, y, info), ...]\r\n    def __init__(self, length, width, list_of_tuples, now, orientation):\r\n        self.length = length\r\n        self.width = width\r\n        self.Map = np.zeros((length, width), dtype='int32')\r\n        self.pos2info = dict()\r\n        self.info2pos = dict()\r\n        for i in range(len(list_of_tuples)):\r\n            self.Map[list_of_tuples[i][0]][list_of_tuples[i][1]] = i + 1\r\n            self.pos2info[i + 1] = list_of_tuples[i][2]\r\n            self.info2pos[list_of_tuples[i][2]] = (list_of_tuples[i][0], list_of_tuples[i][1])\r\n        # previous position\r\n        self.pre = -1\r\n        # current position\r\n        self.now = now\r\n        # target position\r\n        self.target = -1\r\n        # positions already visited since the current pathfinding started; used to draw the path travelled so far\r\n        self.pres = []\r\n        # current orientation\r\n        self.orientation = orientation\r\n\r\n    # update the current position and get the next position and action\r\n    def updatePosition(self, newposition):\r\n        if self.now != newposition:\r\n            bestway = self.dfs(self.now, newposition)\r\n            temp = []\r\n            for i in range(len(bestway)):\r\n                temp.append(self.pos2info[self.Map[bestway[i][0]][bestway[i][1]]])\r\n            self.pre = temp[-2]\r\n            #print(\"pre:\",self.pre)\r\n            self.now = newposition\r\n            self.pres.extend(temp[:-1])\r\n            self.orientation = self.getOrientation()\r\n        if self.target == -1:\r\n            return -1, -1\r\n        if self.now == self.target:\r\n            return -1, -1\r\n        bestway = self.dfs(self.now, self.target)\r\n        nextpoint = self.pos2info[self.Map[bestway[1][0]][bestway[1][1]]]\r\n        nextaction = self.table[bestway[0][2]][bestway[1][2]]\r\n        print(nextpoint, nextaction)\r\n        return nextpoint, nextaction\r\n\r\n    # get the car's current orientation\r\n    def getOrientation(self):\r\n        if self.pre == -1 or self.now == -1:\r\n            return 0\r\n        prex, prey = self.info2pos[self.pre]\r\n        nowx, nowy = self.info2pos[self.now]\r\n        nowx -= prex\r\n        nowy -= prey\r\n        if nowx == 0:\r\n            if nowy == 1:\r\n                return 3\r\n            else:\r\n                return 1\r\n        elif nowx == -1:\r\n            return 2\r\n        else:\r\n            return 0\r\n\r\n    def getOrientation2(self, pre, now):\r\n        if pre == -1 or now == -1:\r\n            return 0\r\n        prex, prey = self.info2pos[pre]\r\n        nowx, nowy = self.info2pos[now]\r\n        nowx -= prex\r\n        nowy -= prey\r\n        if nowx == 0:\r\n            if nowy == 1:\r\n                return 3\r\n            else:\r\n                return 1\r\n        elif nowx == -1:\r\n            return 2\r\n        else:\r\n            return 0\r\n\r\n    def dfs(self, start, end):\r\n        visited = self.Map.copy()\r\n        direction = [(1, 0), (0, -1), (-1, 0), (0, 1)]  # down left up right\r\n        xs, ys = self.info2pos[start]\r\n        xe, ye = self.info2pos[end]\r\n\r\n        bestway = []\r\n        currentway = []\r\n        bestcnt = 999999\r\n        lastd = -1\r\n\r\n        currentway.append([xs, ys, self.orientation, 0])\r\n        # print(self.getOrientation())\r\n        visited[xs][ys] = 0\r\n        while len(currentway) > 0:\r\n            # x, y, the direction we came from, the direction expanded to so far\r\n            xs, ys, df, dt = currentway[-1]\r\n            # print(xs,ys,df, dt)\r\n\r\n            if xs == xe and ys == ye:\r\n                currentcnt = 0\r\n                for j in range(1, len(currentway)):\r\n                    if currentway[j][2] != currentway[j - 
1][2]:\r\n                        currentcnt += 1\r\n                if currentcnt < bestcnt or (currentcnt == bestcnt and len(currentway) < len(bestway)):\r\n                    bestcnt = currentcnt\r\n                    # bestway = currentway.copy()\r\n                    bestway = copy.copy(currentway)\r\n                visited[xs][ys] = 1\r\n                currentway.pop()\r\n                continue\r\n\r\n            flag = True\r\n            for i in range(dt, 4):\r\n                tempx = xs + direction[i][0]\r\n                tempy = ys + direction[i][1]\r\n                if 0 <= tempx < self.length and 0 <= tempy < self.width and visited[tempx][tempy] != 0:\r\n                    currentway[-1][3] = i + 1\r\n                    currentway.append([tempx, tempy, i, 0])\r\n                    visited[tempx][tempy] = 0\r\n                    flag = False\r\n                    break\r\n            if flag:  # none of the four directions is walkable\r\n                visited[xs][ys] = 1\r\n                currentway.pop()\r\n\r\n        return bestway\r\n\r\n    def findway(self):\r\n        bestway = self.dfs(self.now, self.target)\r\n        result = []\r\n        for i in range(len(bestway)):\r\n            result.append(self.pos2info[self.Map[bestway[i][0]][bestway[i][1]]])\r\n        return result\r\n\r\n\r\nclass uglyUI:\r\n    buttons = []\r\n    canvases = dict()\r\n\r\n    def __init__(self, stupidcar):\r\n        self.stupidcar = stupidcar\r\n\r\n        # build the UI\r\n        root = tk.Tk()\r\n        root.title(\"stupid car\")\r\n        frame1 = ttk.Frame(root, padding=\"3 3 12 12\")\r\n        frame1.grid(column=0, row=0, sticky=(N, W, E, S))\r\n        root.columnconfigure(0, weight=1)\r\n        root.rowconfigure(0, weight=1)\r\n\r\n        # add buttons and connecting lines\r\n        temp = stupidcar.Map\r\n        for i in range(2 * stupidcar.length - 1):\r\n            if i % 2 == 0:\r\n                for j in range(2 * stupidcar.width - 1):\r\n                    if j % 2 == 0:\r\n                        if temp[i // 2][j // 2] != 0:\r\n                            b = tk.Button(frame1, text=str(int(temp[i // 2][j // 2])), width=2, bg='white')\r\n                            b.bind(\"<Button-1>\", func=self.press)\r\n                            b.grid(row=i, column=j)\r\n                            self.buttons.append(b)\r\n                    else:\r\n                        if temp[i // 2][(j - 1) // 2] != 0 and temp[i // 2][(j + 1) // 2] != 0:\r\n                            cv = Canvas(frame1, width=30, height=30)\r\n                            cv.create_line(0, 15, 30, 15, tags='line')\r\n                            cv.grid(row=i, column=j)\r\n                            self.canvases[str(temp[i // 2][(j - 1) // 2]) + 'to' + str(temp[i // 2][(j + 1) // 2])] = cv\r\n                            self.canvases[str(temp[i // 2][(j + 1) // 2]) + 'to' + str(temp[i // 2][(j - 1) // 2])] = cv\r\n            else:\r\n                for j in range(2 * stupidcar.width - 1):\r\n                    if j % 2 == 0 and temp[(i - 1) // 2][j // 2] != 0 and temp[(i + 1) // 2][j // 2] != 0:\r\n                        cv = Canvas(frame1, width=30, height=30)\r\n                        cv.create_line(15, 0, 15, 30, tags='line')\r\n                        cv.grid(row=i, column=j)\r\n                        self.canvases[str(temp[(i - 1) // 2][j // 2]) + 'to' + str(temp[(i + 1) // 2][j // 2])] = cv\r\n                        self.canvases[str(temp[(i + 1) // 2][j // 2]) + 'to' + str(temp[(i - 1) // 2][j // 2])] = cv\r\n\r\n        self.newTravel()\r\n        #t = threading.Thread(target=VirtualCar, args=(self,), name=\"thread-refresh\")\r\n        t = threading.Thread(target=Demo.run, args=(self,), name=\"thread-car\")\r\n        t.setDaemon(True)\r\n        t.start()\r\n\r\n        root.mainloop()\r\n\r\n    # called once before each new journey starts\r\n    def newTravel(self):\r\n        self.stupidcar.pres = []\r\n        self.stupidcar.target = -1\r\n        for b in self.buttons:\r\n            b['bg'] = \"white\"\r\n        self.buttons[stupidcar.now - 1]['bg'] = 'yellow'\r\n        for cv in self.canvases.values():\r\n            cv.delete(\"triangle\")\r\n\r\n    def addArrow(self, f, t):\r\n        if f == t:\r\n            return\r\n        o = self.stupidcar.getOrientation2(f, t)\r\n        cv = self.canvases[str(f) + 'to' + str(t)]\r\n        if o == 0:\r\n            cv.create_polygon(15, 30, 10, 25, 20, 25, tags='triangle')\r\n        elif o == 1:\r\n            cv.create_polygon(2, 15, 7, 10, 7, 20, tags='triangle')\r\n        elif o == 2:\r\n            cv.create_polygon(15, 2, 10, 7, 20, 7, tags='triangle')\r\n        else:\r\n            cv.create_polygon(25, 10, 25, 20, 30, 15, tags='triangle')\r\n\r\n    def clear(self):\r\n        for b in 
self.buttons:\r\n b['bg'] = 'white'\r\n for cv in self.canvases.values():\r\n cv.delete(\"triangle\")\r\n\r\n def repaint(self):\r\n # pres = self.stupidcar.pres.copy()\r\n pres = copy.copy(self.stupidcar.pres)\r\n pres.append(self.stupidcar.now)\r\n #print(pres)\r\n route = self.stupidcar.findway()\r\n self.buttons[pres[0] - 1]['bg'] = 'pink'\r\n for i in range(1, len(pres)):\r\n self.buttons[pres[i] - 1]['bg'] = 'gray'\r\n self.addArrow(pres[i - 1], pres[i])\r\n for i in range(len(route)):\r\n num = route[i]\r\n if i == 0:\r\n self.buttons[num - 1]['bg'] = 'yellow'\r\n self.addArrow(num, route[i + 1])\r\n # print(num)\r\n elif i == len(route) - 1:\r\n self.buttons[num - 1]['bg'] = 'red'\r\n else:\r\n self.buttons[num - 1]['bg'] = 'orange'\r\n self.addArrow(num, route[i + 1])\r\n\r\n def press(self, event):\r\n self.clear()\r\n target = int(event.widget['text'])\r\n self.stupidcar.target = target\r\n self.repaint()\r\n\r\n def updatePosition(self, newposition):\r\n nextposition, nextaction = self.stupidcar.updatePosition(newposition)\r\n if self.stupidcar.target != -1 and self.stupidcar.now != self.stupidcar.target:\r\n self.clear()\r\n self.repaint()\r\n else:\r\n self.newTravel()\r\n return nextposition, nextaction+1\r\n\r\n def threadmove(self):\r\n fakemove = [2, 3, 4, 5, 6, 7, 16, 21, 26, 35, 36, 37, 38, 27, 22, 17, 10, 11]\r\n it = iter(fakemove)\r\n while True:\r\n time.sleep(3)\r\n # print(\"here\")\r\n if random.randint(0, 1) == 0:\r\n #print(\"move\")\r\n self.updatePosition(next(it))\r\n\r\n\r\n# def VirtualCar(UI):\r\n# fakemove = [2, 3, 4, 15]\r\n# it = iter(fakemove)\r\n# while True:\r\n# time.sleep(3)\r\n# try:\r\n# print(UI.updatePosition(next(it)))\r\n# except:\r\n# break\r\n\r\n\r\ngrids = [\r\n (0,0,1),(0,1,2),(0,2,3),(0,3,4),(0,4,5),(0,5,6),(0,6,7),(0,7,8),(0,8,9),(0,9,10),(0,10,11),(0,11,12),(0,12,13),\r\n (1,0,14),(1,3,15),(1,6,16),(1,9,17),(1,12,18),\r\n (2,0,19),(2,3,20),(2,6,21),(2,9,22),(2,12,23),\r\n (3,0,24),(3,3,25),(3,6,26),(3,9,27),(3,12,28),\r\n (4,0,29),(4,1,30),(4,2,31),(4,3,32),(4,4,33),(4,5,34),(4,6,35),(4,7,36),(4,8,37),(4,9,38),(4,10,39),(4,11,40),(4,12,41)\r\n]\r\nstupidcar = sillycar(5,13,grids,1,3)\r\nuglyui = uglyUI(stupidcar)\r\n\r\n","repo_name":"FeHanHanBlues/Navigation","sub_path":"route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26158839338","text":"from typing import Any, Dict\nfrom datetime import date\nimport os\nimport json\n\nroot_path = json.load(open(\"../../root_path.json\"))\n\nclass FileHandler():\n def __init__(self, line_bot_api : Any) -> None:\n self._api = line_bot_api\n\n def handle_pdf(self, event : Dict) -> None:\n message_content = self._api.get_message_content(event.message.id)\n dir = f\"{root_path['UNZIP_PATH']}/{date.today().strftime('%Y%m%d')}/2\"\n\n if \".pdf\" in event.message.file_name:\n if not os.path.isdir(dir):\n os.mkdir(dir)\n \n if event.message.file_name not in os.listdir(dir):\n with open(f\"{dir}/{event.message.file_name}\", 'wb') as fd:\n for chunk in message_content.iter_content():\n fd.write(chunk)","repo_name":"e4903180/financialSite","sub_path":"data/LineBot/utils/FileHandler/FileHandler.py","file_name":"FileHandler.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18484689045","text":"import sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom 
queue import Queue\n\nsys.path.insert(0, str(Path(__file__).parent.parent))\nfrom intcode import Intcode\nsys.path = sys.path[1:]\n\npuzzle_input_path = Path(__file__).parent / \"input.txt\"\n\nwith open(puzzle_input_path) as puzzle_input_file:\n    puzzle_input_raw = puzzle_input_file.read()\n\nprogram = [int(x) for x in puzzle_input_raw.split(\",\")]\n\n# Setup Robot Intcode program\ninput_queue = Queue()\noutput_queue = Queue()\nrobot = Intcode(program, inputs=input_queue, outputs=output_queue)\n\ngrid = defaultdict(int)\ncurrent_pos = (0, 0)\ncurrent_direction = (0, 1) \ngrid[current_pos] = 1\n\nwhile not robot.halted:\n    input_queue.put(grid[current_pos])\n    color = robot.step_to_next_output()\n    turn = robot.step_to_next_output()\n    grid[current_pos] = color\n    if turn == 0:\n        current_direction = current_direction[1] * -1, current_direction[0]\n    else:\n        current_direction = current_direction[1], current_direction[0] * -1\n    current_pos = current_pos[0] + current_direction[0], current_pos[1] + current_direction[1]\n\n\nrow_min, row_max = min(grid, key=lambda g: g[1])[1], max(grid, key=lambda g: g[1])[1]\ncol_min, col_max = min(grid, key=lambda g: g[0])[0], max(grid, key=lambda g: g[0])[0]\n\nfor y in reversed(range(row_min - 1, row_max + 1)):\n    for x in range(col_min -1, col_max + 1):\n        if grid.get((x, y), 0) == 1:\n            print(\" O \", end=\"\")\n        else:\n            print(\"   \", end=\"\")\n    print()","repo_name":"timofurrer/aoc","sub_path":"2019/11/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34111219091","text":"from django.contrib import admin\nfrom .models import Review\n\n\n\n@admin.register(Review)\nclass ReviewAdmin(admin.ModelAdmin):\n    list_display = ('id', 'user', 'title', 'timestamp')\n\n    fieldsets = (\n        (None, {\n            'fields':('title', 'text','user', 'tool', 'stars')\n        }),\n    )","repo_name":"alias-pyking/tool-rent","sub_path":"reviews/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17720901137","text":"\"\"\"\nCommon screen related variables and functions\n\"\"\"\nimport shutil\nimport os\nimport sys\nimport time\nimport numpy as np\nimport config\nimport sound\n\n\n# boundary variables\nCOLS = shutil.get_terminal_size().columns\nROWS = shutil.get_terminal_size().lines\nMIDS = int(COLS/2)\nR1 = int(MIDS/2)\nR2 = int((MIDS + COLS)/2)\nR3 = int((3*COLS)/8)\nR4 = int((5*COLS)/8)\nR5 = int(COLS/8)\nR6 = int((7*COLS)/8)\nMIDS_R = int(ROWS/2)\nR1_R = int(ROWS/5)\n\n# array representing the whole terminal\nARR = np.full((COLS+1, ROWS+1), \" \", dtype='U1')\n\ndef set_arr(x_pos, y_pos, symbol):\n    \"\"\"\n    set screen\n    \"\"\"\n    ARR[x_pos][y_pos] = symbol\n\n\ndef reset_arr(x_pos, y_pos):\n    \"\"\"\n    reset screen\n    \"\"\"\n    ARR[x_pos][y_pos] = \" \"\n\n\ndef value_arr(x_pos, y_pos):\n    \"\"\"\n    get value of screen at x_pos,y_pos\n    \"\"\"\n    return ARR[x_pos][y_pos]\n\n\ndef print_all():\n    \"\"\"\n    print out the whole screen\n    \"\"\"\n    os.system(\"tput reset\")\n    for j in range(1, ROWS+1):\n        for i in range(1, COLS+1):\n            sys.stdout.write(ARR[i][j])\n            sys.stdout.flush()\n        if j < ROWS:\n            sys.stdout.write(\"\\n\")\n\n    print(\"MARIO GAME BY R.S.SUBBULAKSHMI\\t\\t\\tPOINTS: \"+str(config.POINTS) +\n          \"\\t\\t\\tLIVES: \"+str(config.LIVES)+\"\\t\\t\\tLEVEL: \" + str(config.LEVEL)+\"\\n\")\n\n# deletes all the copies of data present and ARR is also 
cleaned out\n\n\ndef restart_all():\n    \"\"\"\n    for next level or when Mario dies\n    \"\"\"\n    os.system(\"tput reset\")\n    global ARR\n    ARR = np.full((COLS+1, ROWS+1), \" \", dtype='U1')\n    config.M = \"\"\n    config.E_LIST = []\n    config.W_LIST = []\n    config.P_LIST = []\n    config.G_LIST = []\n    config.M_LIST = []\n    config.time_start = 0\n    print_all()\n    time.sleep(0.2)\n\n# game over because of quitting or winning the game or losing the game\n\n\ndef game_over():\n    \"\"\"\n    Game over\n    \"\"\"\n    sound.play_sound(\"nsmb_game_over.wav\")\n    restart_all()\n    os.system(\"tput reset\")\n    print(\"MARIO GAME BY R.S.SUBBULAKSHMI\\t\\t\\tPOINTS: \"+str(config.POINTS) +\n          \"\\t\\t\\tLIVES: \"+str(config.LIVES)+\"\\t\\t\\tLEVEL: \" + str(config.LEVEL)+\"\\n\")\n    if config.STAGE == \"won\":\n        print(\"WON WON WON !!!\\n\")\n    elif config.STAGE == \"quit\":\n        print(\"QUIT !\\n\")\n    else:\n        print(\"LOST SORRY !\\n\")\n","repo_name":"SubbulakshmiRS/Mario-Game","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26412525110","text":"from django.shortcuts import render\nfrom django.http import Http404\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.contrib.auth.models import User\nfrom qa.models import Question, Answer\nfrom qa.forms import AskForm, AnswerForm, SignUpForm, LoginForm\n\n# Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect\n\ndef test(request, *args, **kwargs):\n    return HttpResponse('OK', status=200)\n\ndef all(request):\n    return show(request,Question.objects.new(),'question-all')\n\ndef popular(request):\n    return show(request,Question.objects.popular(),'question-popular')\n\n\n# new questions view\ndef show(request, query, url_name):\n    limit = 10 # hardcoding!!!\n    page = request.GET.get('page', 1) #hardcoding !!!\n    paginator = Paginator(query, limit)\n#    paginator.baseurl = reverse(url_name)\n    try:\n        questions = paginator.page(page)\n    except EmptyPage:\n        questions = paginator.page(paginator.num_pages)\n\n#    questions = paginator.page(page) # Page\n    return render(request, 'all.html', {\n        'questions': questions,\n        'baseurl' : reverse(url_name),\n})\n\n\ndef question(request, *args, **kwargs):\n    id=int(kwargs['id'])\n    try:\n        question = Question.objects.get(pk=id)\n    except Question.DoesNotExist:\n        raise Http404\n    if request.method == \"POST\":\n        form = AnswerForm(request.POST)\n        if form.is_valid():\n            form.cleaned_data['author']=request.user\n            ans=form.save()\n            return HttpResponseRedirect('/question/%d/' % id)\n    \n    answers=Answer.objects.filter(question__pk=id)\n    form = AnswerForm()\n    return render(request, 'question.html', {\n        'question': question,\n        'answers': answers,\n        'form':form,\n    })\n\ndef ask(request, *args, **kwargs):\n    if request.method == \"POST\":\n        form = AskForm(request.POST)\n        if form.is_valid():\n            form.cleaned_data['author']=request.user\n            question = form.save()\n            url = question.get_url()\n            return HttpResponseRedirect(url)\n    else:\n        form = AskForm()\n    return render(request, 'askform.html', {\n        'form': form\n    })\n# --------------------------------------\nfrom django.contrib.auth import authenticate, login\n\ndef signup(request, *args, **kwargs):\n    if request.method == \"POST\":\n        form=SignUpForm(request.POST)\n        if form.is_valid():\n            user=User.objects.create_user(**form.cleaned_data)\n            user.backend=None\n            login(request, user)\n#            request.session.create()\n            
return HttpResponseRedirect('/')\n form=SignUpForm()\n return render(request, 'signupform.html', {\n 'form': form,\n })\n\n\ndef tologin(request, *args, **kwargs):\n msg='Login form
'\n    if request.method == \"POST\":\n        form=LoginForm(request.POST)\n        if form.is_valid():\n            user = authenticate(\n                username=form.cleaned_data['username'],\n                password=form.cleaned_data['password']\n            )\n            if user:\n                login(request,user)\n#                request.session.create()\n                return HttpResponseRedirect('/')\n            msg+=form.cleaned_data['username']+'/'+form.cleaned_data['password']\n        msg+=' invalid form'\n    form=LoginForm()\n    return render(request, 'loginform.html', {\n        'form': form,\n        'msg': msg,\n    })\n","repo_name":"leonson63/web","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41576595801","text":"# Management of the various parameters\nimport numpy as np\nimport tkinter as tk\n\nclass Params:\n    def __init__(self, master):\n        # figsize = (12.7, 2.9)\n        self.lower_limit = tk.DoubleVar(master, value=0.0)\n        self.upper_limit = tk.DoubleVar(master, value=5000.0)\n        self.use_filter = tk.BooleanVar(master, value=False)\n        self.filter_sigma = tk.IntVar(master, value=15)\n        # hash used to detect changes\n        self.old_hash = hash(tuple(map(lambda x: x.get(), vars(self).values())))\n\n    def reflect_params(self, parent):\n        if self.check_modify():\n            parent.create_spectrum()\n        else:\n            pass\n\n    def check_modify(self):\n        # print(\"check\", vars(self).values())\n        _vars = []\n        for key, value in vars(self).items():\n            if key == \"old_hash\":\n                continue\n            _vars.append(value.get())\n        new_hash = hash(tuple(_vars))\n        if new_hash == self.old_hash:\n            return False\n        else:\n            self.old_hash = new_hash\n            return True\n    \n\n    # def reflect_params(self, parent):\n    #     # reflect the parameter changes\n    #     self.change_limit(parent)\n\n    # # change the display range\n    # def change_limit(self, parent):\n    #     for ax in parent.axes:\n    #         # change the x-axis\n    #         ax.set_xlim(self.lower_limit.get(),self.upper_limit.get())\n    #         # change the y-axis\n    #         x,y = list(ax.lines[0].get_data())\n    #         index_list = np.where((self.lower_limit.get()<=x) & (x<=self.upper_limit.get()))\n    #         ax.set_ylim(0,y[index_list].max())\n\n","repo_name":"Roastedtoast029/create_spectrum_from_MALDI","sub_path":"Params.py","file_name":"Params.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2004124289","text":"import argparse\nimport logging.config\nimport os\nimport re\nimport textwrap\nfrom typing import Final\n\nfrom .__version__ import __version__\nfrom .app import create_app\n\n\nclass UserAction(argparse.Action):\n    \"\"\"Custom action for argparse, to facilitate validation of a user statement in form of \"<username>:<password>\".\"\"\"\n\n    PATTERN: Final = re.compile(r\"^([^:]+):([^:]+)$\")  # regex for \"<username>:<password>\"\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        assert isinstance(values, str)\n        if values and not self.PATTERN.match(values):\n            raise argparse.ArgumentError(\n                self,\n                \"{!r} is not a valid user statement in form of '<username>:<password>'\".format(values),\n            )\n        setattr(namespace, self.dest, values)\n\n\nclass PortAction(argparse.Action):\n    \"\"\"Custom action for argparse, to facilitate validation of a port number (0-65535).\"\"\"\n\n    PORT_RANGE: Final = range(0, 65535 + 1)  # range of valid port numbers\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        assert isinstance(values, int)\n        if values not in self.PORT_RANGE:\n            raise argparse.ArgumentError(\n                self,\n                \"port number must be between {} and {}\".format(self.PORT_RANGE[0], self.PORT_RANGE[-1]),\n            )\n        setattr(namespace, self.dest, values)\n\n\ndef main() -> None:\n    
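    # (added, illustrative) The custom argparse actions above validate at parse time:
    # e.g. `--user alice:secret` satisfies UserAction's pattern, while `--user alice`
    # raises argparse.ArgumentError, and `--port 70000` is rejected by PortAction
    # because it lies outside range(0, 65536).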
parser = argparse.ArgumentParser(\n        description=textwrap.dedent(\n            \"\"\"\\\n            Heliotherm heat pump REST API server\n            \"\"\"\n        ),\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n        epilog=textwrap.dedent(\n            \"\"\"\\\n            DISCLAIMER\n            ----------\n\n            Please note that any incorrect or careless usage of this program as well as\n            errors in the implementation can damage your heat pump!\n\n            Therefore, the author does not provide any guarantee or warranty concerning\n            the correctness, functionality or performance and does not accept any liability\n            for damage caused by this program or mentioned information.\n\n            Thus, use it at your own risk!\n            \"\"\"\n        )\n        + \"\\r\\n\",\n    )\n\n    parser.add_argument(\n        \"-d\",\n        \"--device\",\n        default=\"/dev/ttyUSB0\",\n        type=str,\n        help=\"the serial device on which the heat pump is connected, default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"-b\",\n        \"--baudrate\",\n        default=115200,\n        type=int,\n        # the supported baudrates of the Heliotherm heat pump (HP08S10W-WEB):\n        choices=[9600, 19200, 38400, 57600, 115200],\n        help=\"baudrate of the serial connection (same as configured on the heat pump), default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"--host\",\n        default=\"127.0.0.1\",\n        type=str,\n        help='the hostname to listen on, set to \"0.0.0.0\" to have the server available externally as well,'\n        \" default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"--port\",\n        default=8777,\n        type=int,\n        action=PortAction,\n        help=\"the port of the web server, default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"--user\",\n        default=\"\",\n        type=str,\n        action=UserAction,\n        help='the username and password for the basic access authentication in the form \"<username>:<password>\",'\n        \" default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"--bool-as-int\",\n        action=\"store_true\",\n        help=\"boolean values are treated as integers (with false equivalent to 0 and true equivalent to 1)\",\n    )\n\n    parser.add_argument(\n        \"--logging-config\",\n        default=os.path.normpath(os.path.join(os.path.dirname(__file__), \"logging.conf\")),\n        type=str,\n        help=\"the filename under which the logging configuration can be found, default: %(default)s\",\n    )\n\n    parser.add_argument(\n        \"--debug\",\n        action=\"store_true\",\n        help=\"enable Flask debug mode\",\n    )\n\n    parser.add_argument(\n        \"--read-only\",\n        action=\"store_true\",\n        help=\"disable write access to the heat pump\",\n    )\n\n    parser.add_argument(\n        \"--no-param-verification\",\n        action=\"store_true\",\n        help=\"disable all parameter verification actions\",\n    )\n\n    args = parser.parse_args()\n    print(\"Start Heliotherm heat pump REST API server v{}.\".format(__version__))\n    print(args)\n\n    # load logging config from file\n    logging.config.fileConfig(args.logging_config, disable_existing_loggers=False)\n\n    # create and start the Flask application\n    app = create_app(\n        args.device,\n        args.baudrate,\n        args.user,\n        args.bool_as_int,\n        args.read_only,\n        args.no_param_verification,\n    )\n    app.run(\n        host=args.host,\n        port=args.port,\n        debug=args.debug,\n        use_reloader=False,\n        threaded=False,\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"dstrigl/HtREST","sub_path":"htrest/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"16638184148","text":"class Graph:\n    def __init__(self):\n        self.edges = [] \n        self.totalVertex = 0\n        self.totalEdges = 0 \n        self.visited = [] \n    def createGraph(self):\n        print(\"How many 
Vertext you want to add\")\n self.totalVertex = int(input()) # 5 {0 1 2 3 4}\n print(\"How mnay Edges You have in Graph\")\n self.totalEdges = int(input()) #6\n for i in range(0,self.totalVertex):\n self.visited.append(False) \n\n def addEdge(self,source,destination):\n self.edges.append({source:destination}) #0,3 0,1\n self.edges.append({destination:source}) \n\n def printAllEdges(self):\n for i in range(0,self.totalVertex): #5 -> 0 1 2 3 4 \n print(\"\\n\",i,\" connected : \")\n for edge in self.edges:\n if edge.get(i):\n print(edge.get(i),end=\",\")\n\n def BFS(self,v): #BFS(0) BFS(1)\n queue = [] \n count = 0\n queue.append(v) # 0 1 3 \n self.visited[v] = True \n\n while len(queue) != 0 and count != self.totalVertex:\n #input(\"enter character\")\n #print(\"Q =>\",queue)\n v = queue.pop(0) #0 1\n count=count + 1 \n print(v)# 0 1\n for edge in self.edges:\n if edge.get(v) and self.visited[edge.get(v)] == False: #1 \n queue.append(edge.get(v)) #1 3 2 \n\n\ng = Graph()\n\ng.createGraph()\n\n#for \ng.addEdge(0,1)\ng.addEdge(0,3)\ng.addEdge(1,4)\ng.addEdge(1,2)\ng.addEdge(3,2)\ng.addEdge(3,4)\n\ng.printAllEdges()\n\nprint(\"\\n\")\ng.BFS(0)\n\n","repo_name":"tejasshah2k19/23-club-ds-RK","sub_path":"graph_array.py","file_name":"graph_array.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21925661726","text":"crabs = [] # list[int]\nwith open(\"day7.txt\", \"r\") as infile:\n crabs = list(map(int, infile.read().strip().split(\",\")))\n\nfuel = []\nfor pos in range(max(crabs)):\n fuel.append(sum(abs(crab - pos) for crab in crabs))\n\nprint(\"Part A\", min(fuel))\n\nfuel = []\nfor pos in range(max(crabs)):\n # sum of arithmetic progresson\n fuel.append(\n sum(\n (abs(crab - pos) * (abs(crab - pos) + 1)) // 2\n for crab in crabs\n if abs(crab - pos)\n )\n )\n\nprint(\"Part B\", min(fuel))\n","repo_name":"PALuczak/AdventOfCode2021","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23540861464","text":"from menu.enchilada import Enchilada\n\n\nclass EnchiladaVegan(Enchilada):\n\n def __init__(self, discount):\n self._name = \"Vegan enchilada\"\n self._price = 16.55 * (1 - discount)\n self._tortilla = \"wholewheat\"\n self._number_of_tortillas = 4\n self._spice = \"mild\"\n self._components = [\"marinated cactus leaves\", \"pico de gallo\", \"black beans\", \"lettuce\", \"maize\", \"rice\"]\n","repo_name":"bjarzembinski/DesignPatterns","sub_path":"ProjektyStudentow/2019_Jarzembinski_Bartlomiej/menu/enchiladas/vegan.py","file_name":"vegan.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"41711956728","text":"from mock import patch\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\nfrom slumber.connector import Client\nfrom slumber.connector.ua import _calculate_signature, _fake_http_headers\n\n\nclass ConfigureUser(object):\n def setUp(self):\n self.maxDiff = None # Show all diffs\n self.user = User(username='user', is_active=True, is_staff=True,\n is_superuser=False)\n self.user.set_password('pass')\n self.user.save()\n self.service = User(username='service', is_active=True, is_staff=True,\n is_superuser=True, password=settings.SECRET_KEY)\n self.service.save()\n self.__patchers = [\n 
patch('slumber.connector._get_slumber_authn_name', lambda: 'service'),\n        ]\n        [p.start() for p in self.__patchers]\n        super(ConfigureUser, self).setUp()\n    def tearDown(self):\n        super(ConfigureUser, self).tearDown()\n        [p.stop() for p in self.__patchers]\n\n    def signed_get(self, username, url='/'):\n        headers = _calculate_signature('service', 'GET', url, '', username)\n        return self.client.get(url, **_fake_http_headers(headers))\n\n    def signed_post(self, username, url, data):\n        headers = _calculate_signature('service', 'POST', url, data, username)\n        return self.client.post(url, data, **_fake_http_headers(headers))\n\n\nclass ConfigureAuthnBackend(ConfigureUser):\n    def setUp(self):\n        self.assertFalse(hasattr(settings, 'SLUMBER_DIRECTORY'))\n        self.assertFalse(hasattr(settings, 'SLUMBER_SERVICE'))\n        self.__backends = settings.AUTHENTICATION_BACKENDS\n        settings.AUTHENTICATION_BACKENDS = [\n            'slumber.connector.authentication.Backend',\n        ]\n        super(ConfigureAuthnBackend, self).setUp()\n\n    def tearDown(self):\n        super(ConfigureAuthnBackend, self).tearDown()\n        settings.AUTHENTICATION_BACKENDS = self.__backends\n\n\nclass PatchForAuthnService(ConfigureUser):\n    def setUp(self):\n        self.assertFalse(hasattr(settings, 'SLUMBER_DIRECTORY'))\n        self.assertFalse(hasattr(settings, 'SLUMBER_SERVICE'))\n        service = lambda: 'auth'\n        directory = lambda: {\n            'auth': 'http://localhost:8000/slumber/auth/',\n        }\n        self.__patchers = [\n            patch('slumber.server._get_slumber_service', service),\n            patch('slumber.server._get_slumber_directory', directory),\n        ]\n        [p.start() for p in self.__patchers]\n        client_patch = patch('slumber._client', Client())\n        client_patch.start()\n        self.__patchers.append(client_patch)\n        super(PatchForAuthnService, self).setUp()\n        self.user = User.objects.get(username=self.user.username)\n    def tearDown(self):\n        super(PatchForAuthnService, self).tearDown()\n        [p.stop() for p in self.__patchers]\n\n","repo_name":"hotkit/django-slumber","sub_path":"slumber_examples/tests/configurations.py","file_name":"configurations.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"61"} +{"seq_id":"15279252764","text":"# Required imports\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n# What we are calling the database\nSQLALCHEMY_DATABASE_URL='sqlite:///./persona.db'\n\n# Engine that main.py uses to run commands against the database\nengine=create_engine(SQLALCHEMY_DATABASE_URL,connect_args={'check_same_thread':False})\n\nSessionLocal=sessionmaker(bind=engine,autocommit=False,autoflush=False)\n# Base class that the models in models.py inherit from\nBase=declarative_base()\n\n# Dependency that yields a session and closes it when done\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()","repo_name":"Arigith/persona5royal","sub_path":"backend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74161982593","text":"'''Compiler'''\nfrom Compiler.compiler_parser import *\nfrom tokenize import *\nfrom pegen.parser import *\nfrom Compiler.symtab import *\nfrom Compiler.genir import *\n\nsymtab = None\ngenir = None\n\n\ndef compile(filename):\n    '''Compile'''\n    global symtab, genir\n    with open(filename, encoding='utf-8') as file:\n        tokengen = handle_tokens(file.readline)\n        tokenizer = Tokenizer(tokengen)\n        parser = GeneratedParser(tokenizer)\n        symtab = Symtab()\n        genir = 
GenIR()\n        tree = parser.start()\n        tree.gen()\n\n    tree.print()\n    print('='*32)\n    for i in Symtab.all_scopes:\n        i.print()\n    print('='*32)\n    genir.print()\n    print('='*32)\n    genir.toasm(filename+'.asm')\n\n\ndef handle_tokens(readline):\n    '''Apply some processing to the tokens produced by tokenize'''\n    for token in generate_tokens(readline):\n        if token.type in (NEWLINE, INDENT, DEDENT):\n            continue\n        yield token\n","repo_name":"1604042736/c--","sub_path":"c--2.4/Compiler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73691145793","text":"import time\nimport smbus\n\nADC=smbus.SMBus(1)#Declare to use I2C 1\n \nwhile True:\n    ADC.write_byte(0x04,0x20)#Write a byte to the slave\n    val = ADC.read_word_data(0x04,0x20)\n    temp = val / 10 / 1023 * 5 * 100 \n    print(temp)#Raspberry Pi reads the data returned by the expansion board and prints it out \n    time.sleep(1)#Delay 1 second","repo_name":"keywish/RaspberryPi-Starter-kit","sub_path":"Lesson/Lesson 8 LM35 temperature sensor experiment/Python/lm35.py","file_name":"lm35.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27069789074","text":"########################### Map dose ###################\nimport Globals\nimport tkinter as tk\nfrom tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog,\\\n    PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE\nimport os\nfrom os.path import normpath, basename\nimport cv2\nfrom cv2 import imread, IMREAD_ANYCOLOR, IMREAD_ANYDEPTH, imwrite\nimport numpy as np\nimport SimpleITK as sitk\nimport pydicom\nfrom PIL import Image, ImageTk\nimport os\nimport sys\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\ndef dose_to_pixel(D,a,b,c):\n    # calibration curve: pixel value as a rational function of dose\n    return a + b/(D-c)\n\ndef pixel_to_dose(P,a,b,c):\n    # inverse of dose_to_pixel: recover dose from a pixel value\n    return c + b/(P-a)\n\n\n#### TODO: add median filter\ndef calculate_dose_map(cv2Img):\n    wid = Globals.map_dose_ROI_x_end.get() - Globals.map_dose_ROI_x_start.get()\n    heig = Globals.map_dose_ROI_y_end.get() - Globals.map_dose_ROI_y_start.get()\n    print(wid, heig)\n    doseMap_film = np.zeros((heig,wid))\n    for i in range(heig):\n        for j in range(wid):\n            doseMap_film[i,j] = pixel_to_dose(cv2Img[Globals.map_dose_ROI_y_start.get()+i,Globals.map_dose_ROI_x_start.get()+j,2], \\\n                Globals.popt_red[0], Globals.popt_red[1], Globals.popt_red[2])\n    \n\n    \n    fig = Figure(figsize=(0.8,0.8))\n    a = fig.add_subplot(111)\n    #test (Ane):\n    #plot_image = cv2.flip(doseMap_film,-1) #remove this test afterwards\n    plot_image = a.pcolormesh(doseMap_film, cmap='viridis', rasterized=True, vmin=0, vmax=600)\n    fig.colorbar(plot_image, ax=a)\n    canvas_dosemap_film = FigureCanvasTkAgg(fig,master = Globals.tab3)\n    canvas_dosemap_film.get_tk_widget().place(relwidth=0.6, relheight=0.55, relx = 0.03, rely=0.2)#relwidth=0.3, rely=0.1\n    canvas_dosemap_film.draw()\n    #plot the dose map (this must be shrunk to (408,508))\n\n\ndef prepare_Image():\n    cv2Img = cv2.imread(Globals.map_dose_film_dataset.get(), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n    if(cv2Img is None):\n        current_folder = os.getcwd()\n        parent = os.path.dirname(Globals.map_dose_film_dataset.get())\n        os.chdir(parent)\n        cv2Img=cv2.imread(basename(normpath(Globals.map_dose_film_dataset.get())), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n        os.chdir(current_folder)\n        
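        # (added note) cv2.imread returns None rather than raising when it cannot
        # read a path (e.g. one containing non-ASCII characters such as Æ, Ø, Å on
        # some platforms); the chdir-and-basename retry above works around that,
        # and the check below catches the case where reading still failed.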
if(cv2Img is None):\n            messagebox.showerror(\"Error\", \"Something has happened. Check that the filename does not contain Æ,Ø,Å\")\n            return\n    \n    if(cv2Img.shape[2] == 3):\n        if(cv2Img.shape[0]==1270 and cv2Img.shape[1]==1016):\n            cv2Img = abs(cv2Img-Globals.correctionMatrix127)\n            cv2Img = np.clip(cv2Img, 0, 65535)\n        elif(cv2Img.shape[0]==720 and cv2Img.shape[1]==576):\n            cv2Img = abs(cv2Img - Globals.correctionMatrix72)\n            cv2Img = np.clip(cv2Img, 0, 65535)\n        else:\n            messagebox.showerror(\"Error\",\"The resolution of the image is not consistent with dpi\")\n\n    else:\n        messagebox.showerror(\"Error\",\"The uploaded image needs to be in RGB-format\")\n        return\n\n    #Read the last calibration done, or ask if one wishes to change it\n    choose_batch_window = tk.Toplevel(Globals.tab3)\n    choose_batch_window.geometry(\"800x400\")\n    choose_batch_window.grab_set()\n\n    def set_batch():\n        choose_batch_window.destroy()\n        f = open('calibration.txt', 'r')\n        lines = f.readlines()\n        words = lines[Globals.map_dose_film_batch.get()].split()\n        Globals.popt_red[0] = float(words[3])\n        Globals.popt_red[1] = float(words[4])\n        Globals.popt_red[2] = float(words[5])\n        f.close()\n        calculate_dose_map(cv2Img)\n\n    batch_cnt = 0\n    r = open('calibration.txt', 'r')\n    lines = r.readlines()\n    write_batch_y_coord = 0.3\n    for l in lines:\n        words = l.split()\n        line = \"Batch nr. : \" + words[2] + \". Date: \" + words[0] + \" \" + words[1] + \".\"\n        write_batch = tk.Text(choose_batch_window, width=1, height=1)\n        write_batch.place(relwidth=0.7, relheight=0.1, relx = 0.1, rely=write_batch_y_coord)\n        write_batch.insert(INSERT, line)\n        write_batch.config(state=DISABLED, bd = 0, font=('calibri', '12'))\n        \n        Radiobutton(choose_batch_window, text='',cursor='hand2',font=('calibri', '14'), \\\n            variable=Globals.map_dose_film_batch, value=batch_cnt).place(relwidth=0.08, \\\n            relheight=0.1, relx=0.8, rely=write_batch_y_coord)\n\n        write_batch_y_coord+=0.1; batch_cnt+=1\n\n    ok_batch_button = tk.Button(choose_batch_window, text='OK', cursor='hand2',\\\n        font=('calibri', '14'), overrelief=GROOVE, state=tk.ACTIVE, width = 15, command=set_batch)\n    ok_batch_button.place(relwidth=0.2, relheight=0.2, relx=0.4, rely=0.9)\n    r.close()\n\ndef draw_ROI(img, scale_horizontal, scale_vertical):\n    draw_ROI_window = tk.Toplevel(Globals.tab3)\n    draw_ROI_window.grab_set()\n    local_frame= Frame(draw_ROI_window, bd = 2, relief=SUNKEN)\n    local_frame.grid_rowconfigure(0,weight=1)\n    local_frame.grid_columnconfigure(0, weight=1)\n\n    local_canvas = Canvas(local_frame, bd=0)\n    local_canvas.grid(row=0,column=0, sticky=N+S+E+W)\n\n    w = 10 + img.width()\n    h = 10 + img.height()\n    draw_ROI_window.geometry(\"%dx%d+0+0\" % (w, h))\n\n    local_canvas.create_image(0,0,image=img,anchor=\"nw\")\n    local_canvas.config(scrollregion=local_canvas.bbox(ALL), cursor='arrow')\n    local_canvas.image= img\n    \n    rectangle = local_canvas.create_rectangle(0,0,0,0,outline='green')\n\n    def buttonPushed(event):\n        Globals.map_dose_ROI_x_start.set(event.x)\n        Globals.map_dose_ROI_y_start.set(event.y)\n    \n    def buttonMoving(event):\n        local_canvas.coords(rectangle, Globals.map_dose_ROI_x_start.get(), Globals.map_dose_ROI_y_start.get(), \\\n            event.x, event.y)\n\n    def buttonReleased(event):\n        Globals.map_dose_ROI_x_end.set(event.x)\n        Globals.map_dose_ROI_y_end.set(event.y)\n        local_canvas.coords(rectangle, Globals.map_dose_ROI_x_start.get(), Globals.map_dose_ROI_y_start.get(),\\\n            Globals.map_dose_ROI_x_end.get(), Globals.map_dose_ROI_y_end.get())\n        local_canvas.itemconfig(rectangle, outline='Blue')\n        answer = 
messagebox.askquestion(\"Question\",\"Happy with placement?\", parent=draw_ROI_window)\n if(answer=='yes'):\n Globals.map_dose_ROI_x_start.set(Globals.map_dose_ROI_x_start.get()*scale_horizontal)\n Globals.map_dose_ROI_y_start.set(Globals.map_dose_ROI_y_start.get()*scale_vertical)\n Globals.map_dose_ROI_x_end.set(Globals.map_dose_ROI_x_end.get()*scale_horizontal)\n Globals.map_dose_ROI_y_end.set(Globals.map_dose_ROI_y_end.get()*scale_vertical)\n prepare_Image()\n draw_ROI_window.destroy()\n \n local_canvas.bind(\"\", buttonMoving)\n local_canvas.bind(\"\", buttonPushed)\n local_canvas.bind(\"\", buttonReleased)\n\n local_frame.pack(fill='both', expand=1)\n \n\ndef draw_image_with_marks(img, scale_horizontal, scale_vertical, mark_isocenter_window, frame):\n #check_isocenter_window = tk.Toplevel(Globals.tab3)\n #check_isocenter_window.grab_set()\n #frame_local = Frame(mark_isocenter_window, bd=2, relief=SUNKEN) #check_isocenter_window, bd=2, relief=SUNKEN)\n #frame_local.grid_rowconfigure(0, weight=1)\n #frame_local.grid_columnconfigure(0, weight=1)\n canvas_local = Canvas(frame, bd=0)\n canvas_local.grid(row=0, column=0, sticky=N+S+E+W)\n\n #w = 10 + img.width()\n #h = 10 + img.height()\n #check_isocenter_window.geometry(\"%dx%d+0+0\" % (w, h))\n\n canvas_local.create_image(0,0,image=img,anchor=\"nw\")\n canvas_local.config(scrollregion=canvas_local.bbox(ALL), cursor='arrow')\n canvas_local.image= img\n canvas_local.create_oval(Globals.map_dose_isocenter_map_x_coord_unscaled[0]-2, Globals.map_dose_isocenter_map_y_coord_unscaled[0]-2,\\\n Globals.map_dose_isocenter_map_x_coord_unscaled[0]+2, Globals.map_dose_isocenter_map_y_coord_unscaled[0]+2, fill='red')\n canvas_local.create_oval(Globals.map_dose_isocenter_map_x_coord_unscaled[1]-2, Globals.map_dose_isocenter_map_y_coord_unscaled[1]-2, \\\n Globals.map_dose_isocenter_map_x_coord_unscaled[1]+2, Globals.map_dose_isocenter_map_y_coord_unscaled[1]+2, fill='red')\n canvas_local.create_oval(Globals.map_dose_isocenter_map_x_coord_unscaled[2]-2, Globals.map_dose_isocenter_map_y_coord_unscaled[2]-2,\\\n Globals.map_dose_isocenter_map_x_coord_unscaled[2]+2, Globals.map_dose_isocenter_map_y_coord_unscaled[2]+2, fill='red')\n canvas_local.create_oval(Globals.map_dose_isocenter_map_x_coord_unscaled[3]-2, Globals.map_dose_isocenter_map_y_coord_unscaled[3]-2,\\\n Globals.map_dose_isocenter_map_x_coord_unscaled[3]+2, Globals.map_dose_isocenter_map_y_coord_unscaled[3]+2, fill='red')\n \n canvas_local.create_line(Globals.map_dose_isocenter_map_x_coord_unscaled[0], Globals.map_dose_isocenter_map_y_coord_unscaled[0]\\\n , Globals.map_dose_isocenter_map_x_coord_unscaled[1], Globals.map_dose_isocenter_map_y_coord_unscaled[1], \\\n fill='purple', smooth=1, width=2)\n canvas_local.create_line(Globals.map_dose_isocenter_map_x_coord_unscaled[2], Globals.map_dose_isocenter_map_y_coord_unscaled[2]\\\n , Globals.map_dose_isocenter_map_x_coord_unscaled[3], Globals.map_dose_isocenter_map_y_coord_unscaled[3], \\\n fill='purple', smooth=1, width=2)\n\n x1 = Globals.map_dose_isocenter_map_x_coord_unscaled[0]\n x2 = Globals.map_dose_isocenter_map_x_coord_unscaled[1]\n x3 = Globals.map_dose_isocenter_map_x_coord_unscaled[2]\n x4 = Globals.map_dose_isocenter_map_x_coord_unscaled[3]\n y1 = Globals.map_dose_isocenter_map_y_coord_unscaled[0]\n y2 = Globals.map_dose_isocenter_map_y_coord_unscaled[1]\n y3 = Globals.map_dose_isocenter_map_y_coord_unscaled[2]\n y4 = Globals.map_dose_isocenter_map_y_coord_unscaled[3]\n\n \n\n if(y1==y2 and y3==y4):\n messagebox.showerror(\"Error\", 
\"Reference points are not correct. Try again.\")\n check_isocenter_window.destroy()\n upload_film_data()\n elif(y1==y2):\n if(x1==x2):\n messagebox.showerror(\"Error\", \"Reference points are not correct. Try again.\")\n check_isocenter_window.destroy()\n upload_film_data()\n else:\n a = 0; b=y1\n if(x3==x4):\n isocenter = [x3,y1]\n else:\n c=(y3-y4)/(x3-x4); d = y3 - c*x3\n isocenter = [(d-b)/(a-c), b]\n elif(y3==y4):\n if(x3==x4):\n messagebox.showerror(\"Error\", \"Reference points are not correct. Try again.\")\n check_isocenter_window.destroy()\n upload_film_data()\n else:\n c = 0; d = y3\n if(x1==x2):\n isocenter = [x1,y3]\n else:\n a = (y1-y2)/(x1-x2); b = y1 - a*x1\n isocenter = [(d-b)/(a-c), d]\n else:\n if(x1==x2 and x3==x4):\n messagebox.showerror(\"Error\", \"Reference points are not correct. Try again.\")\n check_isocenter_window.destroy()\n upload_film_data()\n elif(x1==x2):\n c = (y3-y4)/(x3-x4); d = y3 - c*x3\n isocenter = [x1, c*x1+d]\n elif(x3==x4):\n a = (y1-y2)/(x1-x2); b = y1 - a*x1\n isocenter = [x3, a*x3+d]\n else:\n a = (y1-y2)/(x1-x2)\n b = y1 - a*x1\n c = (y3-y4)/(x3-x4)\n d = y3 - c*x3\n isocenter = [(d-b)/(a-c), a*(d-b)/(a-c) + b]\n\n #frame.pack(fill='both', expand=1)\n if(isocenter[0] < 0 or isocenter[1] < 0 or isocenter[0] > 408 or isocenter[1] > 508):\n messagebox.showerror(\"Error\", \"Reference points are not correct. Try again.\")\n mark_isocenter_window.destroy() #check_isocenter_window.destroy()\n upload_film_data()\n else:\n canvas_local.create_oval(isocenter[0]-6, isocenter[1]-6, isocenter[0]+6,isocenter[1]+6, outline=\"pink\") \n answer = messagebox.askquestion(\"Question\",\"Happy with placement?\", parent=mark_isocenter_window)#check_isocenter_window)\n if(answer==\"yes\"):\n Globals.map_dose_isocenter_film = [isocenter[0]*scale_horizontal, isocenter[1]*scale_vertical]\n mark_isocenter_window.destroy() #check_isocenter_window.destroy()\n draw_ROI(img, scale_horizontal, scale_vertical)\n \n else:\n mark_isocenter_window.destroy() #check_isocenter_window.destroy()\n upload_film_data() \n return\n \n \n\n\n\n \n\ndef upload_film_data():\n current_folder = os.getcwd()\n os.chdir(os.path.dirname(sys.argv[0]))\n img = Image.open(Globals.map_dose_film_dataset.get())\n if(not (img.width == 1016 or img.width == 576)):\n messagebox.showerror(\"Error\", \"Dpi in image has to be 127 or 72\")\n return\n\n Globals.map_dose_isocenter_map_x_coord_scaled = []\n Globals.map_dose_isocenter_map_x_coord_unscaled = []\n Globals.map_dose_isocenter_map_y_coord_scaled = []\n Globals.map_dose_isocenter_map_y_coord_unscaled = []\n\n mark_isocenter_window = tk.Toplevel(Globals.tab3)\n mark_isocenter_window.grab_set()\n frame = Frame(mark_isocenter_window, bd=2, relief=SUNKEN)\n frame.grid_rowconfigure(0, weight=1)\n frame.grid_columnconfigure(0, weight=1)\n canvas = Canvas(frame, bd=0)\n canvas.grid(row=0, column=0, sticky=N+S+E+W)\n\n \n scale_horizontal = img.width/408\n scale_vertical = img.height/508\n img = img.resize((408,508))\n img = ImageTk.PhotoImage(image=img)\n os.chdir(current_folder)\n canvas.image = img\n\n w = 10 + img.width()\n h = 10 + img.height()\n mark_isocenter_window.geometry(\"%dx%d+0+0\" % (w, h))\n canvas.create_image(0,0,image=img,anchor=\"nw\")\n canvas.config(scrollregion=canvas.bbox(ALL), cursor='sb_up_arrow')\n #x_coor = []\n #y_coor = []\n \n def findCoords(event):\n Globals.map_dose_isocenter_map_x_coord_scaled.append(event.x*scale_vertical)\n Globals.map_dose_isocenter_map_y_coord_scaled.append(event.y*scale_horizontal)\n 
Globals.map_dose_isocenter_map_x_coord_unscaled.append(event.x)\n Globals.map_dose_isocenter_map_y_coord_unscaled.append(event.y)\n canvas.create_oval(event.x-2, event.y-2, event.x+2, event.y+2, fill='red')\n if (len(Globals.map_dose_isocenter_map_x_coord_scaled)==1):\n canvas.config(cursor='sb_down_arrow')\n elif(len(Globals.map_dose_isocenter_map_x_coord_scaled)==2):\n canvas.config(cursor='sb_right_arrow')\n elif(len(Globals.map_dose_isocenter_map_x_coord_scaled)==3):\n canvas.config(cursor='sb_left_arrow')\n else:\n #mark_isocenter_window.destroy()\n draw_image_with_marks(img, scale_horizontal, scale_vertical, mark_isocenter_window, frame)\n \n \n \n canvas.bind(\"